diff --git a/.asf.yaml b/.asf.yaml
index e96b43cf0..cb0520c17 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -29,6 +29,13 @@ github:
     rebase: false
   features:
     issues: true
+  protected_branches:
+    main:
+      required_status_checks:
+        # require branches to be up-to-date before merging
+        strict: true
+        # don't require any jobs to pass
+        contexts: []
 staging:
   whoami: asf-staging
diff --git a/.cargo/config.toml b/.cargo/config.toml
index 91a099a61..af951327f 100644
--- a/.cargo/config.toml
+++ b/.cargo/config.toml
@@ -1,12 +1,5 @@
 [target.x86_64-apple-darwin]
-rustflags = [
-    "-C", "link-arg=-undefined",
-    "-C", "link-arg=dynamic_lookup",
-]
+rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"]
 
 [target.aarch64-apple-darwin]
-rustflags = [
-    "-C", "link-arg=-undefined",
-    "-C", "link-arg=dynamic_lookup",
-]
-
+rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"]
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 350be46d5..455a0dc1a 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -15,50 +15,248 @@
 # specific language governing permissions and limitations
 # under the License.
 
-name: Python Release Build
+# Reusable workflow for building
+# This ensures the same build steps run for both debug (PRs) and release (main/tags) builds
+
+name: Build
+
 on:
-  pull_request:
-    branches: ["main"]
-  push:
-    tags: ["*-rc*"]
-    branches: ["branch-*"]
+  workflow_call:
+    inputs:
+      build_mode:
+        description: 'Build mode: debug or release'
+        required: true
+        type: string
+      run_wheels:
+        description: 'Whether to build distribution wheels'
+        required: false
+        type: boolean
+        default: false
+
+env:
+  CARGO_TERM_COLOR: always
+  RUST_BACKTRACE: 1
 
 jobs:
-  build:
+  # ============================================
+  # Linting Jobs
+  # ============================================
+  lint-rust:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
+
+      - name: Setup Rust
+        uses: dtolnay/rust-toolchain@stable
+        with:
+          toolchain: "nightly"
+          components: rustfmt
+
+      - name: Cache Cargo
+        uses: Swatinem/rust-cache@v2
+
+      - name: Check formatting
+        run: cargo +nightly fmt --all -- --check
+
+  lint-python:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v6
+
       - name: Install Python
         uses: actions/setup-python@v5
         with:
-          python-version: "3.11"
+          python-version: "3.12"
+
+      - uses: astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+
       - name: Install dependencies
-        run: |
-          python -m pip install --upgrade pip
-          pip install ruff
-      # Update output format to enable automatic inline annotations.
+        run: uv sync --dev --no-install-package datafusion
+
       - name: Run Ruff
-        run: ruff check --output-format=github python/
+        run: |
+          uv run --no-project ruff check --output-format=github python/
+          uv run --no-project ruff format --check python/
+
+      - name: Run codespell
+        run: |
+          uv run --no-project codespell --toml pyproject.toml
+
+  lint-toml:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Install taplo
+        uses: taiki-e/install-action@v2
+        with:
+          tool: taplo-cli
+
+      # if you encounter an error, try running 'taplo format' to fix the formatting automatically.
+      - name: Check Cargo.toml formatting
+        run: taplo format --check
+
+  check-crates-patch:
+    if: inputs.build_mode == 'release'
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Ensure [patch.crates-io] is empty
+        run: python3 dev/check_crates_patch.py
 
   generate-license:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
-      - uses: actions-rs/toolchain@v1
+      - uses: actions/checkout@v6
+
+      - uses: astral-sh/setup-uv@v6
         with:
-          profile: minimal
-          toolchain: stable
-          override: true
+          enable-cache: true
+
+      - name: Install cargo-license
+        uses: taiki-e/install-action@v2
+        with:
+          tool: cargo-license
+
       - name: Generate license file
-        run: python ./dev/create_license.py
-      - uses: actions/upload-artifact@v3
+        run: uv run --no-project python ./dev/create_license.py
+
+      - uses: actions/upload-artifact@v6
         with:
           name: python-wheel-license
           path: LICENSE.txt
 
+  # ============================================
+  # Build - Linux x86_64
+  # ============================================
+  build-manylinux-x86_64:
+    needs: [generate-license, lint-rust, lint-python]
+    name: ManyLinux x86_64
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v6
+
+      - run: rm LICENSE.txt
+      - name: Download LICENSE.txt
+        uses: actions/download-artifact@v7
+        with:
+          name: python-wheel-license
+          path: .
+
+      - name: Setup Rust
+        uses: dtolnay/rust-toolchain@stable
+
+      - name: Cache Cargo
+        uses: Swatinem/rust-cache@v2
+        with:
+          key: ${{ inputs.build_mode }}
+
+      - uses: astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+
+      - name: Build (release mode)
+        uses: PyO3/maturin-action@v1
+        if: inputs.build_mode == 'release'
+        with:
+          target: x86_64-unknown-linux-gnu
+          manylinux: "2_28"
+          args: --release --strip --features protoc,substrait --out dist
+          rustup-components: rust-std
+
+      - name: Build (debug mode)
+        uses: PyO3/maturin-action@v1
+        if: inputs.build_mode == 'debug'
+        with:
+          target: x86_64-unknown-linux-gnu
+          manylinux: "2_28"
+          args: --features protoc,substrait --out dist
+          rustup-components: rust-std
+
+      - name: Build FFI test library
+        uses: PyO3/maturin-action@v1
+        with:
+          target: x86_64-unknown-linux-gnu
+          manylinux: "2_28"
+          working-directory: examples/datafusion-ffi-example
+          args: --out dist
+          rustup-components: rust-std
+
+      - name: Archive wheels
+        uses: actions/upload-artifact@v6
+        with:
+          name: dist-manylinux-x86_64
+          path: dist/*
+
+      - name: Archive FFI test wheel
+        uses: actions/upload-artifact@v6
+        with:
+          name: test-ffi-manylinux-x86_64
+          path: examples/datafusion-ffi-example/dist/*
+
+  # ============================================
+  # Build - Linux ARM64
+  # ============================================
+  build-manylinux-aarch64:
+    needs: [generate-license, lint-rust, lint-python]
+    name: ManyLinux arm64
+    runs-on: ubuntu-24.04-arm
+    steps:
+      - uses: actions/checkout@v6
+
+      - run: rm LICENSE.txt
+      - name: Download LICENSE.txt
+        uses: actions/download-artifact@v7
+        with:
+          name: python-wheel-license
+          path: .
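+      # The built wheel should bundle the dependency license listing produced
+      # by the generate-license job (via cargo-license), which is presumably why
+      # the checked-in LICENSE.txt is removed and the generated artifact is
+      # downloaded in its place. (Explanatory comment, inferred from the
+      # rm/download steps above.)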
+
+      - name: Setup Rust
+        uses: dtolnay/rust-toolchain@stable
+
+      - name: Cache Cargo
+        uses: Swatinem/rust-cache@v2
+        with:
+          key: ${{ inputs.build_mode }}
+
+      - uses: astral-sh/setup-uv@v6
+        with:
+          enable-cache: true
+
+      - name: Build (release mode)
+        uses: PyO3/maturin-action@v1
+        if: inputs.build_mode == 'release'
+        with:
+          target: aarch64-unknown-linux-gnu
+          manylinux: "2_28"
+          args: --release --strip --features protoc,substrait --out dist
+          rustup-components: rust-std
+
+      - name: Build (debug mode)
+        uses: PyO3/maturin-action@v1
+        if: inputs.build_mode == 'debug'
+        with:
+          target: aarch64-unknown-linux-gnu
+          manylinux: "2_28"
+          args: --features protoc,substrait --out dist
+          rustup-components: rust-std
+
+      - name: Archive wheels
+        uses: actions/upload-artifact@v6
+        if: inputs.build_mode == 'release'
+        with:
+          name: dist-manylinux-aarch64
+          path: dist/*
+
+  # ============================================
+  # Build - macOS arm64 / Windows
+  # ============================================
   build-python-mac-win:
-    needs: [generate-license]
-    name: Mac/Win
+    needs: [generate-license, lint-rust, lint-python]
+    name: macOS arm64 & Windows
     runs-on: ${{ matrix.os }}
     strategy:
       fail-fast: false
@@ -66,37 +264,49 @@ jobs:
         python-version: ["3.10"]
         os: [macos-latest, windows-latest]
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
 
-      - uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-
-      - name: Upgrade pip
-        run: python -m pip install --upgrade pip
-
-      - name: Install maturin
-        run: pip install maturin==1.5.1
+      - uses: dtolnay/rust-toolchain@stable
 
       - run: rm LICENSE.txt
       - name: Download LICENSE.txt
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v7
         with:
           name: python-wheel-license
           path: .
 
+      - name: Cache Cargo
+        uses: Swatinem/rust-cache@v2
+        with:
+          key: ${{ inputs.build_mode }}
+
+      - uses: astral-sh/setup-uv@v7
+        with:
+          enable-cache: true
+
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@v3
         with:
-          version: "3.20.2"
+          version: "27.4"
           repo-token: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Build Python package
-        run: maturin build --release --strip --features substrait
+      - name: Install dependencies
+        run: uv sync --dev --no-install-package datafusion
+
+      # Run clippy BEFORE maturin so we can avoid rebuilding. The features must match
+      # exactly the features used by maturin. Linux maturin builds need to happen in a
+      # container so only run this for our mac runner.
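+      # (Concretely, clippy below passes `--features substrait`, the same feature
+      # set the maturin build steps use; if the feature sets diverged, cargo would
+      # have to recompile the crate graph from scratch.)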
+      - name: Run Clippy
+        if: matrix.os != 'windows-latest'
+        run: cargo clippy --no-deps --all-targets --features substrait -- -D warnings
+
+      - name: Build Python package (release mode)
+        if: inputs.build_mode == 'release'
+        run: uv run --no-project maturin build --release --strip --features substrait
+
+      - name: Build Python package (debug mode)
+        if: inputs.build_mode != 'release'
+        run: uv run --no-project maturin build --features substrait
 
       - name: List Windows wheels
         if: matrix.os == 'windows-latest'
@@ -110,157 +320,215 @@ jobs:
         run: find target/wheels/
 
       - name: Archive wheels
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v6
+        if: inputs.build_mode == 'release'
         with:
-          name: dist
+          name: dist-${{ matrix.os }}
           path: target/wheels/*
 
-  build-macos-aarch64:
-    needs: [generate-license]
-    name: Mac arm64
-    runs-on: macos-latest
+  # ============================================
+  # Build - macOS x86_64 (release only)
+  # ============================================
+  build-macos-x86_64:
+    if: inputs.build_mode == 'release'
+    needs: [generate-license, lint-rust, lint-python]
+    runs-on: macos-15-intel
     strategy:
       fail-fast: false
       matrix:
        python-version: ["3.10"]
     steps:
-      - uses: actions/checkout@v4
-
-      - uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - uses: actions-rs/toolchain@v1
-        with:
-          toolchain: stable
-
-      - name: Set up Rust targets
-        run: rustup target add aarch64-apple-darwin
-
-      - name: Upgrade pip
-        run: python -m pip install --upgrade pip
+      - uses: actions/checkout@v6
 
-      - name: Install maturin
-        run: pip install maturin==1.5.1
+      - uses: dtolnay/rust-toolchain@stable
 
       - run: rm LICENSE.txt
       - name: Download LICENSE.txt
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v7
         with:
           name: python-wheel-license
           path: .
 
+      - name: Cache Cargo
+        uses: Swatinem/rust-cache@v2
+        with:
+          key: ${{ inputs.build_mode }}
+
+      - uses: astral-sh/setup-uv@v7
+        with:
+          enable-cache: true
+
       - name: Install Protoc
-        uses: arduino/setup-protoc@v1
+        uses: arduino/setup-protoc@v3
         with:
-          version: "3.20.2"
+          version: "27.4"
           repo-token: ${{ secrets.GITHUB_TOKEN }}
 
-      - name: Build Python package
-        run: maturin build --release --strip --target aarch64-apple-darwin --features substrait
+      - name: Install dependencies
+        run: uv sync --dev --no-install-package datafusion
+
+      - name: Build (release mode)
+        run: |
+          uv run --no-project maturin build --release --strip --features substrait
+
       - name: List Mac wheels
         run: find target/wheels/
 
       - name: Archive wheels
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v6
         with:
-          name: dist
+          name: dist-macos-aarch64
           path: target/wheels/*
 
-  build-manylinux:
+  # ============================================
+  # Build - Source Distribution
+  # ============================================
+  build-sdist:
     needs: [generate-license]
-    name: Manylinux
+    name: Source distribution
+    if: inputs.build_mode == 'release'
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@v4
+      - uses: actions/checkout@v6
       - run: rm LICENSE.txt
       - name: Download LICENSE.txt
-        uses: actions/download-artifact@v3
+        uses: actions/download-artifact@v7
         with:
           name: python-wheel-license
           path: .
       - run: cat LICENSE.txt
-      - name: Build wheels
+      - name: Build sdist
         uses: PyO3/maturin-action@v1
-        env:
-          RUST_BACKTRACE: 1
         with:
-          rust-toolchain: nightly
-          target: x86_64
+          rust-toolchain: stable
           manylinux: auto
-          rustup-components: rust-std rustfmt # Keep them in one line due to https://github.com/PyO3/maturin-action/issues/153
-          args: --release --manylinux 2014 --features protoc,substrait
-      - name: Archive wheels
-        uses: actions/upload-artifact@v3
-        with:
-          name: dist
-          path: target/wheels/*
+          rustup-components: rust-std rustfmt
+          args: --release --sdist --out dist --features protoc,substrait
+      - name: Assert sdist build does not generate wheels
+        run: |
+          if [ "$(ls -A target/wheels)" ]; then
+            echo "Error: Sdist build generated wheels"
+            exit 1
+          else
+            echo "Directory is clean"
+          fi
+        shell: bash
 
-  build-manylinux-aarch64:
-    needs: [generate-license]
-    name: Manylinux arm64
+  # ============================================
+  # Merge build artifacts
+  # ============================================
+  merge-build-artifacts:
     runs-on: ubuntu-latest
+    name: Merge build artifacts
+    if: inputs.build_mode == 'release'
+    needs:
+      - build-python-mac-win
+      - build-macos-x86_64
+      - build-manylinux-x86_64
+      - build-manylinux-aarch64
+      - build-sdist
     steps:
-      - uses: actions/checkout@v4
-      - run: rm LICENSE.txt
-      - name: Download LICENSE.txt
-        uses: actions/download-artifact@v3
-        with:
-          name: python-wheel-license
-          path: .
-      - run: cat LICENSE.txt
-      - name: Build wheels
-        uses: PyO3/maturin-action@v1
-        env:
-          RUST_BACKTRACE: 1
-        with:
-          rust-toolchain: nightly
-          target: aarch64
-          # Use manylinux_2_28-cross because the manylinux2014-cross has GCC 4.8.5, which causes the build to fail
-          manylinux: 2_28
-          rustup-components: rust-std rustfmt # Keep them in one line due to https://github.com/PyO3/maturin-action/issues/153
-          args: --release --features protoc,substrait
-      - name: Archive wheels
-        uses: actions/upload-artifact@v3
+      - name: Merge Build Artifacts
+        uses: actions/upload-artifact/merge@v6
         with:
           name: dist
-          path: target/wheels/*
+          pattern: dist-*
 
-  build-sdist:
-    needs: [generate-license]
-    name: Source distribution
+  # ============================================
+  # Build - Documentation
+  # ============================================
+  # Documentation build job that runs after wheels are built
+  build-docs:
+    name: Build docs
     runs-on: ubuntu-latest
+    needs: [build-manylinux-x86_64] # Only need the Linux wheel for docs
+    # Only run docs on main branch pushes, tags, or PRs
+    if: github.event_name == 'push' || github.event_name == 'pull_request'
     steps:
-      - uses: actions/checkout@v4
-      - run: rm LICENSE.txt
-      - name: Download LICENSE.txt
-        uses: actions/download-artifact@v3
+      - name: Set target branch
+        if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
+        id: target-branch
+        run: |
+          set -x
+          if test '${{ github.ref }}' = 'refs/heads/main'; then
+            echo "value=asf-staging" >> "$GITHUB_OUTPUT"
+          elif test '${{ github.ref_type }}' = 'tag'; then
+            echo "value=asf-site" >> "$GITHUB_OUTPUT"
+          else
+            echo "Unsupported input: ${{ github.ref }} / ${{ github.ref_type }}"
+            exit 1
+          fi
+
+      - name: Checkout docs sources
+        uses: actions/checkout@v6
+
+      - name: Checkout docs target branch
+        if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
+        uses: actions/checkout@v6
         with:
-          name: python-wheel-license
-          path: .
-      - run: cat LICENSE.txt
-      - name: Build sdist
-        uses: PyO3/maturin-action@v1
+          fetch-depth: 0
+          ref: ${{ steps.target-branch.outputs.value }}
+          path: docs-target
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
         with:
-          rust-toolchain: stable
-          manylinux: auto
-          rustup-components: rust-std rustfmt
-          args: --release --sdist --out dist --features protoc,substrait
-      - name: Archive wheels
-        uses: actions/upload-artifact@v3
+          python-version: "3.10"
+
+      - name: Install dependencies
+        uses: astral-sh/setup-uv@v7
         with:
-          name: dist
-          path: target/wheels/*
+          enable-cache: true
+
+      # Download the Linux wheel built in the previous job
+      - name: Download pre-built Linux wheel
+        uses: actions/download-artifact@v7
+        with:
+          name: dist-manylinux-x86_64
+          path: wheels/
 
-  # NOTE: PyPI publish needs to be done manually for now after release passed the vote
-  # release:
-  #   name: Publish in PyPI
-  #   needs: [build-manylinux, build-python-mac-win]
-  #   runs-on: ubuntu-latest
-  #   steps:
-  #     - uses: actions/download-artifact@v3
-  #     - name: Publish to PyPI
-  #       uses: pypa/gh-action-pypi-publish@master
-  #       with:
-  #         user: __token__
-  #         password: ${{ secrets.pypi_password }}
+      # Install from the pre-built wheels
+      - name: Install from pre-built wheels
+        run: |
+          set -x
+          uv venv
+          # Install documentation dependencies
+          uv sync --dev --no-install-package datafusion --group docs
+          # Install all pre-built wheels
+          WHEELS=$(find wheels/ -name "*.whl")
+          if [ -n "$WHEELS" ]; then
+            echo "Installing wheels:"
+            echo "$WHEELS"
+            uv pip install wheels/*.whl
+          else
+            echo "ERROR: No wheels found!"
+            exit 1
+          fi
+
+      - name: Build docs
+        run: |
+          set -x
+          cd docs
+          curl -O https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv
+          curl -O https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_2021-01.parquet
+          uv run --no-project make html
+
+      - name: Copy & push the generated HTML
+        if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
+        run: |
+          set -x
+          cd docs-target
+          # delete anything but: 1) '.'; 2) '..'; 3) .git/
+          find ./ | grep -vE "^./$|^../$|^./.git" | xargs rm -rf
+          cp ../.asf.yaml .
+          cp -r ../docs/build/html/* .
+          git status --porcelain
+          if [ "$(git status --porcelain)" != "" ]; then
+            git config user.name "github-actions[bot]"
+            git config user.email "github-actions[bot]@users.noreply.github.com"
+            git add --all
+            git commit -m 'Publish built docs triggered by ${{ github.sha }}'
+            git push || git push --force
+          fi
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 000000000..ab284b522
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# CI workflow for pull requests - runs tests in DEBUG mode for faster feedback
+
+name: CI
+
+on:
+  pull_request:
+    branches: ["main"]
+
+concurrency:
+  group: ${{ github.repository }}-${{ github.head_ref || github.sha }}-${{ github.workflow }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    uses: ./.github/workflows/build.yml
+    with:
+      build_mode: debug
+      run_wheels: false
+    secrets: inherit
+
+  test:
+    needs: build
+    uses: ./.github/workflows/test.yml
+    secrets: inherit
diff --git a/.github/workflows/conda.yml b/.github/workflows/conda.yml
deleted file mode 100644
index f25a431c4..000000000
--- a/.github/workflows/conda.yml
+++ /dev/null
@@ -1,108 +0,0 @@
-name: Build conda nightly
-on:
-  push:
-    branches:
-      - main
-  pull_request:
-    paths:
-      - Cargo.toml
-      - Cargo.lock
-      - pyproject.toml
-      - conda/recipes/**
-      - .github/workflows/conda.yml
-  schedule:
-    - cron: '0 0 * * 0'
-
-# When this workflow is queued, automatically cancel any previous running
-# or pending jobs from the same branch
-concurrency:
-  group: conda-${{ github.head_ref }}
-  cancel-in-progress: true
-
-# Required shell entrypoint to have properly activated conda environments
-defaults:
-  run:
-    shell: bash -l {0}
-
-jobs:
-  conda:
-    name: "Build conda nightlies (python: ${{ matrix.python }}, arch: ${{ matrix.arch }})"
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        python: ["3.8", "3.9", "3.10", "3.11"]
-        arch: ["linux-64", "linux-aarch64"]
-    steps:
-      - name: Manage disk space
-        if: matrix.arch == 'linux-aarch64'
-        run: |
-          sudo mkdir -p /opt/empty_dir || true
-          for d in \
-            /opt/ghc \
-            /opt/hostedtoolcache \
-            /usr/lib/jvm \
-            /usr/local/.ghcup \
-            /usr/local/lib/android \
-            /usr/local/share/powershell \
-            /usr/share/dotnet \
-            /usr/share/swift \
-          ; do
-            sudo rsync --stats -a --delete /opt/empty_dir/ $d || true
-          done
-          sudo apt-get purge -y -f firefox \
-            google-chrome-stable \
-            microsoft-edge-stable
-          sudo apt-get autoremove -y >& /dev/null
-          sudo apt-get autoclean -y >& /dev/null
-          sudo docker image prune --all --force
-          df -h
-      - name: Create swapfile
-        if: matrix.arch == 'linux-aarch64'
-        run: |
-          sudo fallocate -l 10GiB /swapfile || true
-          sudo chmod 600 /swapfile || true
-          sudo mkswap /swapfile || true
-          sudo swapon /swapfile || true
-      - uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-      - name: Set up Python
-        uses: conda-incubator/setup-miniconda@v3.0.4
-        with:
-          miniforge-variant: Mambaforge
-          use-mamba: true
-          python-version: "3.8"
-          channel-priority: strict
-      - name: Install dependencies
-        run: |
-          mamba install -c conda-forge boa conda-verify
-
-          which python
-          pip list
-          mamba list
-      # Clean the conda cache
-      - name: Clean Conda Cache
-        run: conda clean --all --yes
-      - name: Build conda packages
-        run: |
-          # suffix for nightly package versions
-          export VERSION_SUFFIX=a`date +%y%m%d`
-
-          conda mambabuild conda/recipes \
-            --python ${{ matrix.python }} \
-            --variants "{target_platform: [${{ matrix.arch }}]}" \
-            --error-overlinking \
-            --no-test \
-            --no-anaconda-upload \
-            --output-folder packages
-      - name: Test conda packages
-        if: matrix.arch == 'linux-64' # can only test native platform packages
-        run: |
-          conda mambabuild --test packages/${{ matrix.arch }}/*.tar.bz2
-      - name: Upload conda packages as artifacts
-        uses: actions/upload-artifact@v3
-        with:
-          name: "conda nightlies (python - ${{ matrix.python }}, arch - ${{ matrix.arch }})"
-          # need to install all conda channel metadata to properly install locally
-          path: packages/
diff --git a/.github/workflows/dev.yml b/.github/workflows/dev.yml
index 44481818e..2c8ecbc5e 100644
--- a/.github/workflows/dev.yml
+++ b/.github/workflows/dev.yml
@@ -25,10 +25,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v6
       - name: Setup Python
-        uses: actions/setup-python@v5
+        uses: actions/setup-python@v6
         with:
-          python-version: "3.10"
+          python-version: "3.14"
       - name: Audit licenses
         run: ./dev/release/run-rat.sh .
diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml
deleted file mode 100644
index 60dc15927..000000000
--- a/.github/workflows/docs.yaml
+++ /dev/null
@@ -1,96 +0,0 @@
-on:
-  push:
-    branches:
-      - main
-    tags-ignore:
-      - "**-rc**"
-  pull_request:
-    branches:
-      - main
-
-name: Deploy DataFusion Python site
-
-jobs:
-  debug-github-context:
-    name: Print github context
-    runs-on: ubuntu-latest
-    steps:
-      - name: Dump GitHub context
-        env:
-          GITHUB_CONTEXT: ${{ toJson(github) }}
-        run: |
-          echo "$GITHUB_CONTEXT"
-  build-docs:
-    name: Build docs
-    runs-on: ubuntu-latest
-    steps:
-      - name: Set target branch
-        if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
-        id: target-branch
-        run: |
-          set -x
-          if test '${{ github.ref }}' = 'refs/heads/main'; then
-            echo "value=asf-staging" >> "$GITHUB_OUTPUT"
-          elif test '${{ github.ref_type }}' = 'tag'; then
-            echo "value=asf-site" >> "$GITHUB_OUTPUT"
-          else
-            echo "Unsupported input: ${{ github.ref }} / ${{ github.ref_type }}"
-            exit 1
-          fi
-      - name: Checkout docs sources
-        uses: actions/checkout@v4
-      - name: Checkout docs target branch
-        if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-          ref: ${{ steps.target-branch.outputs.value }}
-          path: docs-target
-      - name: Setup Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: "3.11"
-
-      - name: Install Protoc
-        uses: arduino/setup-protoc@v1
-        with:
-          version: '3.20.2'
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Install dependencies
-        run: |
-          set -x
-          python3 -m venv venv
-          source venv/bin/activate
-          pip install -r requirements-311.txt
-          pip install -r docs/requirements.txt
-      - name: Build Datafusion
-        run: |
-          set -x
-          source venv/bin/activate
-          maturin develop
-
-      - name: Build docs
-        run: |
-          set -x
-          source venv/bin/activate
-          cd docs
-          make html
-
-      - name: Copy & push the generated HTML
-        if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref_type == 'tag')
-        run: |
-          set -x
-          cd docs-target
-          # delete anything but: 1) '.'; 2) '..'; 3) .git/
-          find ./ | grep -vE "^./$|^../$|^./.git" | xargs rm -rf
-          cp ../.asf.yaml .
-          cp -r ../docs/build/html/* .
-          git status --porcelain
-          if [ "$(git status --porcelain)" != "" ]; then
-            git config user.name "github-actions[bot]"
-            git config user.email "github-actions[bot]@users.noreply.github.com"
-            git add --all
-            git commit -m 'Publish built docs triggered by ${{ github.sha }}'
-            git push || git push --force
-          fi
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
new file mode 100644
index 000000000..bddc89eac
--- /dev/null
+++ b/.github/workflows/release.yml
@@ -0,0 +1,49 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# Release workflow - runs tests in RELEASE mode and builds distribution wheels
+# Triggered on:
+# - Merges to main
+# - Release candidate tags (*-rc*)
+# - Release tags (e.g., 45.0.0)
+
+name: Release Build
+
+on:
+  push:
+    branches:
+      - "main"
+    tags:
+      - "*-rc*" # Release candidates (e.g., 45.0.0-rc1)
+      - "[0-9]+.*" # Release tags (e.g., 45.0.0)
+
+concurrency:
+  group: ${{ github.repository }}-${{ github.head_ref || github.sha }}-${{ github.workflow }}
+  cancel-in-progress: true
+
+jobs:
+  build:
+    uses: ./.github/workflows/build.yml
+    with:
+      build_mode: release
+      run_wheels: true
+    secrets: inherit
+
+  test:
+    needs: build
+    uses: ./.github/workflows/test.yml
+    secrets: inherit
diff --git a/.github/workflows/take.yml b/.github/workflows/take.yml
new file mode 100644
index 000000000..86dc190ad
--- /dev/null
+++ b/.github/workflows/take.yml
@@ -0,0 +1,41 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
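+
+# Summary (inferred from the steps below): the first curl probes GitHub's
+# "check assignability" endpoint, which answers HTTP 204 only when the
+# commenter can be assigned; the second curl then performs the assignment.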
+
+name: Assign the issue via a `take` comment
+on:
+  issue_comment:
+    types: created
+
+permissions:
+  issues: write
+
+jobs:
+  issue_assign:
+    runs-on: ubuntu-latest
+    if: (!github.event.issue.pull_request) && github.event.comment.body == 'take'
+    concurrency:
+      group: ${{ github.actor }}-issue-assign
+    steps:
+      - run: |
+          CODE=$(curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -LI https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/assignees/${{ github.event.comment.user.login }} -o /dev/null -w '%{http_code}\n' -s)
+          if [ "$CODE" -eq "204" ]
+          then
+            echo "Assigning issue ${{ github.event.issue.number }} to ${{ github.event.comment.user.login }}"
+            curl -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" -d '{"assignees": ["${{ github.event.comment.user.login }}"]}' https://api.github.com/repos/${{ github.repository }}/issues/${{ github.event.issue.number }}/assignees
+          else
+            echo "Cannot assign issue ${{ github.event.issue.number }} to ${{ github.event.comment.user.login }}"
+          fi
\ No newline at end of file
diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml
deleted file mode 100644
index 4f47dc984..000000000
--- a/.github/workflows/test.yaml
+++ /dev/null
@@ -1,131 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-name: Python test
-on:
-  push:
-    branches: [main]
-  pull_request:
-    branches: [main]
-
-concurrency:
-  group: ${{ github.repository }}-${{ github.head_ref || github.sha }}-${{ github.workflow }}
-  cancel-in-progress: true
-
-jobs:
-  test-matrix:
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        python-version:
-          - "3.10"
-          - "3.11"
-          - "3.12"
-        toolchain:
-          - "stable"
-
-    steps:
-      - uses: actions/checkout@v4
-
-      - name: Setup Rust Toolchain
-        uses: actions-rs/toolchain@v1
-        id: rust-toolchain
-        with:
-          toolchain: ${{ matrix.toolchain }}
-          override: true
-
-      - name: Install Protoc
-        uses: arduino/setup-protoc@v1
-        with:
-          version: '3.20.2'
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Setup Python
-        uses: actions/setup-python@v5
-        with:
-          python-version: ${{ matrix.python-version }}
-
-      - name: Cache Cargo
-        uses: actions/cache@v4
-        with:
-          path: ~/.cargo
-          key: cargo-cache-${{ steps.rust-toolchain.outputs.rustc_hash }}-${{ hashFiles('Cargo.lock') }}
-
-      - name: Check Formatting
-        uses: actions-rs/cargo@v1
-        if: ${{ matrix.python-version == '3.10' && matrix.toolchain == 'stable' }}
-        with:
-          command: fmt
-          args: -- --check
-
-      - name: Run Clippy
-        uses: actions-rs/cargo@v1
-        if: ${{ matrix.python-version == '3.10' && matrix.toolchain == 'stable' }}
-        with:
-          command: clippy
-          args: --all-targets --all-features -- -D clippy::all -A clippy::redundant_closure
-
-      - name: Create Virtualenv (3.12)
-        if: ${{ matrix.python-version == '3.12' }}
-        run: |
-          python -m venv venv
-          source venv/bin/activate
-          pip install -r requirements-312.txt
-
-      - name: Create Virtualenv (3.10)
-        if: ${{ matrix.python-version == '3.10' }}
-        run: |
-          python -m venv venv
-          source venv/bin/activate
-          pip install -r requirements-310.txt
-
-      - name: Create Virtualenv (3.11)
-        if: ${{ matrix.python-version == '3.11' }}
-        run: |
-          python -m venv venv
-          source venv/bin/activate
-          pip install -r requirements-311.txt
-
-      - name: Run tests
-        env:
-          RUST_BACKTRACE: 1
-        run: |
-          git submodule update --init
-          source venv/bin/activate
-          pip install -e . -vv
-          pytest -v .
-
-      - name: Cache the generated dataset
-        id: cache-tpch-dataset
-        uses: actions/cache@v4
-        with:
-          path: benchmarks/tpch/data
-          key: tpch-data-2.18.0
-
-      - name: Run dbgen to create 1 Gb dataset
-        if: ${{ steps.cache-tpch-dataset.outputs.cache-hit != 'true' }}
-        run: |
-          cd benchmarks/tpch
-          RUN_IN_CI=TRUE ./tpch-gen.sh 1
-
-      - name: Run TPC-H examples
-        run: |
-          source venv/bin/activate
-          cd examples/tpch
-          python convert_data_to_parquet.py
-          pytest _tests.py
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
new file mode 100644
index 000000000..692563019
--- /dev/null
+++ b/.github/workflows/test.yml
@@ -0,0 +1,133 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
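+
+# Illustrative caller (mirrors ci.yml and release.yml elsewhere in this PR):
+#
+#   jobs:
+#     test:
+#       needs: build
+#       uses: ./.github/workflows/test.yml
+#       secrets: inherit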
+
+# Reusable workflow for running tests
+# This ensures the same tests run for both debug (PRs) and release (main/tags) builds
+
+name: Test
+
+on:
+  workflow_call:
+
+jobs:
+  test-matrix:
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        python-version:
+          - "3.10"
+          - "3.11"
+          - "3.12"
+          - "3.13"
+          - "3.14"
+        toolchain:
+          - "stable"
+
+    steps:
+      - uses: actions/checkout@v6
+
+      - name: Verify example datafusion version
+        run: |
+          MAIN_VERSION=$(grep -A 1 "name = \"datafusion-common\"" Cargo.lock | grep "version = " | head -1 | sed 's/.*version = "\(.*\)"/\1/')
+          EXAMPLE_VERSION=$(grep -A 1 "name = \"datafusion-common\"" examples/datafusion-ffi-example/Cargo.lock | grep "version = " | head -1 | sed 's/.*version = "\(.*\)"/\1/')
+          echo "Main crate datafusion version: $MAIN_VERSION"
+          echo "FFI example datafusion version: $EXAMPLE_VERSION"
+
+          if [ "$MAIN_VERSION" != "$EXAMPLE_VERSION" ]; then
+            echo "❌ Error: FFI example datafusion versions don't match!"
+            exit 1
+          fi
+
+      - name: Setup Python
+        uses: actions/setup-python@v6
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: Cache Cargo
+        uses: actions/cache@v5
+        with:
+          path: ~/.cargo
+          key: cargo-cache-${{ matrix.toolchain }}-${{ hashFiles('Cargo.lock') }}
+
+      - name: Install dependencies
+        uses: astral-sh/setup-uv@v7
+        with:
+          enable-cache: true
+
+      # Download the Linux wheel built in the build workflow
+      - name: Download pre-built Linux wheel
+        uses: actions/download-artifact@v7
+        with:
+          name: dist-manylinux-x86_64
+          path: wheels/
+
+      # Download the FFI test wheel
+      - name: Download pre-built FFI test wheel
+        uses: actions/download-artifact@v7
+        with:
+          name: test-ffi-manylinux-x86_64
+          path: wheels/
+
+      # Install from the pre-built wheels
+      - name: Install from pre-built wheels
+        run: |
+          set -x
+          uv venv
+          # Install development dependencies
+          uv sync --dev --no-install-package datafusion
+          # Install all pre-built wheels
+          WHEELS=$(find wheels/ -name "*.whl")
+          if [ -n "$WHEELS" ]; then
+            echo "Installing wheels:"
+            echo "$WHEELS"
+            uv pip install wheels/*.whl
+          else
+            echo "ERROR: No wheels found!"
+            exit 1
+          fi
+
+      - name: Run tests
+        env:
+          RUST_BACKTRACE: 1
+        run: |
+          git submodule update --init
+          uv run --no-project pytest -v --import-mode=importlib
+
+      - name: FFI unit tests
+        run: |
+          cd examples/datafusion-ffi-example
+          uv run --no-project pytest python/tests/_test*.py
+
+      - name: Cache the generated dataset
+        id: cache-tpch-dataset
+        uses: actions/cache@v5
+        with:
+          path: benchmarks/tpch/data
+          key: tpch-data-2.18.0
+
+      - name: Run dbgen to create 1 Gb dataset
+        if: ${{ steps.cache-tpch-dataset.outputs.cache-hit != 'true' }}
+        run: |
+          cd benchmarks/tpch
+          RUN_IN_CI=TRUE ./tpch-gen.sh 1
+
+      - name: Run TPC-H examples
+        run: |
+          cd examples/tpch
+          uv run --no-project python convert_data_to_parquet.py
+          uv run --no-project pytest _tests.py
diff --git a/.github/workflows/verify-release-candidate.yml b/.github/workflows/verify-release-candidate.yml
new file mode 100644
index 000000000..a10a4faa9
--- /dev/null
+++ b/.github/workflows/verify-release-candidate.yml
@@ -0,0 +1,78 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+name: Verify Release Candidate
+
+# NOTE: This workflow is intended to be run manually via workflow_dispatch.
+
+on:
+  workflow_dispatch:
+    inputs:
+      version:
+        description: Version number (e.g., 52.0.0)
+        required: true
+        type: string
+      rc_number:
+        description: Release candidate number (e.g., 0)
+        required: true
+        type: string
+
+concurrency:
+  group: ${{ github.repository }}-${{ github.ref }}-${{ github.workflow }}
+  cancel-in-progress: true
+
+jobs:
+  verify:
+    name: Verify RC (${{ matrix.os }}-${{ matrix.arch }})
+    strategy:
+      fail-fast: false
+      matrix:
+        include:
+          # Linux
+          - os: linux
+            arch: x64
+            runner: ubuntu-latest
+          - os: linux
+            arch: arm64
+            runner: ubuntu-24.04-arm
+
+          # macOS
+          - os: macos
+            arch: arm64
+            runner: macos-latest
+          - os: macos
+            arch: x64
+            runner: macos-15-intel
+
+          # Windows
+          - os: windows
+            arch: x64
+            runner: windows-latest
+    runs-on: ${{ matrix.runner }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v6
+
+      - name: Set up protoc
+        uses: arduino/setup-protoc@v3
+        with:
+          version: "27.4"
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Run release candidate verification
+        shell: bash
+        run: ./dev/release/verify-release-candidate.sh "${{ inputs.version }}" "${{ inputs.rc_number }}"
diff --git a/.gitignore b/.gitignore
index 0030b907b..614d82327 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,6 +4,7 @@ target
 /docs/temp
 /docs/build
 .DS_Store
+.vscode
 
 # Byte-compiled / optimized / DLL files
 __pycache__/
@@ -24,6 +25,7 @@ dist
 # intended to run in multiple environments; otherwise, check them in:
 .python-version
 venv
+.venv
 
 apache-rat-*.jar
 *rat.txt
@@ -31,3 +33,6 @@ apache-rat-*.jar
 CHANGELOG.md.bak
 
 docs/mdbook/book
+
+.pyo3_build_config
+
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 8509fae2c..8ae6a4e32 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -17,12 +17,12 @@
 repos:
   - repo: https://github.com/rhysd/actionlint
-    rev: v1.6.23
+    rev: v1.7.6
     hooks:
-    - id: actionlint-docker
+      - id: actionlint-docker
   - repo: https://github.com/astral-sh/ruff-pre-commit
     # Ruff version.
-    rev: v0.3.0
+    rev: v0.15.1
     hooks:
       # Run the linter.
       - id: ruff
@@ -33,17 +33,25 @@
       - id: rust-fmt
        name: Rust fmt
        description: Run cargo fmt on files included in the commit. rustfmt should be installed before-hand.
-        entry: cargo fmt --all --
+        entry: cargo +nightly fmt --all --
        pass_filenames: true
        types: [file, rust]
        language: system
      - id: rust-clippy
        name: Rust clippy
        description: Run cargo clippy on files included in the commit. clippy should be installed before-hand.
-        entry: cargo clippy --all-targets --all-features -- -Dclippy::all -Aclippy::redundant_closure
+        entry: cargo clippy --all-targets --all-features -- -Dclippy::all -D warnings -Aclippy::redundant_closure
        pass_filenames: false
        types: [file, rust]
        language: system
+  - repo: https://github.com/codespell-project/codespell
+    rev: v2.4.1
+    hooks:
+      - id: codespell
+        args: [ --toml, "pyproject.toml"]
+        additional_dependencies:
+          - tomli
+
 default_language_version:
   python: python3
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 32a74d825..ae40911d8 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -19,570 +19,4 @@
 
 # DataFusion Python Changelog
 
-## [39.0.0](https://github.com/apache/datafusion-python/tree/39.0.0) (2024-06-25)
-
-**Merged pull requests:**
-
-- ci: add substrait feature to linux builds [#720](https://github.com/apache/datafusion-python/pull/720) (Michael-J-Ward)
-- Docs deploy action [#721](https://github.com/apache/datafusion-python/pull/721) (Michael-J-Ward)
-- update deps [#723](https://github.com/apache/datafusion-python/pull/723) (Michael-J-Ward)
-- Upgrade maturin [#725](https://github.com/apache/datafusion-python/pull/725) (Michael-J-Ward)
-- Upgrade datafusion 39 [#728](https://github.com/apache/datafusion-python/pull/728) (Michael-J-Ward)
-- use ScalarValue::to_pyarrow to convert to python object [#731](https://github.com/apache/datafusion-python/pull/731) (Michael-J-Ward)
-- Pyo3 `Bound<'py, T>` api [#734](https://github.com/apache/datafusion-python/pull/734) (Michael-J-Ward)
-- github test action: drop python 3.7, add python 3.12 [#736](https://github.com/apache/datafusion-python/pull/736) (Michael-J-Ward)
-- Pyarrow filter pushdowns [#735](https://github.com/apache/datafusion-python/pull/735) (Michael-J-Ward)
-- build(deps): bump syn from 2.0.66 to 2.0.67 [#738](https://github.com/apache/datafusion-python/pull/738) (dependabot[bot])
-- Pyo3 refactorings [#740](https://github.com/apache/datafusion-python/pull/740) (Michael-J-Ward)
-- UDAF `sum` workaround [#741](https://github.com/apache/datafusion-python/pull/741) (Michael-J-Ward)
-
-## [38.0.1](https://github.com/apache/datafusion-python/tree/38.0.1) (2024-05-25)
-
-**Implemented enhancements:**
-
-- feat: add python bindings for ends_with function [#693](https://github.com/apache/datafusion-python/pull/693) (richtia)
-- feat: expose `named_struct` in python [#700](https://github.com/apache/datafusion-python/pull/700) (Michael-J-Ward)
-
-**Merged pull requests:**
-
-- Add document about basics of working with expressions [#668](https://github.com/apache/datafusion-python/pull/668) (timsaucer)
-- chore: Update Python release process now that DataFusion is TLP [#674](https://github.com/apache/datafusion-python/pull/674) (andygrove)
-- Fix Docs [#676](https://github.com/apache/datafusion-python/pull/676) (Michael-J-Ward)
-- Add examples from TPC-H [#666](https://github.com/apache/datafusion-python/pull/666) (timsaucer)
-- fix conda nightly builds, attempt 2 [#689](https://github.com/apache/datafusion-python/pull/689) (Michael-J-Ward)
-- Upgrade to datafusion 38 [#691](https://github.com/apache/datafusion-python/pull/691) (Michael-J-Ward)
-- chore: update to maturin's recommended project layout for rust/python… [#695](https://github.com/apache/datafusion-python/pull/695) (Michael-J-Ward)
-- chore: update cargo deps [#698](https://github.com/apache/datafusion-python/pull/698) (Michael-J-Ward)
-- feat: add python bindings for ends_with function [#693](https://github.com/apache/datafusion-python/pull/693) (richtia)
-- feat: expose `named_struct` in python [#700](https://github.com/apache/datafusion-python/pull/700) (Michael-J-Ward)
-- Website fixes [#702](https://github.com/apache/datafusion-python/pull/702) (Michael-J-Ward)
-
-## [37.1.0](https://github.com/apache/datafusion-python/tree/37.1.0) (2024-05-08)
-
-**Implemented enhancements:**
-
-- feat: add execute_stream and execute_stream_partitioned [#610](https://github.com/apache/datafusion-python/pull/610) (mesejo)
-
-**Documentation updates:**
-
-- docs: update docs CI to install python-311 requirements [#661](https://github.com/apache/datafusion-python/pull/661) (Michael-J-Ward)
-
-**Merged pull requests:**
-
-- Switch to Ruff for Python linting [#529](https://github.com/apache/datafusion-python/pull/529) (andygrove)
-- Remove sql-on-pandas/polars/cudf examples [#602](https://github.com/apache/datafusion-python/pull/602) (andygrove)
-- build(deps): bump object_store from 0.9.0 to 0.9.1 [#611](https://github.com/apache/datafusion-python/pull/611) (dependabot[bot])
-- More missing array funcs [#605](https://github.com/apache/datafusion-python/pull/605) (judahrand)
-- feat: add execute_stream and execute_stream_partitioned [#610](https://github.com/apache/datafusion-python/pull/610) (mesejo)
-- build(deps): bump uuid from 1.7.0 to 1.8.0 [#615](https://github.com/apache/datafusion-python/pull/615) (dependabot[bot])
-- Bind SQLOptions and relative ctx method #567 [#588](https://github.com/apache/datafusion-python/pull/588) (giacomorebecchi)
-- bugfix: no panic on empty table [#613](https://github.com/apache/datafusion-python/pull/613) (mesejo)
-- Expose `register_listing_table` [#618](https://github.com/apache/datafusion-python/pull/618) (henrifroese)
-- Expose unnest feature [#641](https://github.com/apache/datafusion-python/pull/641) (timsaucer)
-- Update domain names and paths in asf yaml [#643](https://github.com/apache/datafusion-python/pull/643) (andygrove)
-- use python 3.11 to publish docs [#645](https://github.com/apache/datafusion-python/pull/645) (andygrove)
-- docs: update docs CI to install python-311 requirements [#661](https://github.com/apache/datafusion-python/pull/661) (Michael-J-Ward)
-- Upgrade Datafusion to v37.1.0 [#669](https://github.com/apache/datafusion-python/pull/669) (Michael-J-Ward)
-
-## [36.0.0](https://github.com/apache/datafusion-python/tree/36.0.0) (2024-03-02)
-
-**Implemented enhancements:**
-
-- feat: Add `flatten` array function [#562](https://github.com/apache/datafusion-python/pull/562) (mobley-trent)
-
-**Documentation updates:**
-
-- docs: Add ASF attribution [#580](https://github.com/apache/datafusion-python/pull/580) (simicd)
-
-**Merged pull requests:**
-
-- Allow PyDataFrame to be used from other projects [#582](https://github.com/apache/datafusion-python/pull/582) (andygrove)
-- docs: Add ASF attribution [#580](https://github.com/apache/datafusion-python/pull/580) (simicd)
-- Add array functions [#560](https://github.com/apache/datafusion-python/pull/560) (ongchi)
-- feat: Add `flatten` array function [#562](https://github.com/apache/datafusion-python/pull/562) (mobley-trent)
-
-## [35.0.0](https://github.com/apache/datafusion-python/tree/35.0.0) (2024-01-20)
-
-**Merged pull requests:**
-
-- build(deps): bump syn from 2.0.41 to 2.0.43 [#559](https://github.com/apache/datafusion-python/pull/559) (dependabot[bot])
-- build(deps): bump tokio from 1.35.0 to 1.35.1 [#558](https://github.com/apache/datafusion-python/pull/558) (dependabot[bot])
-- build(deps): bump async-trait from 0.1.74 to 0.1.77 [#556](https://github.com/apache/datafusion-python/pull/556) (dependabot[bot])
-- build(deps): bump pyo3 from 0.20.0 to 0.20.2 [#557](https://github.com/apache/datafusion-python/pull/557) (dependabot[bot])
-
-## [34.0.0](https://github.com/apache/datafusion-python/tree/34.0.0) (2023-12-28)
-
-**Merged pull requests:**
-
-- Adjust visibility of crate private members & Functions [#537](https://github.com/apache/datafusion-python/pull/537) (jdye64)
-- Update json.rst [#538](https://github.com/apache/datafusion-python/pull/538) (ray-andrew)
-- Enable mimalloc local_dynamic_tls feature [#540](https://github.com/apache/datafusion-python/pull/540) (jdye64)
-- Enable substrait feature to be built by default in CI, for nightlies … [#544](https://github.com/apache/datafusion-python/pull/544) (jdye64)
-
-## [33.0.0](https://github.com/apache/datafusion-python/tree/33.0.0) (2023-11-16)
-
-**Merged pull requests:**
-
-- First pass at getting architectured builds working [#350](https://github.com/apache/datafusion-python/pull/350) (charlesbluca)
-- Remove libprotobuf dep [#527](https://github.com/apache/datafusion-python/pull/527) (jdye64)
-
-## [32.0.0](https://github.com/apache/datafusion-python/tree/32.0.0) (2023-10-21)
-
-**Implemented enhancements:**
-
-- feat: expose PyWindowFrame [#509](https://github.com/apache/datafusion-python/pull/509) (dlovell)
-- add Binary String Functions;encode,decode [#494](https://github.com/apache/datafusion-python/pull/494) (jiangzhx)
-- add bit_and,bit_or,bit_xor,bool_add,bool_or [#496](https://github.com/apache/datafusion-python/pull/496) (jiangzhx)
-- add first_value last_value [#498](https://github.com/apache/datafusion-python/pull/498) (jiangzhx)
-- add regr\_\* functions [#499](https://github.com/apache/datafusion-python/pull/499) (jiangzhx)
-- Add random missing bindings [#522](https://github.com/apache/datafusion-python/pull/522) (jdye64)
-- Allow for multiple input files per table instead of a single file [#519](https://github.com/apache/datafusion-python/pull/519) (jdye64)
-- Add support for window function bindings [#521](https://github.com/apache/datafusion-python/pull/521) (jdye64)
-
-**Merged pull requests:**
-
-- Prepare 31.0.0 release [#500](https://github.com/apache/datafusion-python/pull/500) (andygrove)
-- Improve release process documentation [#505](https://github.com/apache/datafusion-python/pull/505) (andygrove)
-- add Binary String Functions;encode,decode [#494](https://github.com/apache/datafusion-python/pull/494) (jiangzhx)
-- build(deps): bump mimalloc from 0.1.38 to 0.1.39 [#502](https://github.com/apache/datafusion-python/pull/502) (dependabot[bot])
-- build(deps): bump syn from 2.0.32 to 2.0.35 [#503](https://github.com/apache/datafusion-python/pull/503) (dependabot[bot])
-- build(deps): bump syn from 2.0.35 to 2.0.37 [#506](https://github.com/apache/datafusion-python/pull/506) (dependabot[bot])
-- Use latest DataFusion [#511](https://github.com/apache/datafusion-python/pull/511) (andygrove)
-- add bit_and,bit_or,bit_xor,bool_add,bool_or [#496](https://github.com/apache/datafusion-python/pull/496) (jiangzhx)
-- use DataFusion 32 [#515](https://github.com/apache/datafusion-python/pull/515) (andygrove)
-- add first_value last_value [#498](https://github.com/apache/datafusion-python/pull/498) (jiangzhx)
-- build(deps): bump regex-syntax from 0.7.5 to 0.8.1 [#517](https://github.com/apache/datafusion-python/pull/517) (dependabot[bot])
-- build(deps): bump pyo3-build-config from 0.19.2 to 0.20.0 [#516](https://github.com/apache/datafusion-python/pull/516) (dependabot[bot])
-- add regr\_\* functions [#499](https://github.com/apache/datafusion-python/pull/499) (jiangzhx)
-- Add random missing bindings [#522](https://github.com/apache/datafusion-python/pull/522) (jdye64)
-- build(deps): bump rustix from 0.38.18 to 0.38.19 [#523](https://github.com/apache/datafusion-python/pull/523) (dependabot[bot])
-- Allow for multiple input files per table instead of a single file [#519](https://github.com/apache/datafusion-python/pull/519) (jdye64)
-- Add support for window function bindings [#521](https://github.com/apache/datafusion-python/pull/521) (jdye64)
-- Small clippy fix [#524](https://github.com/apache/datafusion-python/pull/524) (andygrove)
-
-## [31.0.0](https://github.com/apache/datafusion-python/tree/31.0.0) (2023-09-12)
-
-[Full Changelog](https://github.com/apache/datafusion-python/compare/28.0.0...31.0.0)
-
-**Implemented enhancements:**
-
-- feat: add case function (#447) [#448](https://github.com/apache/datafusion-python/pull/448) (mesejo)
-- feat: add compression options [#456](https://github.com/apache/datafusion-python/pull/456) (mesejo)
-- feat: add register_json [#458](https://github.com/apache/datafusion-python/pull/458) (mesejo)
-- feat: add basic compression configuration to write_parquet [#459](https://github.com/apache/datafusion-python/pull/459) (mesejo)
-- feat: add example of reading parquet from s3 [#460](https://github.com/apache/datafusion-python/pull/460) (mesejo)
-- feat: add register_avro and read_table [#461](https://github.com/apache/datafusion-python/pull/461) (mesejo)
-- feat: add missing scalar math functions [#465](https://github.com/apache/datafusion-python/pull/465) (mesejo)
-
-**Documentation updates:**
-
-- docs: include pre-commit hooks section in contributor guide [#455](https://github.com/apache/datafusion-python/pull/455) (mesejo)
-
-**Merged pull requests:**
-
-- Build Linux aarch64 wheel [#443](https://github.com/apache/datafusion-python/pull/443) (gokselk)
-- feat: add case function (#447) [#448](https://github.com/apache/datafusion-python/pull/448) (mesejo)
-- enhancement(docs): Add user guide (#432) [#445](https://github.com/apache/datafusion-python/pull/445) (mesejo)
-- docs: include pre-commit hooks section in contributor guide [#455](https://github.com/apache/datafusion-python/pull/455) (mesejo)
-- feat: add compression options [#456](https://github.com/apache/datafusion-python/pull/456) (mesejo)
-- Upgrade to DF 28.0.0-rc1 [#457](https://github.com/apache/datafusion-python/pull/457) (andygrove)
-- feat: add register_json [#458](https://github.com/apache/datafusion-python/pull/458) (mesejo)
-- feat: add basic compression configuration to write_parquet [#459](https://github.com/apache/datafusion-python/pull/459) (mesejo)
-- feat: add example of reading parquet from s3 [#460](https://github.com/apache/datafusion-python/pull/460) (mesejo)
-- feat: add register_avro and read_table [#461](https://github.com/apache/datafusion-python/pull/461) (mesejo)
-- feat: add missing scalar math functions [#465](https://github.com/apache/datafusion-python/pull/465) (mesejo)
-- build(deps): bump arduino/setup-protoc from 1 to 2 [#452](https://github.com/apache/datafusion-python/pull/452) (dependabot[bot])
-- Revert "build(deps): bump arduino/setup-protoc from 1 to 2 (#452)" [#474](https://github.com/apache/datafusion-python/pull/474) (viirya)
-- Minor: fix wrongly copied function description [#497](https://github.com/apache/datafusion-python/pull/497) (viirya)
-- Upgrade to Datafusion 31.0.0 [#491](https://github.com/apache/datafusion-python/pull/491) (judahrand)
-- Add `isnan` and `iszero` [#495](https://github.com/apache/datafusion-python/pull/495) (judahrand)
-
-## 30.0.0
-
-- Skipped due to a breaking change in DataFusion
-
-## 29.0.0
-
-- Skipped
-
-## [28.0.0](https://github.com/apache/datafusion-python/tree/28.0.0) (2023-07-25)
-
-**Implemented enhancements:**
-
-- feat: expose offset in python API [#437](https://github.com/apache/datafusion-python/pull/437) (cpcloud)
-
-**Merged pull requests:**
-
-- File based input utils [#433](https://github.com/apache/datafusion-python/pull/433) (jdye64)
-- Upgrade to 28.0.0-rc1 [#434](https://github.com/apache/datafusion-python/pull/434) (andygrove)
-- Introduces utility for obtaining SqlTable information from a file like location [#398](https://github.com/apache/datafusion-python/pull/398) (jdye64)
-- feat: expose offset in python API [#437](https://github.com/apache/datafusion-python/pull/437) (cpcloud)
-- Use DataFusion 28 [#439](https://github.com/apache/datafusion-python/pull/439) (andygrove)
-
-## [27.0.0](https://github.com/apache/datafusion-python/tree/27.0.0) (2023-07-03)
-
-**Merged pull requests:**
-
-- LogicalPlan.to_variant() make public [#412](https://github.com/apache/datafusion-python/pull/412) (jdye64)
-- Prepare 27.0.0 release [#423](https://github.com/apache/datafusion-python/pull/423) (andygrove)
-
-## [26.0.0](https://github.com/apache/datafusion-python/tree/26.0.0) (2023-06-11)
-
-[Full Changelog](https://github.com/apache/datafusion-python/compare/25.0.0...26.0.0)
-
-**Merged pull requests:**
-
-- Add Expr::Case when_then_else support to rex_call_operands function [#388](https://github.com/apache/datafusion-python/pull/388) (jdye64)
-- Introduce BaseSessionContext abstract class [#390](https://github.com/apache/datafusion-python/pull/390) (jdye64)
-- CRUD Schema support for `BaseSessionContext` [#392](https://github.com/apache/datafusion-python/pull/392) (jdye64)
-- CRUD Table support for `BaseSessionContext` [#394](https://github.com/apache/datafusion-python/pull/394) (jdye64)
-
-## [25.0.0](https://github.com/apache/datafusion-python/tree/25.0.0) (2023-05-23)
-
-[Full Changelog](https://github.com/apache/datafusion-python/compare/24.0.0...25.0.0)
-
-**Merged pull requests:**
-
-- Prepare 24.0.0 Release [#376](https://github.com/apache/datafusion-python/pull/376) (andygrove)
-- build(deps): bump uuid from 1.3.1 to 1.3.2 [#359](https://github.com/apache/datafusion-python/pull/359) (dependabot[bot])
-- build(deps): bump mimalloc from 0.1.36 to 0.1.37 [#361](https://github.com/apache/datafusion-python/pull/361) (dependabot[bot])
-- build(deps): bump regex-syntax from 0.6.29 to 0.7.1 [#334](https://github.com/apache/datafusion-python/pull/334) (dependabot[bot])
-- upgrade maturin to 0.15.1 [#379](https://github.com/apache/datafusion-python/pull/379) (Jimexist)
-- Expand Expr to include RexType basic support [#378](https://github.com/apache/datafusion-python/pull/378) (jdye64)
-- Add Python script for generating changelog [#383](https://github.com/apache/datafusion-python/pull/383) (andygrove)
-
-## [24.0.0](https://github.com/apache/datafusion-python/tree/24.0.0) (2023-05-09)
-
-[Full Changelog](https://github.com/apache/datafusion-python/compare/23.0.0...24.0.0)
-
-**Documentation updates:**
-
-- Fix link to user guide [#354](https://github.com/apache/datafusion-python/pull/354) (andygrove)
-
-**Merged pull requests:**
-
-- Add interface to serialize Substrait plans to Python Bytes. [#344](https://github.com/apache/datafusion-python/pull/344) (kylebrooks-8451)
-- Add partition_count property to ExecutionPlan. [#346](https://github.com/apache/datafusion-python/pull/346) (kylebrooks-8451)
-- Remove unsendable from all Rust pyclass types. [#348](https://github.com/apache/datafusion-python/pull/348) (kylebrooks-8451)
-- Fix link to user guide [#354](https://github.com/apache/datafusion-python/pull/354) (andygrove)
-- Fix SessionContext execute. [#353](https://github.com/apache/datafusion-python/pull/353) (kylebrooks-8451)
-- Pub mod expr in lib.rs [#357](https://github.com/apache/datafusion-python/pull/357) (jdye64)
-- Add benchmark derived from TPC-H [#355](https://github.com/apache/datafusion-python/pull/355) (andygrove)
-- Add db-benchmark [#365](https://github.com/apache/datafusion-python/pull/365) (andygrove)
-- First pass of documentation in mdBook [#364](https://github.com/apache/datafusion-python/pull/364) (MrPowers)
-- Add 'pub' and '#[pyo3(get, set)]' to DataTypeMap [#371](https://github.com/apache/datafusion-python/pull/371) (jdye64)
-- Fix db-benchmark [#369](https://github.com/apache/datafusion-python/pull/369) (andygrove)
-- Docs explaining how to view query plans [#373](https://github.com/apache/datafusion-python/pull/373) (andygrove)
-- Improve db-benchmark [#372](https://github.com/apache/datafusion-python/pull/372) (andygrove)
-- Make expr member of PyExpr public [#375](https://github.com/apache/datafusion-python/pull/375) (jdye64)
-
-## [23.0.0](https://github.com/apache/datafusion-python/tree/23.0.0) (2023-04-23)
-
-[Full Changelog](https://github.com/apache/datafusion-python/compare/22.0.0...23.0.0)
-
-**Merged pull requests:**
-
-- Improve API docs, README, and examples for configuring context [#321](https://github.com/apache/datafusion-python/pull/321) (andygrove)
-- Osx build linker args [#330](https://github.com/apache/datafusion-python/pull/330) (jdye64)
-- Add requirements file for python 3.11 [#332](https://github.com/apache/datafusion-python/pull/332) (r4ntix)
-- mac arm64 build [#338](https://github.com/apache/datafusion-python/pull/338) (andygrove)
-- Add conda.yaml baseline workflow file [#281](https://github.com/apache/datafusion-python/pull/281) (jdye64)
-- Prepare for 23.0.0 release [#335](https://github.com/apache/datafusion-python/pull/335) (andygrove)
-- Reuse the Tokio Runtime [#341](https://github.com/apache/datafusion-python/pull/341) (kylebrooks-8451)
-
-## [22.0.0](https://github.com/apache/datafusion-python/tree/22.0.0) (2023-04-10)
-
-[Full Changelog](https://github.com/apache/datafusion-python/compare/21.0.0...22.0.0)
-
-**Merged pull requests:**
-
-- Fix invalid build yaml [#308](https://github.com/apache/datafusion-python/pull/308) (andygrove)
-- Try fix release build [#309](https://github.com/apache/datafusion-python/pull/309) (andygrove)
-- Fix release build [#310](https://github.com/apache/datafusion-python/pull/310) (andygrove)
-- Enable datafusion-substrait protoc feature, to remove compile-time dependency on protoc [#312](https://github.com/apache/datafusion-python/pull/312) (andygrove)
-- Fix Mac/Win release builds in CI [#313](https://github.com/apache/datafusion-python/pull/313) (andygrove)
-- install protoc in docs workflow [#314](https://github.com/apache/datafusion-python/pull/314) (andygrove)
-- Fix documentation generation in CI [#315](https://github.com/apache/datafusion-python/pull/315) (andygrove)
-- Source wheel fix [#319](https://github.com/apache/datafusion-python/pull/319) (andygrove)
-
-## 
[21.0.0](https://github.com/apache/datafusion-python/tree/21.0.0) (2023-03-30) - -[Full Changelog](https://github.com/apache/datafusion-python/compare/20.0.0...21.0.0) - -**Merged pull requests:** - -- minor: Fix minor warning on unused import [#289](https://github.com/apache/datafusion-python/pull/289) (viirya) -- feature: Implement `describe()` method [#293](https://github.com/apache/datafusion-python/pull/293) (simicd) -- fix: Printed results not visible in debugger & notebooks [#296](https://github.com/apache/datafusion-python/pull/296) (simicd) -- add package.include and remove wildcard dependency [#295](https://github.com/apache/datafusion-python/pull/295) (andygrove) -- Update main branch name in docs workflow [#303](https://github.com/apache/datafusion-python/pull/303) (andygrove) -- Upgrade to DF 21 [#301](https://github.com/apache/datafusion-python/pull/301) (andygrove) - -## [20.0.0](https://github.com/apache/datafusion-python/tree/20.0.0) (2023-03-17) - -[Full Changelog](https://github.com/apache/datafusion-python/compare/0.8.0...20.0.0) - -**Implemented enhancements:** - -- Empty relation bindings [#208](https://github.com/apache/datafusion-python/pull/208) (jdye64) -- wrap display_name and canonical_name functions [#214](https://github.com/apache/datafusion-python/pull/214) (jdye64) -- Add PyAlias bindings [#216](https://github.com/apache/datafusion-python/pull/216) (jdye64) -- Add bindings for scalar_variable [#218](https://github.com/apache/datafusion-python/pull/218) (jdye64) -- Bindings for LIKE type expressions [#220](https://github.com/apache/datafusion-python/pull/220) (jdye64) -- Bool expr bindings [#223](https://github.com/apache/datafusion-python/pull/223) (jdye64) -- Between bindings [#229](https://github.com/apache/datafusion-python/pull/229) (jdye64) -- Add bindings for GetIndexedField [#227](https://github.com/apache/datafusion-python/pull/227) (jdye64) -- Add bindings for case, cast, and trycast [#232](https://github.com/apache/datafusion-python/pull/232) (jdye64) -- add remaining expr bindings [#233](https://github.com/apache/datafusion-python/pull/233) (jdye64) -- feature: Additional export methods [#236](https://github.com/apache/datafusion-python/pull/236) (simicd) -- Add Python wrapper for LogicalPlan::Union [#240](https://github.com/apache/datafusion-python/pull/240) (iajoiner) -- feature: Create dataframe from pandas, polars, dictionary, list or pyarrow Table [#242](https://github.com/apache/datafusion-python/pull/242) (simicd) -- Add Python wrappers for `LogicalPlan::Join` and `LogicalPlan::CrossJoin` [#246](https://github.com/apache/datafusion-python/pull/246) (iajoiner) -- feature: Set table name from ctx functions [#260](https://github.com/apache/datafusion-python/pull/260) (simicd) -- Explain bindings [#264](https://github.com/apache/datafusion-python/pull/264) (jdye64) -- Extension bindings [#266](https://github.com/apache/datafusion-python/pull/266) (jdye64) -- Subquery alias bindings [#269](https://github.com/apache/datafusion-python/pull/269) (jdye64) -- Create memory table [#271](https://github.com/apache/datafusion-python/pull/271) (jdye64) -- Create view bindings [#273](https://github.com/apache/datafusion-python/pull/273) (jdye64) -- Re-export Datafusion dependencies [#277](https://github.com/apache/datafusion-python/pull/277) (jdye64) -- Distinct bindings [#275](https://github.com/apache/datafusion-python/pull/275) (jdye64) -- Drop table bindings [#283](https://github.com/apache/datafusion-python/pull/283) (jdye64) -- Bindings for 
LogicalPlan::Repartition [#285](https://github.com/apache/datafusion-python/pull/285) (jdye64) -- Expand Rust return type support for Arrow DataTypes in ScalarValue [#287](https://github.com/apache/datafusion-python/pull/287) (jdye64) - -**Documentation updates:** - -- docs: Example of calling Python UDF & UDAF in SQL [#258](https://github.com/apache/datafusion-python/pull/258) (simicd) - -**Merged pull requests:** - -- Minor docs updates [#210](https://github.com/apache/datafusion-python/pull/210) (andygrove) -- Empty relation bindings [#208](https://github.com/apache/datafusion-python/pull/208) (jdye64) -- wrap display_name and canonical_name functions [#214](https://github.com/apache/datafusion-python/pull/214) (jdye64) -- Add PyAlias bindings [#216](https://github.com/apache/datafusion-python/pull/216) (jdye64) -- Add bindings for scalar_variable [#218](https://github.com/apache/datafusion-python/pull/218) (jdye64) -- Bindings for LIKE type expressions [#220](https://github.com/apache/datafusion-python/pull/220) (jdye64) -- Bool expr bindings [#223](https://github.com/apache/datafusion-python/pull/223) (jdye64) -- Between bindings [#229](https://github.com/apache/datafusion-python/pull/229) (jdye64) -- Add bindings for GetIndexedField [#227](https://github.com/apache/datafusion-python/pull/227) (jdye64) -- Add bindings for case, cast, and trycast [#232](https://github.com/apache/datafusion-python/pull/232) (jdye64) -- add remaining expr bindings [#233](https://github.com/apache/datafusion-python/pull/233) (jdye64) -- Pre-commit hooks [#228](https://github.com/apache/datafusion-python/pull/228) (jdye64) -- Implement new release process [#149](https://github.com/apache/datafusion-python/pull/149) (andygrove) -- feature: Additional export methods [#236](https://github.com/apache/datafusion-python/pull/236) (simicd) -- Add Python wrapper for LogicalPlan::Union [#240](https://github.com/apache/datafusion-python/pull/240) (iajoiner) -- feature: Create dataframe from pandas, polars, dictionary, list or pyarrow Table [#242](https://github.com/apache/datafusion-python/pull/242) (simicd) -- Fix release instructions [#238](https://github.com/apache/datafusion-python/pull/238) (andygrove) -- Add Python wrappers for `LogicalPlan::Join` and `LogicalPlan::CrossJoin` [#246](https://github.com/apache/datafusion-python/pull/246) (iajoiner) -- docs: Example of calling Python UDF & UDAF in SQL [#258](https://github.com/apache/datafusion-python/pull/258) (simicd) -- feature: Set table name from ctx functions [#260](https://github.com/apache/datafusion-python/pull/260) (simicd) -- Upgrade to DataFusion 19 [#262](https://github.com/apache/datafusion-python/pull/262) (andygrove) -- Explain bindings [#264](https://github.com/apache/datafusion-python/pull/264) (jdye64) -- Extension bindings [#266](https://github.com/apache/datafusion-python/pull/266) (jdye64) -- Subquery alias bindings [#269](https://github.com/apache/datafusion-python/pull/269) (jdye64) -- Create memory table [#271](https://github.com/apache/datafusion-python/pull/271) (jdye64) -- Create view bindings [#273](https://github.com/apache/datafusion-python/pull/273) (jdye64) -- Re-export Datafusion dependencies [#277](https://github.com/apache/datafusion-python/pull/277) (jdye64) -- Distinct bindings [#275](https://github.com/apache/datafusion-python/pull/275) (jdye64) -- build(deps): bump actions/checkout from 2 to 3 [#244](https://github.com/apache/datafusion-python/pull/244) (dependabot[bot]) -- build(deps): bump actions/upload-artifact from 2 
to 3 [#245](https://github.com/apache/datafusion-python/pull/245) (dependabot[bot]) -- build(deps): bump actions/download-artifact from 2 to 3 [#243](https://github.com/apache/datafusion-python/pull/243) (dependabot[bot]) -- Use DataFusion 20 [#278](https://github.com/apache/datafusion-python/pull/278) (andygrove) -- Drop table bindings [#283](https://github.com/apache/datafusion-python/pull/283) (jdye64) -- Bindings for LogicalPlan::Repartition [#285](https://github.com/apache/datafusion-python/pull/285) (jdye64) -- Expand Rust return type support for Arrow DataTypes in ScalarValue [#287](https://github.com/apache/datafusion-python/pull/287) (jdye64) - -## [0.8.0](https://github.com/apache/datafusion-python/tree/0.8.0) (2023-02-22) - -[Full Changelog](https://github.com/apache/datafusion-python/compare/0.8.0-rc1...0.8.0) - -**Implemented enhancements:** - -- Add support for cuDF physical execution engine [\#202](https://github.com/apache/datafusion-python/issues/202) -- Make it easier to create a Pandas dataframe from DataFusion query results [\#139](https://github.com/apache/datafusion-python/issues/139) - -**Fixed bugs:** - -- Build error: could not compile `thiserror` due to 2 previous errors [\#69](https://github.com/apache/datafusion-python/issues/69) - -**Closed issues:** - -- Integrate with the new `object_store` crate [\#22](https://github.com/apache/datafusion-python/issues/22) - -**Merged pull requests:** - -- Update README in preparation for 0.8 release [\#206](https://github.com/apache/datafusion-python/pull/206) ([andygrove](https://github.com/andygrove)) -- Add support for cudf as a physical execution engine [\#205](https://github.com/apache/datafusion-python/pull/205) ([jdye64](https://github.com/jdye64)) -- Run `maturin develop` instead of `cargo build` in verification script [\#200](https://github.com/apache/datafusion-python/pull/200) ([andygrove](https://github.com/andygrove)) -- Add tests for recently added functionality [\#199](https://github.com/apache/datafusion-python/pull/199) ([andygrove](https://github.com/andygrove)) -- Implement `to_pandas()` [\#197](https://github.com/apache/datafusion-python/pull/197) ([simicd](https://github.com/simicd)) -- Add Python wrapper for LogicalPlan::Sort [\#196](https://github.com/apache/datafusion-python/pull/196) ([andygrove](https://github.com/andygrove)) -- Add Python wrapper for LogicalPlan::Aggregate [\#195](https://github.com/apache/datafusion-python/pull/195) ([andygrove](https://github.com/andygrove)) -- Add Python wrapper for LogicalPlan::Limit [\#193](https://github.com/apache/datafusion-python/pull/193) ([andygrove](https://github.com/andygrove)) -- Add Python wrapper for LogicalPlan::Filter [\#192](https://github.com/apache/datafusion-python/pull/192) ([andygrove](https://github.com/andygrove)) -- Add experimental support for executing SQL with Polars and Pandas [\#190](https://github.com/apache/datafusion-python/pull/190) ([andygrove](https://github.com/andygrove)) -- Update changelog for 0.8 release [\#188](https://github.com/apache/datafusion-python/pull/188) ([andygrove](https://github.com/andygrove)) -- Add ability to execute ExecutionPlan and get a stream of RecordBatch [\#186](https://github.com/apache/datafusion-python/pull/186) ([andygrove](https://github.com/andygrove)) -- Dffield bindings [\#185](https://github.com/apache/datafusion-python/pull/185) ([jdye64](https://github.com/jdye64)) -- Add bindings for DFSchema [\#183](https://github.com/apache/datafusion-python/pull/183) 
([jdye64](https://github.com/jdye64)) -- test: Window functions [\#182](https://github.com/apache/datafusion-python/pull/182) ([simicd](https://github.com/simicd)) -- Add bindings for Projection [\#180](https://github.com/apache/datafusion-python/pull/180) ([jdye64](https://github.com/jdye64)) -- Table scan bindings [\#178](https://github.com/apache/datafusion-python/pull/178) ([jdye64](https://github.com/jdye64)) -- Make session configurable [\#176](https://github.com/apache/datafusion-python/pull/176) ([andygrove](https://github.com/andygrove)) -- Upgrade to DataFusion 18.0.0 [\#175](https://github.com/apache/datafusion-python/pull/175) ([andygrove](https://github.com/andygrove)) -- Use latest DataFusion rev in preparation for DF 18 release [\#174](https://github.com/apache/datafusion-python/pull/174) ([andygrove](https://github.com/andygrove)) -- Arrow type bindings [\#173](https://github.com/apache/datafusion-python/pull/173) ([jdye64](https://github.com/jdye64)) -- Pyo3 bump [\#171](https://github.com/apache/datafusion-python/pull/171) ([jdye64](https://github.com/jdye64)) -- feature: Add additional aggregation functions [\#170](https://github.com/apache/datafusion-python/pull/170) ([simicd](https://github.com/simicd)) -- Make from_substrait_plan return DataFrame instead of LogicalPlan [\#164](https://github.com/apache/datafusion-python/pull/164) ([andygrove](https://github.com/andygrove)) -- feature: Implement count method [\#163](https://github.com/apache/datafusion-python/pull/163) ([simicd](https://github.com/simicd)) -- CI Fixes [\#162](https://github.com/apache/datafusion-python/pull/162) ([jdye64](https://github.com/jdye64)) -- Upgrade to DataFusion 17 [\#160](https://github.com/apache/datafusion-python/pull/160) ([andygrove](https://github.com/andygrove)) -- feature: Improve string representation of datafusion classes [\#159](https://github.com/apache/datafusion-python/pull/159) ([simicd](https://github.com/simicd)) -- Make PyExecutionPlan.plan public [\#156](https://github.com/apache/datafusion-python/pull/156) ([andygrove](https://github.com/andygrove)) -- Expose methods on logical and execution plans [\#155](https://github.com/apache/datafusion-python/pull/155) ([andygrove](https://github.com/andygrove)) -- Fix clippy for new Rust version [\#154](https://github.com/apache/datafusion-python/pull/154) ([andygrove](https://github.com/andygrove)) -- Add DataFrame methods for accessing plans [\#153](https://github.com/apache/datafusion-python/pull/153) ([andygrove](https://github.com/andygrove)) -- Use DataFusion rev 5238e8c97f998b4d2cb9fab85fb182f325a1a7fb [\#150](https://github.com/apache/datafusion-python/pull/150) ([andygrove](https://github.com/andygrove)) -- build\(deps\): bump async-trait from 0.1.61 to 0.1.62 [\#148](https://github.com/apache/datafusion-python/pull/148) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Rename default branch from master to main [\#147](https://github.com/apache/datafusion-python/pull/147) ([andygrove](https://github.com/andygrove)) -- Substrait bindings [\#145](https://github.com/apache/datafusion-python/pull/145) ([jdye64](https://github.com/jdye64)) -- build\(deps\): bump uuid from 0.8.2 to 1.2.2 [\#143](https://github.com/apache/datafusion-python/pull/143) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Prepare for 0.8.0 release [\#141](https://github.com/apache/datafusion-python/pull/141) ([andygrove](https://github.com/andygrove)) -- Improve README and add more examples 
[\#137](https://github.com/apache/datafusion-python/pull/137) ([andygrove](https://github.com/andygrove)) -- test: Expand tests for built-in functions [\#129](https://github.com/apache/datafusion-python/pull/129) ([simicd](https://github.com/simicd)) -- build\(deps\): bump object_store from 0.5.2 to 0.5.3 [\#126](https://github.com/apache/datafusion-python/pull/126) ([dependabot[bot]](https://github.com/apps/dependabot)) -- build\(deps\): bump mimalloc from 0.1.32 to 0.1.34 [\#125](https://github.com/apache/datafusion-python/pull/125) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Introduce conda directory containing datafusion-dev.yaml conda enviro… [\#124](https://github.com/apache/datafusion-python/pull/124) ([jdye64](https://github.com/jdye64)) -- build\(deps\): bump bzip2 from 0.4.3 to 0.4.4 [\#121](https://github.com/apache/datafusion-python/pull/121) ([dependabot[bot]](https://github.com/apps/dependabot)) -- build\(deps\): bump tokio from 1.23.0 to 1.24.1 [\#119](https://github.com/apache/datafusion-python/pull/119) ([dependabot[bot]](https://github.com/apps/dependabot)) -- build\(deps\): bump async-trait from 0.1.60 to 0.1.61 [\#118](https://github.com/apache/datafusion-python/pull/118) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Upgrade to DataFusion 16.0.0 [\#115](https://github.com/apache/datafusion-python/pull/115) ([andygrove](https://github.com/andygrove)) -- Bump async-trait from 0.1.57 to 0.1.60 [\#114](https://github.com/apache/datafusion-python/pull/114) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump object_store from 0.5.1 to 0.5.2 [\#112](https://github.com/apache/datafusion-python/pull/112) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Bump tokio from 1.21.2 to 1.23.0 [\#109](https://github.com/apache/datafusion-python/pull/109) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Add entries for publishing production \(asf-site\) and staging docs [\#107](https://github.com/apache/datafusion-python/pull/107) ([martin-g](https://github.com/martin-g)) -- Add a workflow that builds the docs and deploys them at staged or production [\#104](https://github.com/apache/datafusion-python/pull/104) ([martin-g](https://github.com/martin-g)) -- Upgrade to DataFusion 15.0.0 [\#103](https://github.com/apache/datafusion-python/pull/103) ([andygrove](https://github.com/andygrove)) -- build\(deps\): bump futures from 0.3.24 to 0.3.25 [\#102](https://github.com/apache/datafusion-python/pull/102) ([dependabot[bot]](https://github.com/apps/dependabot)) -- build\(deps\): bump pyo3 from 0.17.2 to 0.17.3 [\#101](https://github.com/apache/datafusion-python/pull/101) ([dependabot[bot]](https://github.com/apps/dependabot)) -- build\(deps\): bump mimalloc from 0.1.30 to 0.1.32 [\#98](https://github.com/apache/datafusion-python/pull/98) ([dependabot[bot]](https://github.com/apps/dependabot)) -- build\(deps\): bump rand from 0.7.3 to 0.8.5 [\#97](https://github.com/apache/datafusion-python/pull/97) ([dependabot[bot]](https://github.com/apps/dependabot)) -- Fix GitHub actions warnings [\#95](https://github.com/apache/datafusion-python/pull/95) ([martin-g](https://github.com/martin-g)) -- Fixes \#81 - Add CI workflow for source distribution [\#93](https://github.com/apache/datafusion-python/pull/93) ([martin-g](https://github.com/martin-g)) -- post-release updates [\#91](https://github.com/apache/datafusion-python/pull/91) ([andygrove](https://github.com/andygrove)) -- Build for manylinux 2014 
[\#88](https://github.com/apache/datafusion-python/pull/88) ([martin-g](https://github.com/martin-g)) -- update release readme tag [\#86](https://github.com/apache/datafusion-python/pull/86) ([Jimexist](https://github.com/Jimexist)) -- Upgrade Maturin to 0.14.2 [\#85](https://github.com/apache/datafusion-python/pull/85) ([martin-g](https://github.com/martin-g)) -- Update release instructions [\#83](https://github.com/apache/datafusion-python/pull/83) ([andygrove](https://github.com/andygrove)) -- \[Functions\] - Add python function binding to `functions` [\#73](https://github.com/apache/datafusion-python/pull/73) ([francis-du](https://github.com/francis-du)) - -## [0.8.0-rc1](https://github.com/apache/datafusion-python/tree/0.8.0-rc1) (2023-02-17) - -[Full Changelog](https://github.com/apache/datafusion-python/compare/0.7.0-rc2...0.8.0-rc1) - -**Implemented enhancements:** - -- Add bindings for datafusion_common::DFField [\#184](https://github.com/apache/datafusion-python/issues/184) -- Add bindings for DFSchema/DFSchemaRef [\#181](https://github.com/apache/datafusion-python/issues/181) -- Add bindings for datafusion_expr Projection [\#179](https://github.com/apache/datafusion-python/issues/179) -- Add bindings for `TableScan` struct from `datafusion_expr::TableScan` [\#177](https://github.com/apache/datafusion-python/issues/177) -- Add a "mapping" struct for types [\#172](https://github.com/apache/datafusion-python/issues/172) -- Improve string representation of datafusion classes \(dataframe, context, expression, ...\) [\#158](https://github.com/apache/datafusion-python/issues/158) -- Add DataFrame count method [\#151](https://github.com/apache/datafusion-python/issues/151) -- \[REQUEST\] Github Actions Improvements [\#146](https://github.com/apache/datafusion-python/issues/146) -- Change default branch name from master to main [\#144](https://github.com/apache/datafusion-python/issues/144) -- Bump pyo3 to 0.18.0 [\#140](https://github.com/apache/datafusion-python/issues/140) -- Add script for Python linting [\#134](https://github.com/apache/datafusion-python/issues/134) -- Add Python bindings for substrait module [\#132](https://github.com/apache/datafusion-python/issues/132) -- Expand unit tests for built-in functions [\#128](https://github.com/apache/datafusion-python/issues/128) -- support creating arrow-datafusion-python conda environment [\#122](https://github.com/apache/datafusion-python/issues/122) -- Build Python source distribution in GitHub workflow [\#81](https://github.com/apache/datafusion-python/issues/81) -- EPIC: Add all functions to python binding `functions` [\#72](https://github.com/apache/datafusion-python/issues/72) - -**Fixed bugs:** - -- Build is broken [\#161](https://github.com/apache/datafusion-python/issues/161) -- Out of memory when sorting [\#157](https://github.com/apache/datafusion-python/issues/157) -- window_lead test appears to be non-deterministic [\#135](https://github.com/apache/datafusion-python/issues/135) -- Reading csv does not work [\#130](https://github.com/apache/datafusion-python/issues/130) -- Github actions produce a lot of warnings [\#94](https://github.com/apache/datafusion-python/issues/94) -- ASF source release tarball has wrong directory name [\#90](https://github.com/apache/datafusion-python/issues/90) -- Python Release Build failing after upgrading to maturin 14.2 [\#87](https://github.com/apache/datafusion-python/issues/87) -- Maturin build hangs on Linux ARM64 [\#84](https://github.com/apache/datafusion-python/issues/84) -- Cannot 
install on Mac M1 from source tarball from testpypi [\#82](https://github.com/apache/datafusion-python/issues/82) -- ImportPathMismatchError when running pytest locally [\#77](https://github.com/apache/datafusion-python/issues/77) - -**Closed issues:** - -- Publish documentation for Python bindings [\#39](https://github.com/apache/datafusion-python/issues/39) -- Add Python binding for `approx_median` [\#32](https://github.com/apache/datafusion-python/issues/32) -- Release version 0.7.0 [\#7](https://github.com/apache/datafusion-python/issues/7) - -## [0.7.0-rc2](https://github.com/apache/datafusion-python/tree/0.7.0-rc2) (2022-11-26) - -[Full Changelog](https://github.com/apache/datafusion-python/compare/0.7.0...0.7.0-rc2) - -## [Unreleased](https://github.com/datafusion-contrib/datafusion-python/tree/HEAD) - -[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.1...HEAD) - -**Merged pull requests:** - -- use \_\_getitem\_\_ for df column selection [\#41](https://github.com/datafusion-contrib/datafusion-python/pull/41) ([Jimexist](https://github.com/Jimexist)) -- fix demo in readme [\#40](https://github.com/datafusion-contrib/datafusion-python/pull/40) ([Jimexist](https://github.com/Jimexist)) -- Implement select_columns [\#39](https://github.com/datafusion-contrib/datafusion-python/pull/39) ([andygrove](https://github.com/andygrove)) -- update readme and changelog [\#38](https://github.com/datafusion-contrib/datafusion-python/pull/38) ([Jimexist](https://github.com/Jimexist)) -- Add PyDataFrame.explain [\#36](https://github.com/datafusion-contrib/datafusion-python/pull/36) ([andygrove](https://github.com/andygrove)) -- Release 0.5.0 [\#34](https://github.com/datafusion-contrib/datafusion-python/pull/34) ([Jimexist](https://github.com/Jimexist)) -- disable nightly in workflow [\#33](https://github.com/datafusion-contrib/datafusion-python/pull/33) ([Jimexist](https://github.com/Jimexist)) -- update requirements to 37 and 310, update readme [\#32](https://github.com/datafusion-contrib/datafusion-python/pull/32) ([Jimexist](https://github.com/Jimexist)) -- Add custom global allocator [\#30](https://github.com/datafusion-contrib/datafusion-python/pull/30) ([matthewmturner](https://github.com/matthewmturner)) -- Remove pandas dependency [\#25](https://github.com/datafusion-contrib/datafusion-python/pull/25) ([matthewmturner](https://github.com/matthewmturner)) -- upgrade datafusion and pyo3 [\#20](https://github.com/datafusion-contrib/datafusion-python/pull/20) ([Jimexist](https://github.com/Jimexist)) -- update maturin 0.12+ [\#17](https://github.com/datafusion-contrib/datafusion-python/pull/17) ([Jimexist](https://github.com/Jimexist)) -- Update README.md [\#16](https://github.com/datafusion-contrib/datafusion-python/pull/16) ([Jimexist](https://github.com/Jimexist)) -- apply cargo clippy --fix [\#15](https://github.com/datafusion-contrib/datafusion-python/pull/15) ([Jimexist](https://github.com/Jimexist)) -- update test workflow to include rust clippy and check [\#14](https://github.com/datafusion-contrib/datafusion-python/pull/14) ([Jimexist](https://github.com/Jimexist)) -- use maturin 0.12.6 [\#13](https://github.com/datafusion-contrib/datafusion-python/pull/13) ([Jimexist](https://github.com/Jimexist)) -- apply cargo fmt [\#12](https://github.com/datafusion-contrib/datafusion-python/pull/12) ([Jimexist](https://github.com/Jimexist)) -- use stable not nightly [\#11](https://github.com/datafusion-contrib/datafusion-python/pull/11) 
([Jimexist](https://github.com/Jimexist)) -- ci: test against more compilers, setup clippy and fix clippy lints [\#9](https://github.com/datafusion-contrib/datafusion-python/pull/9) ([cpcloud](https://github.com/cpcloud)) -- Fix use of importlib.metadata and unify requirements.txt [\#8](https://github.com/datafusion-contrib/datafusion-python/pull/8) ([cpcloud](https://github.com/cpcloud)) -- Ship the Cargo.lock file in the source distribution [\#7](https://github.com/datafusion-contrib/datafusion-python/pull/7) ([cpcloud](https://github.com/cpcloud)) -- add \_\_version\_\_ attribute to datafusion object [\#3](https://github.com/datafusion-contrib/datafusion-python/pull/3) ([tfeda](https://github.com/tfeda)) -- fix ci by fixing directories [\#2](https://github.com/datafusion-contrib/datafusion-python/pull/2) ([Jimexist](https://github.com/Jimexist)) -- setup workflow [\#1](https://github.com/datafusion-contrib/datafusion-python/pull/1) ([Jimexist](https://github.com/Jimexist)) - -## [0.5.1](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.1) (2022-03-15) - -[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.1-rc1...0.5.1) - -## [0.5.1-rc1](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.1-rc1) (2022-03-15) - -[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.0...0.5.1-rc1) - -## [0.5.0](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.0) (2022-03-10) - -[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.0-rc2...0.5.0) - -## [0.5.0-rc2](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.0-rc2) (2022-03-10) - -[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.0-rc1...0.5.0-rc2) - -**Closed issues:** - -- Add support for Ballista [\#37](https://github.com/datafusion-contrib/datafusion-python/issues/37) -- Implement DataFrame.explain [\#35](https://github.com/datafusion-contrib/datafusion-python/issues/35) - -## [0.5.0-rc1](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.0-rc1) (2022-03-09) - -[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/4c98b8e9c3c3f8e2e6a8f2d1ffcfefda344c4680...0.5.0-rc1) - -**Closed issues:** - -- Investigate exposing additional optimizations [\#28](https://github.com/datafusion-contrib/datafusion-python/issues/28) -- Use custom allocator in Python build [\#27](https://github.com/datafusion-contrib/datafusion-python/issues/27) -- Why is pandas a requirement? [\#24](https://github.com/datafusion-contrib/datafusion-python/issues/24) -- Unable to build [\#18](https://github.com/datafusion-contrib/datafusion-python/issues/18) -- Setup CI against multiple Python version [\#6](https://github.com/datafusion-contrib/datafusion-python/issues/6) +The changelogs have now moved [here](./dev/changelog). diff --git a/Cargo.lock b/Cargo.lock index 3c282e071..40b1ba7f1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,37 +1,70 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. 
-version = 3
+version = 4

 [[package]]
-name = "addr2line"
-version = "0.22.0"
+name = "abi_stable"
+version = "0.11.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6e4503c46a5c0c7844e948c9a4d6acd9f50cccb4de1c48eb9e291ea17470c678"
+checksum = "69d6512d3eb05ffe5004c59c206de7f99c34951504056ce23fc953842f12c445"
 dependencies = [
- "gimli",
+ "abi_stable_derive",
+ "abi_stable_shared",
+ "const_panic",
+ "core_extensions",
+ "crossbeam-channel",
+ "generational-arena",
+ "libloading",
+ "lock_api",
+ "parking_lot",
+ "paste",
+ "repr_offset",
+ "rustc_version",
+ "serde",
+ "serde_derive",
+ "serde_json",
 ]

 [[package]]
-name = "adler"
-version = "1.0.2"
+name = "abi_stable_derive"
+version = "0.11.3"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d7178468b407a4ee10e881bc7a328a65e739f0863615cca4429d43916b05e898"
+dependencies = [
+ "abi_stable_shared",
+ "as_derive_utils",
+ "core_extensions",
+ "proc-macro2",
+ "quote",
+ "rustc_version",
+ "syn 1.0.109",
+ "typed-arena",
+]
+
+[[package]]
+name = "abi_stable_shared"
+version = "0.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
+checksum = "b2b5df7688c123e63f4d4d649cba63f2967ba7f7861b1664fca3f77d3dad2b63"
+dependencies = [
+ "core_extensions",
+]

 [[package]]
-name = "adler32"
-version = "1.2.0"
+name = "adler2"
+version = "2.0.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234"
+checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa"

 [[package]]
 name = "ahash"
-version = "0.8.11"
+version = "0.8.12"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011"
+checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75"
 dependencies = [
 "cfg-if",
 "const-random",
- "getrandom",
+ "getrandom 0.3.4",
 "once_cell",
 "version_check",
 "zerocopy",
@@ -39,9 +72,9 @@ dependencies = [

 [[package]]
 name = "aho-corasick"
-version = "1.1.3"
+version = "1.1.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916"
+checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301"
 dependencies = [
 "memchr",
 ]
@@ -63,15 +96,9 @@ dependencies = [

 [[package]]
 name = "allocator-api2"
-version = "0.2.18"
+version = "0.2.21"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5c6cb57a04249c6480766f7f7cef5467412af1490f8d1e243141daddada3264f"
-
-[[package]]
-name = "android-tzdata"
-version = "0.1.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0"
+checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923"

 [[package]]
 name = "android_system_properties"
@@ -84,55 +111,74 @@ dependencies = [

 [[package]]
 name = "anyhow"
-version = "1.0.86"
+version = "1.0.101"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b3d1d046238990b9cf5bcde22a3fb3584ee5cf65fb2765f454ed428c7a0063da"
+checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea"

 [[package]]
 name = "apache-avro"
-version = "0.16.0"
+version = "0.21.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ceb7c683b2f8f40970b70e39ff8be514c95b96fcb9c4af87e1ed2cb2e10801a0"
+checksum = "36fa98bc79671c7981272d91a8753a928ff6a1cd8e4f20a44c45bd5d313840bf"
 dependencies = [
+ "bigdecimal",
+ "bon",
 "bzip2",
 "crc32fast",
 "digest",
- "lazy_static",
- "libflate",
 "log",
+ "miniz_oxide",
 "num-bigint",
 "quad-rand",
 "rand",
 "regex-lite",
 "serde",
+ "serde_bytes",
 "serde_json",
 "snap",
- "strum 0.25.0",
- "strum_macros 0.25.3",
+ "strum",
+ "strum_macros",
 "thiserror",
- "typed-builder",
 "uuid",
- "xz2",
- "zstd 0.12.4",
+ "zstd",
+]
+
+[[package]]
+name = "ar_archive_writer"
+version = "0.5.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "7eb93bbb63b9c227414f6eb3a0adfddca591a8ce1e9b60661bb08969b87e340b"
+dependencies = [
+ "object",
+]
+
+[[package]]
+name = "arc-swap"
+version = "1.8.2"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5"
+dependencies = [
+ "rustversion",
+]

 [[package]]
 name = "arrayref"
-version = "0.3.7"
+version = "0.3.9"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "6b4930d2cb77ce62f89ee5d5289b4ac049559b1c45539271f5ed4fdc7db34545"
+checksum = "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb"

 [[package]]
 name = "arrayvec"
-version = "0.7.4"
+version = "0.7.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711"
+checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50"

 [[package]]
 name = "arrow"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7ae9728f104939be6d8d9b368a354b4929b0569160ea1641f0721b55a861ce38"
+checksum = "602268ce9f569f282cedb9a9f6bac569b680af47b9b077d515900c03c5d190da"
 dependencies = [
 "arrow-arith",
 "arrow-array",
@@ -143,33 +189,32 @@ dependencies = [
 "arrow-ipc",
 "arrow-json",
 "arrow-ord",
+ "arrow-pyarrow",
 "arrow-row",
 "arrow-schema",
 "arrow-select",
 "arrow-string",
- "pyo3",
 ]

 [[package]]
 name = "arrow-arith"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a7029a5b3efbeafbf4a12d12dc16b8f9e9bff20a410b8c25c5d28acc089e1043"
+checksum = "cd53c6bf277dea91f136ae8e3a5d7041b44b5e489e244e637d00ae302051f56f"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
 "arrow-data",
 "arrow-schema",
 "chrono",
- "half",
- "num",
+ "num-traits",
 ]

 [[package]]
 name = "arrow-array"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d33238427c60271710695f17742f45b1a5dc5bcfc5c15331c25ddfe7abf70d97"
+checksum = "e53796e07a6525edaf7dc28b540d477a934aff14af97967ad1d5550878969b9e"
 dependencies = [
 "ahash",
 "arrow-buffer",
@@ -178,93 +223,95 @@ dependencies = [
 "chrono",
 "chrono-tz",
 "half",
- "hashbrown",
- "num",
+ "hashbrown 0.16.1",
+ "num-complex",
+ "num-integer",
+ "num-traits",
 ]

 [[package]]
 name = "arrow-buffer"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fe9b95e825ae838efaf77e366c00d3fc8cca78134c9db497d6bda425f2e7b7c1"
+checksum = "f2c1a85bb2e94ee10b76531d8bc3ce9b7b4c0d508cabfb17d477f63f2617bd20"
 dependencies = [
 "bytes",
 "half",
- "num",
+ "num-bigint",
+ "num-traits",
 ]

 [[package]]
 name = "arrow-cast"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "87cf8385a9d5b5fcde771661dd07652b79b9139fea66193eda6a88664400ccab"
+checksum = "89fb245db6b0e234ed8e15b644edb8664673fefe630575e94e62cd9d489a8a26"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
 "arrow-data",
+ "arrow-ord",
 "arrow-schema",
 "arrow-select",
 "atoi",
- "base64 0.22.1",
+ "base64",
 "chrono",
 "comfy-table",
 "half",
 "lexical-core",
- "num",
+ "num-traits",
 "ryu",
 ]

 [[package]]
 name = "arrow-csv"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cea5068bef430a86690059665e40034625ec323ffa4dd21972048eebb0127adc"
+checksum = "d374882fb465a194462527c0c15a93aa19a554cf690a6b77a26b2a02539937a7"
 dependencies = [
 "arrow-array",
- "arrow-buffer",
 "arrow-cast",
- "arrow-data",
 "arrow-schema",
 "chrono",
 "csv",
 "csv-core",
- "lazy_static",
- "lexical-core",
 "regex",
 ]

 [[package]]
 name = "arrow-data"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cb29be98f987bcf217b070512bb7afba2f65180858bca462edf4a39d84a23e10"
+checksum = "189d210bc4244c715fa3ed9e6e22864673cccb73d5da28c2723fb2e527329b33"
 dependencies = [
 "arrow-buffer",
 "arrow-schema",
 "half",
- "num",
+ "num-integer",
+ "num-traits",
 ]

 [[package]]
 name = "arrow-ipc"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ffc68f6523970aa6f7ce1dc9a33a7d9284cfb9af77d4ad3e617dbe5d79cc6ec8"
+checksum = "7968c2e5210c41f4909b2ef76f6e05e172b99021c2def5edf3cc48fdd39d1d6c"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
- "arrow-cast",
 "arrow-data",
 "arrow-schema",
+ "arrow-select",
 "flatbuffers",
 "lz4_flex",
+ "zstd",
 ]

 [[package]]
 name = "arrow-json"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2041380f94bd6437ab648e6c2085a045e45a0c44f91a1b9a4fe3fed3d379bfb1"
+checksum = "92111dba5bf900f443488e01f00d8c4ddc2f47f5c50039d18120287b580baa22"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
@@ -274,70 +321,84 @@ dependencies = [
 "chrono",
 "half",
 "indexmap",
+ "itoa",
 "lexical-core",
- "num",
- "serde",
+ "memchr",
+ "num-traits",
+ "ryu",
+ "serde_core",
 "serde_json",
+ "simdutf8",
 ]

 [[package]]
 name = "arrow-ord"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "fcb56ed1547004e12203652f12fe12e824161ff9d1e5cf2a7dc4ff02ba94f413"
+checksum = "211136cb253577ee1a6665f741a13136d4e563f64f5093ffd6fb837af90b9495"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
 "arrow-data",
 "arrow-schema",
 "arrow-select",
- "half",
- "num",
+]
+
+[[package]]
+name = "arrow-pyarrow"
+version = "58.0.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "205437da4c0877c756c81bfe847a621d0a740cd00a155109d65510a1a62ebcd9"
+dependencies = [
+ "arrow-array",
+ "arrow-data",
+ "arrow-schema",
+ "pyo3",
 ]

 [[package]]
 name = "arrow-row"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "575b42f1fc588f2da6977b94a5ca565459f5ab07b60545e17243fb9a7ed6d43e"
+checksum = "8e0f20145f9f5ea3fe383e2ba7a7487bf19be36aa9dbf5dd6a1f92f657179663"
 dependencies = [
- "ahash",
 "arrow-array",
 "arrow-buffer",
 "arrow-data",
 "arrow-schema",
 "half",
- "hashbrown",
 ]

 [[package]]
 name = "arrow-schema"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "32aae6a60458a2389c0da89c9de0b7932427776127da1a738e2efc21d32f3393"
+checksum = "1b47e0ca91cc438d2c7879fe95e0bca5329fff28649e30a88c6f760b1faeddcb"
 dependencies = [
- "bitflags 2.5.0",
+ "bitflags",
+ "serde_core",
+ "serde_json",
 ]

 [[package]]
 name = "arrow-select"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "de36abaef8767b4220d7b4a8c2fe5ffc78b47db81b03d77e2136091c3ba39102"
+checksum = "750a7d1dda177735f5e82a314485b6915c7cccdbb278262ac44090f4aba4a325"
 dependencies = [
 "ahash",
 "arrow-array",
 "arrow-buffer",
 "arrow-data",
 "arrow-schema",
- "num",
+ "num-traits",
 ]

 [[package]]
 name = "arrow-string"
-version = "52.0.0"
+version = "58.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "e435ada8409bcafc910bc3e0077f532a4daa20e99060a496685c0e3e53cc2597"
+checksum = "e1eab1208bc4fe55d768cdc9b9f3d9df5a794cdb3ee2586bf89f9b30dc31ad8c"
 dependencies = [
 "arrow-array",
 "arrow-buffer",
@@ -345,27 +406,42 @@ dependencies = [
 "arrow-schema",
 "arrow-select",
 "memchr",
- "num",
+ "num-traits",
 "regex",
 "regex-syntax",
 ]

+[[package]]
+name = "as_derive_utils"
+version = "0.11.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "ff3c96645900a44cf11941c111bd08a6573b0e2f9f69bc9264b179d8fae753c4"
+dependencies = [
+ "core_extensions",
+ "proc-macro2",
+ "quote",
+ "syn 1.0.109",
+]
+
 [[package]]
 name = "async-compression"
-version = "0.4.11"
+version = "0.4.40"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cd066d0b4ef8ecb03a55319dc13aa6910616d0f44008a045bb1835af830abff5"
+checksum = "7d67d43201f4d20c78bcda740c142ca52482d81da80681533d33bf3f0596c8e2"
 dependencies = [
- "bzip2",
- "flate2",
- "futures-core",
- "futures-io",
- "memchr",
+ "compression-codecs",
+ "compression-core",
 "pin-project-lite",
 "tokio",
- "xz2",
- "zstd 0.13.0",
- "zstd-safe 7.0.0",
+]
+
+[[package]]
+name = "async-ffi"
+version = "0.5.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "f4de21c0feef7e5a556e51af767c953f0501f7f300ba785cc99c47bdc8081a50"
+dependencies = [
+ "abi_stable",
 ]

 [[package]]
@@ -376,18 +452,18 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11"
 dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.68",
+ "syn 2.0.117",
 ]

 [[package]]
 name = "async-trait"
-version = "0.1.80"
+version = "0.1.89"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c6fa2087f2753a7da8cc1c0dbfcf89579dd57458e36769de5ac750b4671737ca"
+checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb"
 dependencies = [
 "proc-macro2",
 "quote",
- "syn 2.0.68",
+ "syn 2.0.117",
 ]

 [[package]]
@@ -407,30 +483,9 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0"

 [[package]]
 name = "autocfg"
-version = "1.3.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c4b4d0bd25bd0b74681c0ad21497610ce1b7c91b1022cd21c80c6fbdd9476b0"
-
-[[package]]
-name = "backtrace"
-version = "0.3.72"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "17c6a35df3749d2e8bb1b7b21a976d82b15548788d2735b9d82f329268f71a11"
-dependencies = [
- "addr2line",
- "cc",
- "cfg-if",
- "libc",
- "miniz_oxide",
- "object",
- "rustc-demangle",
-]
-
-[[package]]
-name = "base64"
-version = "0.21.7"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9d297deb1925b89f2ccc13d7635fa0714f12c87adce1c75356b39ca9b7178567"
+checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"

 [[package]]
 name = "base64"
@@ -439,16 +494,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6"

 [[package]]
-name = "bitflags"
-version = "1.3.2"
+name = "bigdecimal"
+version = "0.4.10"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
+checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695"
+dependencies = [
+ "autocfg",
+ "libm",
+ "num-bigint",
+ "num-integer",
+ "num-traits",
+ "serde",
+]

 [[package]]
 name = "bitflags"
-version = "2.5.0"
+version = "2.11.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1"
+checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af"

 [[package]]
 name = "blake2"
@@ -461,15 +524,16 @@ dependencies = [

 [[package]]
 name = "blake3"
-version = "1.5.1"
+version = "1.8.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "30cca6d3674597c30ddf2c587bf8d9d65c9a84d2326d941cc79c9842dfe0ef52"
+checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d"
 dependencies = [
 "arrayref",
 "arrayvec",
 "cc",
 "cfg-if",
 "constant_time_eq",
+ "cpufeatures",
 ]

 [[package]]
@@ -481,11 +545,36 @@ dependencies = [
 "generic-array",
 ]

+[[package]]
+name = "bon"
+version = "3.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "2d13a61f2963b88eef9c1be03df65d42f6996dfeac1054870d950fcf66686f83"
+dependencies = [
+ "bon-macros",
+ "rustversion",
+]
+
+[[package]]
+name = "bon-macros"
+version = "3.9.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d314cc62af2b6b0c65780555abb4d02a03dd3b799cd42419044f0c38d99738c0"
+dependencies = [
+ "darling",
+ "ident_case",
+ "prettyplease",
+ "proc-macro2",
+ "quote",
+ "rustversion",
+ "syn 2.0.117",
+]
+
 [[package]]
 name = "brotli"
-version = "6.0.0"
+version = "8.0.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "74f7971dbd9326d58187408ab83117d8ac1bb9c17b085fdacd1cf2f598719b6b"
+checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560"
 dependencies = [
 "alloc-no-stdlib",
 "alloc-stdlib",
@@ -494,9 +583,9 @@ dependencies = [

 [[package]]
 name = "brotli-decompressor"
-version = "4.0.1"
+version = "5.0.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9a45bd2e4095a8b518033b128020dd4a55aab1c0a381ba4404a472630f4bc362"
+checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03"
 dependencies = [
 "alloc-no-stdlib",
 "alloc-stdlib",
 ]

 [[package]]
 name = "bumpalo"
-version = "3.16.0"
+version = "3.20.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "79296716171880943b8470b5f8d03aa55eb2e645a4874bdbb28adb49162e012c"
+checksum = "5c6f81257d10a0f602a294ae4182251151ff97dbb504ef9afcdda4a64b24d9b4"

 [[package]]
 name = "byteorder"
@@ -516,103 +605,105 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b"

 [[package]]
 name = "bytes"
-version = "1.6.0"
+version = "1.11.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9"
+checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33"

 [[package]]
 name = "bzip2"
-version = "0.4.4"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "bdb116a6ef3f6c3698828873ad02c3014b3c85cadb88496095628e3ef1e347f8"
-dependencies = [
- "bzip2-sys",
- "libc",
-]
-
-[[package]]
-name = "bzip2-sys"
-version = "0.1.11+1.0.8"
+version = "0.6.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc"
+checksum = "f3a53fac24f34a81bc9954b5d6cfce0c21e18ec6959f44f56e8e90e4bb7c346c"
 dependencies = [
- "cc",
- "libc",
- "pkg-config",
+ "libbz2-rs-sys",
 ]

 [[package]]
 name = "cc"
-version = "1.0.98"
+version = "1.2.56"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "41c270e7540d725e65ac7f1b212ac8ce349719624d7bcff99f8e2e488e8cf03f"
+checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2"
 dependencies = [
+ "find-msvc-tools",
 "jobserver",
 "libc",
- "once_cell",
+ "shlex",
 ]

 [[package]]
 name = "cfg-if"
-version = "1.0.0"
+version = "1.0.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
+
+[[package]]
+name = "cfg_aliases"
+version = "0.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
+checksum = "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724"

 [[package]]
 name = "chrono"
-version = "0.4.38"
+version = "0.4.44"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a21f936df1771bf62b77f047b726c4625ff2e8aa607c01ec06e5a05bd8463401"
+checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0"
 dependencies = [
- "android-tzdata",
 "iana-time-zone",
 "num-traits",
 "serde",
- "windows-targets 0.52.5",
+ "windows-link",
 ]

 [[package]]
 name = "chrono-tz"
-version = "0.9.0"
+version = "0.10.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "93698b29de5e97ad0ae26447b344c482a7284c737d9ddc5f9e52b74a336671bb"
+checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3"
 dependencies = [
 "chrono",
- "chrono-tz-build",
 "phf",
 ]

 [[package]]
-name = "chrono-tz-build"
-version = "0.3.0"
+name = "cmake"
+version = "0.1.57"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c088aee841df9c3041febbb73934cfc39708749bf96dc827e3359cd39ef11b1"
+checksum = "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d"
 dependencies = [
- "parse-zoneinfo",
- "phf",
- "phf_codegen",
+ "cc",
 ]

 [[package]]
-name = "cmake"
-version = "0.1.50"
+name = "comfy-table"
+version = "7.2.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a31c789563b815f77f4250caee12365734369f942439b7defd71e18a48197130"
+checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47"
 dependencies = [
- "cc",
+ "unicode-segmentation",
+ "unicode-width",
 ]

 [[package]]
-name = "comfy-table"
-version = "7.1.1"
+name = "compression-codecs"
+version = "0.4.37"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b34115915337defe99b2aff5c2ce6771e5fbc4079f4b506301f5cf394c8452f7"
+checksum = "eb7b51a7d9c967fc26773061ba86150f19c50c0d65c887cb1fbe295fd16619b7"
 dependencies = [
- "strum 0.26.2",
- "strum_macros 0.26.4",
- "unicode-width",
+ "bzip2",
+ "compression-core",
+ "flate2",
+ "liblzma",
+ "memchr",
+ "zstd",
+ "zstd-safe",
 ]

+[[package]]
+name = "compression-core"
+version = "0.4.31"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "75984efb6ed102a0d42db99afb6c1948f0380d1d91808d5529916e6c08b49d8d"
+
 [[package]]
 name = "const-random"
 version = "0.1.18"
@@ -628,22 +719,31 @@ version = "0.1.16"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
 dependencies = [
- "getrandom",
+ "getrandom 0.2.17",
 "once_cell",
 "tiny-keccak",
 ]

+[[package]]
+name = "const_panic"
+version = "0.2.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "e262cdaac42494e3ae34c43969f9cdeb7da178bdb4b66fa6a1ea2edb4c8ae652"
+dependencies = [
+ "typewit",
+]
+
 [[package]]
 name = "constant_time_eq"
-version = "0.3.0"
+version = "0.4.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "f7144d30dcf0fafbce74250a3963025d8d52177934239851c917d29f1df280c2"
+checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b"

 [[package]]
 name = "core-foundation"
-version = "0.9.4"
+version = "0.10.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "91e195e091a93c46f7102ec7818a2aa394e1e1771c3ab4825963fa03e45afb8f"
+checksum = "b2a6cd9ae233e7f62ba4e9353e81a88df7fc8a5987b8d445b4d90c879bd156f6"
 dependencies = [
 "core-foundation-sys",
 "libc",
@@ -651,88 +751,148 @@ dependencies = [

 [[package]]
 name = "core-foundation-sys"
-version = "0.8.6"
+version = "0.8.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "06ea2b9bc92be3c2baa9334a323ebca2d6f074ff852cd1d7b11064035cd3868f"
+checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b"

 [[package]]
-name = "core2"
-version = "0.4.0"
+name = "core_extensions"
+version = "1.5.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b49ba7ef1ad6107f8824dbe97de947cbaac53c44e7f9756a1fba0d37c1eec505"
+checksum = "42bb5e5d0269fd4f739ea6cedaf29c16d81c27a7ce7582008e90eb50dcd57003"
 dependencies = [
- "memchr",
+ "core_extensions_proc_macros",
 ]

+[[package]]
+name = "core_extensions_proc_macros"
+version = "1.5.4"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "533d38ecd2709b7608fb8e18e4504deb99e9a72879e6aa66373a76d8dc4259ea"
+
 [[package]]
 name = "cpufeatures"
-version = "0.2.12"
+version = "0.2.17"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "53fe5e26ff1b7aef8bca9c6080520cfb8d9333c7568e1829cef191a9723e5504"
+checksum = "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280"
 dependencies = [
 "libc",
 ]

 [[package]]
 name = "crc32fast"
-version = "1.4.2"
+version = "1.5.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
+checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
 dependencies = [
 "cfg-if",
 ]

+[[package]]
+name = "crossbeam-channel"
+version = "0.5.15"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2"
+dependencies = [
+ "crossbeam-utils",
+]
+
+[[package]]
+name = "crossbeam-utils"
+version = "0.8.21"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28"
+
 [[package]]
 name = "crunchy"
-version = "0.2.2"
+version = "0.2.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
+checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5"

 [[package]]
 name = "crypto-common"
-version = "0.1.6"
+version = "0.1.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
+checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a"
 dependencies = [
 "generic-array",
 "typenum",
 ]

+[[package]]
+name = "cstr"
+version = "0.2.12"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "68523903c8ae5aacfa32a0d9ae60cadeb764e1da14ee0d26b1f3089f13a54636"
+dependencies = [
+ "proc-macro2",
+ "quote",
+]
+
 [[package]]
 name = "csv"
-version = "1.3.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe"
+checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938"
 dependencies = [
 "csv-core",
 "itoa",
 "ryu",
- "serde",
+ "serde_core",
 ]

 [[package]]
 name = "csv-core"
-version = "0.1.11"
+version = "0.1.13"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "5efa2b3d7902f4b634a20cae3c9c4e6209dc4779feb6863329607560143efa70"
+checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782"
 dependencies = [
 "memchr",
 ]

 [[package]]
-name = "dary_heap"
-version = "0.3.6"
+name = "darling"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "25ae13da2f202d56bd7f91c25fba009e7717a1e4a1cc98a76d844b65ae912e9d"
+dependencies = [
+ "darling_core",
+ "darling_macro",
+]
+
+[[package]]
+name = "darling_core"
+version = "0.23.0"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "9865a50f7c335f53564bb694ef660825eb8610e0a53d3e11bf1b0d3df31e03b0"
+dependencies = [
+ "ident_case",
+ "proc-macro2",
+ "quote",
+ "strsim",
+ "syn 2.0.117",
+]
+
+[[package]]
+name = "darling_macro"
+version = "0.23.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "7762d17f1241643615821a8455a0b2c3e803784b058693d990b11f2dce25a0ca"
+checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d"
+dependencies = [
+ "darling_core",
+ "quote",
+ "syn 2.0.117",
+]

 [[package]]
 name = "dashmap"
-version = "5.5.3"
+version = "6.1.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856"
+checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf"
 dependencies = [
 "cfg-if",
- "hashbrown",
+ "crossbeam-utils",
+ "hashbrown 0.14.5",
 "lock_api",
 "once_cell",
 "parking_lot_core",
@@ -740,106 +900,309 @@ dependencies = [

 [[package]]
 name = "datafusion"
-version = "39.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2f92d2d7a9cba4580900b32b009848d9eb35f1028ac84cdd6ddcf97612cd0068"
+version = "53.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f"
 dependencies = [
- "ahash",
- "apache-avro",
 "arrow",
- "arrow-array",
- "arrow-ipc",
 "arrow-schema",
- "async-compression",
 "async-trait",
 "bytes",
 "bzip2",
 "chrono",
- "dashmap",
+ "datafusion-catalog",
+ "datafusion-catalog-listing",
 "datafusion-common",
 "datafusion-common-runtime",
+ "datafusion-datasource",
+ "datafusion-datasource-arrow",
+ "datafusion-datasource-avro",
+ "datafusion-datasource-csv",
+ "datafusion-datasource-json",
+ "datafusion-datasource-parquet",
 "datafusion-execution",
 "datafusion-expr",
+ "datafusion-expr-common",
 "datafusion-functions",
 "datafusion-functions-aggregate",
- "datafusion-functions-array",
+ "datafusion-functions-nested",
+ "datafusion-functions-table",
+ "datafusion-functions-window",
 "datafusion-optimizer",
 "datafusion-physical-expr",
+ "datafusion-physical-expr-adapter",
 "datafusion-physical-expr-common",
+ "datafusion-physical-optimizer",
 "datafusion-physical-plan",
+ "datafusion-session",
 "datafusion-sql",
 "flate2",
 "futures",
- "glob",
- "half",
- "hashbrown",
- "indexmap",
- "itertools 0.12.1",
+ "itertools",
+ "liblzma",
 "log",
- "num-traits",
- "num_cpus",
 "object_store",
 "parking_lot",
 "parquet",
- "paste",
- "pin-project-lite",
 "rand",
+ "regex",
 "sqlparser",
 "tempfile",
 "tokio",
- "tokio-util",
 "url",
 "uuid",
- "xz2",
- "zstd 0.13.0",
+ "zstd",
+]
+
+[[package]]
+name = "datafusion-catalog"
+version = "53.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f"
+dependencies = [
+ "arrow",
+ "async-trait",
+ "dashmap",
+ "datafusion-common",
+ "datafusion-common-runtime",
+ "datafusion-datasource",
+ "datafusion-execution",
+ "datafusion-expr",
+ "datafusion-physical-expr",
+ "datafusion-physical-plan",
+ "datafusion-session",
+ "futures",
+ "itertools",
+ "log",
+ "object_store",
+ "parking_lot",
+ "tokio",
+]
+
+[[package]]
+name = "datafusion-catalog-listing"
+version = "53.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f"
+dependencies = [
+ "arrow",
+ "async-trait",
+ "datafusion-catalog",
+ "datafusion-common",
+ "datafusion-datasource",
+ "datafusion-execution",
+ "datafusion-expr",
+ "datafusion-physical-expr",
+ "datafusion-physical-expr-adapter",
+ "datafusion-physical-expr-common",
+ "datafusion-physical-plan",
+ "futures",
+ "itertools",
+ "log",
+ "object_store",
 ]

 [[package]]
 name = "datafusion-common"
-version = "39.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "effed030d2c1667eb1e11df5372d4981eaf5d11a521be32220b3985ae5ba6971"
+version = "53.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f"
 dependencies = [
 "ahash",
 "apache-avro",
 "arrow",
- "arrow-array",
- "arrow-buffer",
- "arrow-schema",
+ "arrow-ipc",
 "chrono",
 "half",
- "hashbrown",
- "instant",
+ "hashbrown 0.16.1",
+ "indexmap",
+ "itertools",
 "libc",
- "num_cpus",
+ "log",
 "object_store",
 "parquet",
- "pyo3",
+ "paste",
+ "recursive",
 "sqlparser",
+ "tokio",
+ "web-time",
 ]

 [[package]]
 name = "datafusion-common-runtime"
-version = "39.0.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "d0091318129dad1359f08e4c6c71f855163c35bba05d1dbf983196f727857894"
+version = "53.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f"
+dependencies = [
+ "futures",
+ "log",
+ "tokio",
+]
+
+[[package]]
+name = "datafusion-datasource"
+version = "53.0.0"
+source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f"
+dependencies = [
+ "arrow",
+ "async-compression",
"async-trait", + "bytes", + "bzip2", + "chrono", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "flate2", + "futures", + "glob", + "itertools", + "liblzma", + "log", + "object_store", + "rand", + "tokio", + "tokio-util", + "url", + "zstd", +] + +[[package]] +name = "datafusion-datasource-arrow" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ + "arrow", + "arrow-ipc", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "itertools", + "object_store", "tokio", ] +[[package]] +name = "datafusion-datasource-avro" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "apache-avro", + "arrow", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-datasource", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "num-traits", + "object_store", +] + +[[package]] +name = "datafusion-datasource-csv" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "object_store", + "regex", + "tokio", +] + +[[package]] +name = "datafusion-datasource-json" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "object_store", + "serde_json", + "tokio", + "tokio-stream", +] + +[[package]] +name = "datafusion-datasource-parquet" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-pruning", + "datafusion-session", + "futures", + "itertools", + "log", + "object_store", + "parking_lot", + "parquet", + "tokio", +] + +[[package]] +name = "datafusion-doc" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" + 
[[package]] name = "datafusion-execution" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8385aba84fc4a06d3ebccfbcbf9b4f985e80c762fac634b49079f7cc14933fb1" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "arrow", + "arrow-buffer", + "async-trait", "chrono", "dashmap", "datafusion-common", "datafusion-expr", + "datafusion-physical-expr-common", "futures", - "hashbrown", "log", "object_store", "parking_lot", @@ -850,43 +1213,91 @@ dependencies = [ [[package]] name = "datafusion-expr" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebb192f0055d2ce64e38ac100abc18e4e6ae9734d3c28eee522bbbd6a32108a3" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ - "ahash", "arrow", - "arrow-array", - "arrow-buffer", + "async-trait", "chrono", "datafusion-common", + "datafusion-doc", + "datafusion-expr-common", + "datafusion-functions-aggregate-common", + "datafusion-functions-window-common", + "datafusion-physical-expr-common", + "indexmap", + "itertools", "paste", + "recursive", "serde_json", "sqlparser", - "strum 0.26.2", - "strum_macros 0.26.4", +] + +[[package]] +name = "datafusion-expr-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "indexmap", + "itertools", + "paste", +] + +[[package]] +name = "datafusion-ffi" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "abi_stable", + "arrow", + "arrow-schema", + "async-ffi", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-proto", + "datafusion-proto-common", + "datafusion-session", + "futures", + "log", + "prost", + "semver", + "tokio", ] [[package]] name = "datafusion-functions" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "27c081ae5b7edd712b92767fb8ed5c0e32755682f8075707666cd70835807c0b" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "arrow", - "base64 0.22.1", + "arrow-buffer", + "base64", "blake2", "blake3", "chrono", + "chrono-tz", "datafusion-common", + "datafusion-doc", "datafusion-execution", "datafusion-expr", - "datafusion-physical-expr", - "hashbrown", + "datafusion-expr-common", + "datafusion-macros", "hex", - "itertools 0.12.1", + "itertools", "log", "md-5", + "memchr", + "num-traits", "rand", "regex", "sha2", @@ -896,200 +1307,363 @@ dependencies = [ [[package]] name = "datafusion-functions-aggregate" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feb28a4ea52c28a26990646986a27c4052829a2a2572386258679e19263f8b78" +version = "53.0.0" +source = 
"git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "ahash", "arrow", - "arrow-schema", "datafusion-common", + "datafusion-doc", "datafusion-execution", "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-macros", + "datafusion-physical-expr", "datafusion-physical-expr-common", + "half", "log", + "num-traits", "paste", - "sqlparser", ] [[package]] -name = "datafusion-functions-array" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89b17c02a74cdc87380a56758ec27e7d417356bf806f33062700908929aedb8a" +name = "datafusion-functions-aggregate-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", + "datafusion-expr-common", + "datafusion-physical-expr-common", +] + +[[package]] +name = "datafusion-functions-nested" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "arrow", - "arrow-array", - "arrow-buffer", "arrow-ord", - "arrow-schema", "datafusion-common", + "datafusion-doc", "datafusion-execution", "datafusion-expr", + "datafusion-expr-common", "datafusion-functions", - "itertools 0.12.1", + "datafusion-functions-aggregate", + "datafusion-functions-aggregate-common", + "datafusion-macros", + "datafusion-physical-expr-common", + "hashbrown 0.16.1", + "itertools", + "itoa", "log", "paste", ] [[package]] -name = "datafusion-optimizer" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12172f2a6c9eb4992a51e62d709eeba5dedaa3b5369cce37ff6c2260e100ba76" +name = "datafusion-functions-table" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "arrow", "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-expr", + "datafusion-physical-plan", + "parking_lot", + "paste", +] + +[[package]] +name = "datafusion-functions-window" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-doc", + "datafusion-expr", + "datafusion-functions-window-common", + "datafusion-macros", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "log", + "paste", +] + +[[package]] +name = "datafusion-functions-window-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "datafusion-common", + "datafusion-physical-expr-common", +] + +[[package]] +name = "datafusion-macros" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "datafusion-doc", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "datafusion-optimizer" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", "chrono", 
"datafusion-common", "datafusion-expr", + "datafusion-expr-common", "datafusion-physical-expr", - "hashbrown", "indexmap", - "itertools 0.12.1", + "itertools", "log", + "recursive", + "regex", "regex-syntax", ] [[package]] name = "datafusion-physical-expr" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a3fce531b623e94180f6cd33d620ef01530405751b6ddd2fd96250cdbd78e2e" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "ahash", "arrow", - "arrow-array", - "arrow-buffer", - "arrow-ord", - "arrow-schema", - "arrow-string", - "base64 0.22.1", - "chrono", "datafusion-common", - "datafusion-execution", "datafusion-expr", - "datafusion-functions-aggregate", + "datafusion-expr-common", + "datafusion-functions-aggregate-common", "datafusion-physical-expr-common", "half", - "hashbrown", - "hex", + "hashbrown 0.16.1", "indexmap", - "itertools 0.12.1", - "log", + "itertools", + "parking_lot", "paste", "petgraph", - "regex", + "recursive", + "tokio", +] + +[[package]] +name = "datafusion-physical-expr-adapter" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-expr", + "datafusion-functions", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "itertools", ] [[package]] name = "datafusion-physical-expr-common" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "046400b6a2cc3ed57a7c576f5ae6aecc77804ac8e0186926b278b189305b2a77" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "chrono", + "datafusion-common", + "datafusion-expr-common", + "hashbrown 0.16.1", + "indexmap", + "itertools", + "parking_lot", +] + +[[package]] +name = "datafusion-physical-optimizer" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "arrow", "datafusion-common", + "datafusion-execution", "datafusion-expr", - "rand", + "datafusion-expr-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-pruning", + "itertools", + "recursive", ] [[package]] name = "datafusion-physical-plan" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4aed47f5a2ad8766260befb375b201592e86a08b260256e168ae4311426a2bff" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "ahash", "arrow", - "arrow-array", - "arrow-buffer", "arrow-ord", "arrow-schema", "async-trait", - "chrono", "datafusion-common", "datafusion-common-runtime", "datafusion-execution", "datafusion-expr", - "datafusion-functions-aggregate", + "datafusion-functions", + "datafusion-functions-aggregate-common", + "datafusion-functions-window-common", "datafusion-physical-expr", "datafusion-physical-expr-common", "futures", "half", - "hashbrown", + "hashbrown 0.16.1", "indexmap", - "itertools 0.12.1", + "itertools", "log", - "once_cell", + "num-traits", "parking_lot", 
"pin-project-lite", - "rand", "tokio", ] +[[package]] +name = "datafusion-proto" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "chrono", + "datafusion-catalog", + "datafusion-catalog-listing", + "datafusion-common", + "datafusion-datasource", + "datafusion-datasource-arrow", + "datafusion-datasource-csv", + "datafusion-datasource-json", + "datafusion-datasource-parquet", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-table", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-proto-common", + "object_store", + "prost", + "rand", +] + +[[package]] +name = "datafusion-proto-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "prost", +] + +[[package]] +name = "datafusion-pruning" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-datasource", + "datafusion-expr-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "itertools", + "log", +] + [[package]] name = "datafusion-python" -version = "39.0.0" +version = "52.0.0" dependencies = [ "arrow", + "arrow-select", "async-trait", + "cstr", "datafusion", - "datafusion-common", - "datafusion-expr", - "datafusion-functions-array", - "datafusion-optimizer", - "datafusion-sql", + "datafusion-ffi", + "datafusion-proto", "datafusion-substrait", "futures", + "log", "mimalloc", "object_store", "parking_lot", "prost", "prost-types", "pyo3", + "pyo3-async-runtimes", "pyo3-build-config", - "rand", - "regex-syntax", - "sqlparser", - "syn 2.0.68", + "pyo3-log", + "serde_json", "tokio", "url", "uuid", ] +[[package]] +name = "datafusion-session" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "async-trait", + "datafusion-common", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-plan", + "parking_lot", +] + [[package]] name = "datafusion-sql" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fa92bb1fd15e46ce5fb6f1c85f3ac054592560f294429a28e392b5f9cd4255e" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ "arrow", - "arrow-array", - "arrow-schema", + "bigdecimal", + "chrono", "datafusion-common", "datafusion-expr", + "datafusion-functions-nested", + "indexmap", "log", + "recursive", "regex", "sqlparser", - "strum 0.26.2", ] [[package]] name = "datafusion-substrait" -version = "39.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8249d69665c1cd32e07789ed6dd1da6528a23019ef16d3483db52952b6f9f68a" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" dependencies = [ - "arrow-buffer", "async-recursion", + "async-trait", "chrono", "datafusion", - "itertools 0.12.1", + 
"half", + "itertools", "object_store", "pbjson-types", "prost", "substrait", + "tokio", + "url", ] [[package]] @@ -1104,69 +1678,81 @@ dependencies = [ ] [[package]] -name = "doc-comment" -version = "0.3.3" +name = "displaydoc" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] [[package]] name = "dyn-clone" -version = "1.0.17" +version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d6ef0072f8a535281e4876be788938b528e9a1d43900b82c2569af7da799125" +checksum = "d0881ea181b1df73ff77ffaaf9c7544ecc11e82fba9b5f27b262a3c73a332555" [[package]] name = "either" -version = "1.12.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dca9240753cf90908d7e4aac30f630662b02aebaa1b58a3cadabdb23385b58b" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] name = "equivalent" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443807d6dff69373d433ab9ef5378ad8df50ca6298caf15de6e52e24aaf54d5" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.9" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "534c5cf6194dfab3db3242765c03bbe257cf92f22b38f6bc0c58d59108a820ba" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "fastrand" -version = "2.1.0" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc0510504f03c51ada170672ac806f1f105a88aa97a5281117e1ddc3368e51a" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "fixedbitset" -version = "0.4.2" +version = "0.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flatbuffers" -version = "24.3.25" +version = "25.12.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8add37afff2d4ffa83bc748a70b4b1370984f6980768554182424ef71447c35f" +checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3" dependencies = [ - "bitflags 1.3.2", + "bitflags", "rustc_version", ] [[package]] name = "flate2" -version = "1.0.30" +version = "1.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f54427cfd1c7829e2a139fcefea601bf088ebca651d2bf53ebc600eac295dae" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" dependencies = [ "crc32fast", "miniz_oxide", + "zlib-rs", ] [[package]] @@ -1175,20 +1761,32 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] [[package]] name = "futures" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645c6916888f6cb6350d2550b80fb63e734897a8498abe35cfb732b6487804b0" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -1201,9 +1799,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eac8f7d7865dcb88bd4373ab671c8cf4508703796caa2b1985a9ca867b3fcb78" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -1211,15 +1809,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a576fc72ae164fca6b9db127eaa9a9dda0d61316034f33a0a0d4eda41f02b01d" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", @@ -1228,38 +1826,38 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a44623e20b9681a318efdd71c299b6b222ed6f231972bfe2f224ebad6311f0c1" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-macro" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "futures-sink" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fb8e00e87438d937621c1c6269e53f536c14d3fbd6a042bb24879e57d474fb5" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-util" -version = "0.3.30" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3d6401deb83407ab3da39eba7e33987a73c3df0c82b4bb5813ee871c19c41d48" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -1269,10 +1867,18 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] +[[package]] +name = "generational-arena" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877e94aff08e743b651baaea359664321055749b398adff8740a7399af7796e7" +dependencies = [ + "cfg-if", +] + [[package]] name = "generic-array" version = "0.14.7" @@ -1285,32 +1891,55 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", + "js-sys", "libc", "wasi", + "wasm-bindgen", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "js-sys", + "libc", + "r-efi", + "wasip2", + "wasm-bindgen", ] [[package]] -name = "gimli" -version = "0.29.0" +name = "getrandom" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40ecd4077b5ae9fd2e9e169b102c6c330d0605168eb0e8bf79952b256dbefffd" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", + "wasip3", +] [[package]] name = "glob" -version = "0.3.1" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "h2" -version = "0.4.5" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa82e28a107a8cc405f0839610bdc9b15f1e25ec7d696aa5cf173edbcb1486ab" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -1327,13 +1956,14 @@ dependencies = [ [[package]] name = "half" -version = "2.4.1" +version = "2.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dd08c532ae367adf81c312a4580bc67f1d0fe8bc9c460520283f4c0ff277888" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" dependencies = [ "cfg-if", "crunchy", "num-traits", + "zerocopy", ] [[package]] @@ -1341,28 +1971,32 @@ name = "hashbrown" version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ - "ahash", - "allocator-api2", + "foldhash 0.1.5", ] [[package]] -name = "heck" -version = "0.4.1" +name = "hashbrown" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "allocator-api2", + 
"equivalent", + "foldhash 0.2.0", +] [[package]] name = "heck" version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - -[[package]] -name = "hermit-abi" -version = "0.3.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" [[package]] name = "hex" @@ -1372,20 +2006,19 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "1.1.0" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] [[package]] name = "http-body" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cac85db508abc24a2e48553ba12a996e87244a0395ce011e62b37158745d643" +checksum = "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" dependencies = [ "bytes", "http", @@ -1393,9 +2026,9 @@ dependencies = [ [[package]] name = "http-body-util" -version = "0.1.1" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0475f8b2ac86659c21b64320d5d653f9efe42acd2a4e560073ec61a155a34f1d" +checksum = "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" dependencies = [ "bytes", "futures-core", @@ -1406,31 +2039,33 @@ dependencies = [ [[package]] name = "httparse" -version = "1.8.0" +version = "1.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" +checksum = "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" [[package]] name = "humantime" -version = "2.1.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" [[package]] name = "hyper" -version = "1.3.1" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe575dd17d0862a9a33781c8c4696a55c320909004a67a00fb286ba8b1bc496d" +checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", + "futures-core", "h2", "http", "http-body", "httparse", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -1438,15 +2073,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.26.0" +version = "0.27.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a0bea761b46ae2b24eb4aef630d8d1c398157b6fc29e6350ecf090a0b70c952c" +checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ - "futures-util", "http", "hyper", "hyper-util", "rustls", + "rustls-native-certs", "rustls-pki-types", "tokio", "tokio-rustls", @@ -1455,34 +2090,38 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.5" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b875924a60b96e5d7b9ae7b066540b1dd1cbd90d1828f54c92e02a283351c56" +checksum = 
"96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" dependencies = [ + "base64", "bytes", "futures-channel", "futures-util", "http", "http-body", "hyper", + "ipnet", + "libc", + "percent-encoding", "pin-project-lite", "socket2", "tokio", - "tower", "tower-service", "tracing", ] [[package]] name = "iana-time-zone" -version = "0.1.60" +version = "0.1.65" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7ffbb5a1b541ea2561f8c41c087286cc091e21e556a4f09a8f6cbf17b69b141" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" dependencies = [ "android_system_properties", "core-foundation-sys", "iana-time-zone-haiku", "js-sys", + "log", "wasm-bindgen", "windows-core", ] @@ -1497,41 +2136,129 @@ dependencies = [ ] [[package]] -name = "idna" -version = "0.5.0" +name = "icu_collections" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "634d9b1461af396cad843f47fdba5597a4f9e6ddd4bfb6ff5d85028c25cb12f6" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" dependencies = [ - "unicode-bidi", - "unicode-normalization", + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", ] [[package]] -name = "indexmap" -version = "2.2.6" +name = "icu_locale_core" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" dependencies = [ - "equivalent", - "hashbrown", + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", ] [[package]] -name = "indoc" -version = "2.0.5" +name = "icu_normalizer_data" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b248f5224d1d606005e02c97f5aa4e88eeb230488bcc03bc9ca4d7991399f2b5" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" [[package]] -name = "instant" -version = "0.1.13" +name = "icu_properties" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0242819d153cba4b4b05a5a8f2a7e9bbf97b6055b2a002b395c96b5ff3c0222" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" dependencies = [ - "cfg-if", - "js-sys", - "wasm-bindgen", - "web-sys", + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "ident_case" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e0384b61958566e926dc50660321d12159025e767c18e043daf26b70104c39" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", ] [[package]] @@ -1542,63 +2269,66 @@ checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "ipnet" -version = "2.9.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] -name = "itertools" -version = "0.11.0" +name = "iri-string" +version = "0.7.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1c173a5686ce8bfa551b3563d0c2170bf24ca44da99c7ca4bfdab5418c3fe57" +checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" dependencies = [ - "either", + "memchr", + "serde", ] [[package]] name = "itertools" -version = "0.12.1" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.11" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jobserver" -version = "0.1.31" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2b099aaa34a9751c5bf0878add70444e1ed2dd73f347be99003d4577277de6e" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ + "getrandom 0.3.4", "libc", ] [[package]] name = "js-sys" -version = "0.3.69" +version = "0.3.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" dependencies = [ + "once_cell", "wasm-bindgen", ] [[package]] -name = "lazy_static" -version = "1.4.0" +name = "leb128fmt" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "lexical-core" -version = "0.8.5" +version = "1.0.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cde5de06e8d4c2faabc400238f9ae1c74d5412d03a7bd067645ccbc47070e46" +checksum = "7d8d125a277f807e55a77304455eb7b1cb52f2b18c143b60e766c120bd64a594" dependencies = [ "lexical-parse-float", "lexical-parse-integer", @@ -1609,96 +2339,101 @@ dependencies = [ [[package]] name = "lexical-parse-float" -version = "0.8.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "683b3a5ebd0130b8fb52ba0bdc718cc56815b6a097e28ae5a6997d0ad17dc05f" +checksum = "52a9f232fbd6f550bc0137dcb5f99ab674071ac2d690ac69704593cb4abbea56" dependencies = [ "lexical-parse-integer", "lexical-util", - "static_assertions", ] [[package]] name = "lexical-parse-integer" -version = "0.8.6" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d0994485ed0c312f6d965766754ea177d07f9c00c9b82a5ee62ed5b47945ee9" +checksum = "9a7a039f8fb9c19c996cd7b2fcce303c1b2874fe1aca544edc85c4a5f8489b34" dependencies = [ "lexical-util", - "static_assertions", ] [[package]] name = "lexical-util" -version = "0.8.5" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5255b9ff16ff898710eb9eb63cb39248ea8a5bb036bea8085b1a767ff6c4e3fc" -dependencies = [ - "static_assertions", -] +checksum = "2604dd126bb14f13fb5d1bd6a66155079cb9fa655b37f875b3a742c705dbed17" [[package]] name = "lexical-write-float" -version = "0.8.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "accabaa1c4581f05a3923d1b4cfd124c329352288b7b9da09e766b0668116862" +checksum = "50c438c87c013188d415fbabbb1dceb44249ab81664efbd31b14ae55dabb6361" dependencies = [ "lexical-util", "lexical-write-integer", - "static_assertions", ] [[package]] name = "lexical-write-integer" -version = "0.8.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1b6f3d1f4422866b68192d62f77bc5c700bee84f3069f2469d7bc8c77852446" +checksum = "409851a618475d2d5796377cad353802345cba92c867d9fbcde9cf4eac4e14df" dependencies = [ "lexical-util", - "static_assertions", ] +[[package]] +name = "libbz2-rs-sys" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c4a545a15244c7d945065b5d392b2d2d7f21526fba56ce51467b06ed445e8f7" + [[package]] name = "libc" -version = "0.2.155" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97b3888a4aecf77e811145cadf6eef5901f4782c53886191b2f693f24761847c" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] -name = "libflate" -version = "2.1.0" +name = "libloading" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45d9dfdc14ea4ef0900c1cddbc8dcd553fbaacd8a4a282cf4018ae9dd04fb21e" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" dependencies = [ - "adler32", - "core2", - "crc32fast", - "dary_heap", - "libflate_lz77", + "cfg-if", + "winapi", +] + +[[package]] +name = "liblzma" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6033b77c21d1f56deeae8014eb9fbe7bdf1765185a6c508b5ca82eeaed7f899" +dependencies = [ + "liblzma-sys", ] [[package]] -name = "libflate_lz77" -version = "2.1.0" +name = "liblzma-sys" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6e0d73b369f386f1c44abd9c570d5318f55ccde816ff4b562fa452e5182863d" 
+checksum = "9f2db66f3268487b5033077f266da6777d057949b8f93c8ad82e441df25e6186" dependencies = [ - "core2", - "hashbrown", - "rle-decode-fast", + "cc", + "libc", + "pkg-config", ] [[package]] name = "libm" -version = "0.2.8" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec2a862134d2a7d32d7983ddcdd1c4923530833c9f2ea1a44fc5fa473989058" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" [[package]] name = "libmimalloc-sys" -version = "0.1.39" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23aa6811d3bd4deb8a84dde645f943476d13b248d818edcf8ce0b2f37f036b44" +checksum = "667f4fec20f29dfc6bc7357c582d91796c169ad7e2fce709468aefeb2c099870" dependencies = [ "cc", "libc", @@ -1706,44 +2441,44 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.4.14" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78b3ae25bc7c8c38cec158d1f2757ee79e9b3740fbc7ccf0e59e4b08d793fa89" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" dependencies = [ - "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.21" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] -name = "lz4_flex" -version = "0.11.3" +name = "lru-slab" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75761162ae2b0e580d7e7c390558127e5f01b4194debd6221fd8c207fc80e3f5" -dependencies = [ - "twox-hash", -] +checksum = "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" [[package]] -name = "lzma-sys" -version = "0.1.20" +name = "lz4_flex" +version = "0.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fda04ab3764e6cde78b9974eec4f779acaba7c4e84b36eca3cf77c581b85d27" +checksum = "ab6473172471198271ff72e9379150e9dfd70d8e533e0752a27e515b48dd375e" dependencies = [ - "cc", - "libc", - "pkg-config", + "twox-hash", ] [[package]] @@ -1758,82 +2493,55 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" - -[[package]] -name = "memoffset" -version = "0.9.1" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" -dependencies = [ - "autocfg", -] +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "mimalloc" -version = "0.1.43" +version = "0.1.48" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68914350ae34959d83f732418d51e2427a794055d0b9529f48259ac07af65633" +checksum = 
"e1ee66a4b64c74f4ef288bcbb9192ad9c3feaad75193129ac8509af543894fd8" dependencies = [ "libmimalloc-sys", ] -[[package]] -name = "mime" -version = "0.3.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" - [[package]] name = "miniz_oxide" -version = "0.7.3" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87dfd01fe195c66b572b37921ad8803d010623c0aca821bea2302239d155cdae" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ - "adler", + "adler2", + "simd-adler32", ] [[package]] name = "mio" -version = "0.8.11" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] name = "multimap" -version = "0.10.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defc4c55412d89136f966bbb339008b474350e5e6e78d2714439c386b3137a03" - -[[package]] -name = "num" -version = "0.4.3" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35bd024e8b2ff75562e5f34e7f4905839deb4b22955ef5e73d2fea1b9813cb23" -dependencies = [ - "num-bigint", - "num-complex", - "num-integer", - "num-iter", - "num-rational", - "num-traits", -] +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" [[package]] name = "num-bigint" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c165a9ab64cf766f73521c0dd2cfdff64f488b8f0b3e621face3462d3db536d7" +checksum = "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" dependencies = [ "num-integer", "num-traits", + "serde", ] [[package]] @@ -1854,28 +2562,6 @@ dependencies = [ "num-traits", ] -[[package]] -name = "num-iter" -version = "0.1.45" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1429034a0490724d0075ebb2bc9e875d6503c3cf69e235a8941aa757d83ef5bf" -dependencies = [ - "autocfg", - "num-integer", - "num-traits", -] - -[[package]] -name = "num-rational" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" -dependencies = [ - "num-bigint", - "num-integer", - "num-traits", -] - [[package]] name = "num-traits" version = "0.2.19" @@ -1886,39 +2572,33 @@ dependencies = [ "libm", ] -[[package]] -name = "num_cpus" -version = "1.16.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4161fcb6d602d4d2081af7c3a45852d875a03dd337a6bfdd6e06407b61342a43" -dependencies = [ - "hermit-abi", - "libc", -] - [[package]] name = "object" -version = "0.35.0" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8ec7ab813848ba4522158d5517a6093db1ded27575b070f4177b8d12b41db5e" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] [[package]] name = "object_store" -version = "0.10.1" +version = "0.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbebfd32c213ba1907fa7a9c9138015a8de2b43e30c5aa45b18f7deb46786ad6" +checksum = "c2858065e55c148d294a9f3aae3b0fa9458edadb41a108397094566f4e3c0dfb" 
dependencies = [ "async-trait", - "base64 0.22.1", + "base64", "bytes", "chrono", + "form_urlencoded", "futures", + "http", + "http-body-util", + "httparse", "humantime", "hyper", - "itertools 0.12.1", + "itertools", "md-5", "parking_lot", "percent-encoding", @@ -1926,27 +2606,30 @@ dependencies = [ "rand", "reqwest", "ring", - "rustls-pemfile", + "rustls-pki-types", "serde", "serde_json", - "snafu", + "serde_urlencoded", + "thiserror", "tokio", "tracing", "url", "walkdir", + "wasm-bindgen-futures", + "web-time", ] [[package]] name = "once_cell" -version = "1.19.0" +version = "1.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fdb12b2476b595f9358c5161aa467c2438859caa136dec86c26fdd2efe17b92" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" [[package]] name = "openssl-probe" -version = "0.1.5" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" +checksum = "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" [[package]] name = "ordered-float" @@ -1959,9 +2642,9 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" dependencies = [ "lock_api", "parking_lot_core", @@ -1969,60 +2652,51 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" dependencies = [ "cfg-if", "libc", "redox_syscall", "smallvec", - "windows-targets 0.52.5", + "windows-link", ] [[package]] name = "parquet" -version = "52.0.0" +version = "58.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c3b5322cc1bbf67f11c079c42be41a55949099b78732f7dba9e15edde40eab" +checksum = "3f491d0ef1b510194426ee67ddc18a9b747ef3c42050c19322a2cd2e1666c29b" dependencies = [ "ahash", "arrow-array", "arrow-buffer", - "arrow-cast", "arrow-data", "arrow-ipc", "arrow-schema", "arrow-select", - "base64 0.22.1", + "base64", "brotli", "bytes", "chrono", "flate2", "futures", "half", - "hashbrown", + "hashbrown 0.16.1", "lz4_flex", - "num", "num-bigint", + "num-integer", + "num-traits", "object_store", "paste", "seq-macro", + "simdutf8", "snap", "thrift", "tokio", "twox-hash", - "zstd 0.13.0", - "zstd-sys", -] - -[[package]] -name = "parse-zoneinfo" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24" -dependencies = [ - "regex", + "zstd", ] [[package]] @@ -2033,31 +2707,31 @@ checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" [[package]] name = "pbjson" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1030c719b0ec2a2d25a5df729d6cff1acf3cc230bf766f4f97833591f7577b90" +checksum = "898bac3fa00d0ba57a4e8289837e965baa2dee8c3749f3b11d45a64b4223d9c3" dependencies = [ - "base64 0.21.7", + "base64", "serde", ] [[package]] name = "pbjson-build" -version = "0.6.2" +version = "0.8.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2580e33f2292d34be285c5bc3dba5259542b083cfad6037b6d70345f24dcb735" +checksum = "af22d08a625a2213a78dbb0ffa253318c5c79ce3133d32d296655a7bdfb02095" dependencies = [ - "heck 0.4.1", - "itertools 0.11.0", + "heck", + "itertools", "prost", "prost-types", ] [[package]] name = "pbjson-types" -version = "0.6.0" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18f596653ba4ac51bdecbb4ef6773bc7f56042dc13927910de1684ad3d32aa12" +checksum = "8e748e28374f10a330ee3bb9f29b828c0ac79831a32bab65015ad9b661ead526" dependencies = [ "bytes", "chrono", @@ -2070,83 +2744,45 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "petgraph" -version = "0.6.5" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" dependencies = [ "fixedbitset", + "hashbrown 0.15.5", "indexmap", + "serde", ] [[package]] name = "phf" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ade2d8b8f33c7333b51bcf0428d37e217e9f32192ae4772156f65063b8ce03dc" -dependencies = [ - "phf_shared", -] - -[[package]] -name = "phf_codegen" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8d39688d359e6b34654d328e262234662d16cc0f60ec8dcbe5e718709342a5a" -dependencies = [ - "phf_generator", - "phf_shared", -] - -[[package]] -name = "phf_generator" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48e4cc64c2ad9ebe670cb8fd69dd50ae301650392e81c05f9bfcb2d5bdbc24b0" +checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" dependencies = [ "phf_shared", - "rand", ] [[package]] name = "phf_shared" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90fcb95eef784c2ac79119d1dd819e162b5da872ce6f3c3abe1e8ca1c082f72b" +checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" dependencies = [ "siphasher", ] -[[package]] -name = "pin-project" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" -dependencies = [ - "pin-project-internal", -] - -[[package]] -name = "pin-project-internal" -version = "1.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.68", -] - [[package]] name = "pin-project-lite" -version = "0.2.14" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" [[package]] name = "pin-utils" @@ -2156,46 +2792,58 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkg-config" -version = "0.3.30" +version = "0.3.32" source 
= "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" [[package]] name = "portable-atomic" -version = "1.6.0" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + +[[package]] +name = "potential_utf" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7170ef9988bc169ba16dd36a7fa041e5c4cbeb6a35b76d4c03daded371eae7c0" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] [[package]] name = "ppv-lite86" -version = "0.2.17" +version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] [[package]] name = "prettyplease" -version = "0.2.20" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f12335488a2f3b0a83b14edad48dca9879ce89b2edd10e80237e4e852dd645e" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "proc-macro2" -version = "1.0.85" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22244ce15aa966053a896d1accb3a6e68469b97c7f33f284b99f0d576879fc23" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] [[package]] name = "prost" -version = "0.12.6" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", "prost-derive", @@ -2203,160 +2851,248 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.6" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ - "bytes", - "heck 0.5.0", - "itertools 0.12.1", + "heck", + "itertools", "log", "multimap", - "once_cell", "petgraph", "prettyplease", "prost", "prost-types", "regex", - "syn 2.0.68", + "syn 2.0.117", "tempfile", ] [[package]] name = "prost-derive" -version = "0.12.6" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools", "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "prost-types" -version = "0.12.6" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" +dependencies = [ + "prost", +] + +[[package]] +name = "protobuf-src" +version = "2.1.1+27.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "6217c3504da19b85a3a4b2e9a5183d635822d83507ba0986624b5c05b83bfc40" dependencies = [ - "prost", + "cmake", ] [[package]] -name = "protobuf-src" -version = "2.0.1+26.1" +name = "psm" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8ba1cfa4b9dc098926b8cce388bf434b93516db3ecf6e8b1a37eb643d733ee7" +checksum = "3852766467df634d74f0b2d7819bf8dc483a0eb2e3b0f50f756f9cfe8b0d18d8" dependencies = [ - "cmake", + "ar_archive_writer", + "cc", ] [[package]] name = "pyo3" -version = "0.21.2" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5e00b96a521718e08e03b1a622f01c8a8deb50719335de3f60b3b3950f069d8" +checksum = "cf85e27e86080aafd5a22eae58a162e133a589551542b3e5cee4beb27e54f8e1" dependencies = [ - "cfg-if", - "indoc", "libc", - "memoffset", - "parking_lot", + "once_cell", "portable-atomic", "pyo3-build-config", "pyo3-ffi", "pyo3-macros", - "unindent", ] [[package]] -name = "pyo3-build-config" -version = "0.21.2" +name = "pyo3-async-runtimes" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7883df5835fafdad87c0d888b266c8ec0f4c9ca48a5bed6bbb592e8dedee1b50" +checksum = "9e7364a95bf00e8377bbf9b0f09d7ff9715a29d8fcf93b47d1a967363b973178" dependencies = [ + "futures-channel", + "futures-util", "once_cell", + "pin-project-lite", + "pyo3", + "tokio", +] + +[[package]] +name = "pyo3-build-config" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf94ee265674bf76c09fa430b0e99c26e319c945d96ca0d5a8215f31bf81cf7" +dependencies = [ "target-lexicon", ] [[package]] name = "pyo3-ffi" -version = "0.21.2" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01be5843dc60b916ab4dad1dca6d20b9b4e6ddc8e15f50c47fe6d85f1fb97403" +checksum = "491aa5fc66d8059dd44a75f4580a2962c1862a1c2945359db36f6c2818b748dc" dependencies = [ "libc", "pyo3-build-config", ] +[[package]] +name = "pyo3-log" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c2ec80932c5c3b2d4fbc578c9b56b2d4502098587edb8bef5b6bfcad43682e" +dependencies = [ + "arc-swap", + "log", + "pyo3", +] + [[package]] name = "pyo3-macros" -version = "0.21.2" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77b34069fc0682e11b31dbd10321cbf94808394c56fd996796ce45217dfac53c" +checksum = "f5d671734e9d7a43449f8480f8b38115df67bef8d21f76837fa75ee7aaa5e52e" dependencies = [ "proc-macro2", "pyo3-macros-backend", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "pyo3-macros-backend" -version = "0.21.2" +version = "0.28.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08260721f32db5e1a5beae69a55553f56b99bd0e1c3e6e0a5e8851a9d0f5a85c" +checksum = "22faaa1ce6c430a1f71658760497291065e6450d7b5dc2bcf254d49f66ee700a" dependencies = [ - "heck 0.4.1", + "heck", "proc-macro2", "pyo3-build-config", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "quad-rand" -version = "0.2.1" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "658fa1faf7a4cc5f057c9ee5ef560f717ad9d8dc66d975267f709624d6e1ab88" +checksum = "5a651516ddc9168ebd67b24afd085a718be02f8858fe406591b013d101ce2f40" [[package]] name = "quick-xml" -version = "0.31.0" +version = "0.38.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1004a344b30a54e2ee58d66a71b32d2db2feb0a31f9a2d302bf0536f15de2a33" +checksum = "b66c2058c55a409d601666cffe35f04333cf1013010882cec174a7467cd4e21c" dependencies = [ "memchr", "serde", ] +[[package]] +name = "quinn" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" +dependencies = [ + "bytes", + "cfg_aliases", + "pin-project-lite", + "quinn-proto", + "quinn-udp", + "rustc-hash", + "rustls", + "socket2", + "thiserror", + "tokio", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-proto" +version = "0.11.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" +dependencies = [ + "bytes", + "getrandom 0.3.4", + "lru-slab", + "rand", + "ring", + "rustc-hash", + "rustls", + "rustls-pki-types", + "slab", + "thiserror", + "tinyvec", + "tracing", + "web-time", +] + +[[package]] +name = "quinn-udp" +version = "0.5.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" +dependencies = [ + "cfg_aliases", + "libc", + "once_cell", + "socket2", + "tracing", + "windows-sys 0.60.2", +] + [[package]] name = "quote" -version = "1.0.36" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa76aaf39101c457836aec0ce2316dbdc3ab723cdda1c6bd4e6ad4208acaca7" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" -version = "0.8.5" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ - "libc", "rand_chacha", "rand_core", ] [[package]] name = "rand_chacha" -version = "0.3.1" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" dependencies = [ "ppv-lite86", "rand_core", @@ -2364,27 +3100,47 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.4" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "recursive" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0786a43debb760f491b1bc0269fe5e84155353c67482b9e60d0cfb596054b43e" +dependencies = [ + "recursive-proc-macro-impl", + "stacker", +] + +[[package]] +name = "recursive-proc-macro-impl" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" +checksum = "76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b" dependencies = [ - "getrandom", + "quote", + "syn 2.0.117", ] [[package]] name = "redox_syscall" -version = "0.5.1" +version = "0.5.18" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "469052894dcb553421e483e4209ee581a45100d31b4018de03e5a7ad86374a7e" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.5.0", + "bitflags", ] [[package]] name = "regex" -version = "1.10.4" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -2394,9 +3150,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.6" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -2405,33 +3161,42 @@ dependencies = [ [[package]] name = "regex-lite" -version = "0.1.5" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b661b2f27137bdbc16f00eda72866a92bb28af1753ffbd56744fb6e2e9cd8e" +checksum = "cab834c73d247e67f4fae452806d17d3c7501756d98c8808d7c9c7aa7d18f973" [[package]] name = "regex-syntax" -version = "0.8.3" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "regress" -version = "0.9.1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eae2a1ebfecc58aff952ef8ccd364329abe627762f5bf09ff42eb9d98522479" +checksum = "2057b2325e68a893284d1538021ab90279adac1139957ca2a74426c6f118fb48" dependencies = [ - "hashbrown", + "hashbrown 0.16.1", "memchr", ] +[[package]] +name = "repr_offset" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb1070755bd29dffc19d0971cab794e607839ba2ef4b69a9e6fbc8733c1b72ea" +dependencies = [ + "tstr", +] + [[package]] name = "reqwest" -version = "0.12.4" +version = "0.12.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "566cafdd92868e0939d3fb961bd0dc25fcfaaed179291093b3d43e6b3150ea10" +checksum = "eddd3ca559203180a307f12d114c268abf583f59b03cb906fd0b3ff8646c1147" dependencies = [ - "base64 0.22.1", + "base64", "bytes", "futures-core", "futures-util", @@ -2442,16 +3207,13 @@ dependencies = [ "hyper", "hyper-rustls", "hyper-util", - "ipnet", "js-sys", "log", - "mime", - "once_cell", "percent-encoding", "pin-project-lite", + "quinn", "rustls", "rustls-native-certs", - "rustls-pemfile", "rustls-pki-types", "serde", "serde_json", @@ -2460,71 +3222,65 @@ dependencies = [ "tokio", "tokio-rustls", "tokio-util", + "tower", + "tower-http", "tower-service", "url", "wasm-bindgen", "wasm-bindgen-futures", "wasm-streams", "web-sys", - "winreg", ] [[package]] name = "ring" -version = "0.17.8" +version = "0.17.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" +checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom", + "getrandom 0.2.17", "libc", - "spin", "untrusted", "windows-sys 0.52.0", ] [[package]] -name = "rle-decode-fast" -version = "1.0.3" 
-source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3582f63211428f83597b51b2ddb88e2a91a9d52d12831f9d08f5e624e8977422" - -[[package]] -name = "rustc-demangle" -version = "0.1.24" +name = "rustc-hash" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa0f585226d2e68097d4f95d113b15b83a82e819ab25717ec0590d9584ef366" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" dependencies = [ "semver", ] [[package]] name = "rustix" -version = "0.38.34" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70dc5ec042f7a43c4a73241207cecc9873a06d45debb38b329f8541d85c2730f" +checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" dependencies = [ - "bitflags 2.5.0", + "bitflags", "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "rustls" -version = "0.22.4" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ - "log", + "once_cell", "ring", "rustls-pki-types", "rustls-webpki", @@ -2534,38 +3290,31 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.7.0" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f1fb85efa936c42c6d5fc28d2629bb51e4b2f4b8a5211e297d599cc5a093792" +checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ "openssl-probe", - "rustls-pemfile", "rustls-pki-types", "schannel", "security-framework", ] [[package]] -name = "rustls-pemfile" -version = "2.1.2" +name = "rustls-pki-types" +version = "1.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29993a25686778eb88d4189742cd713c9bce943bc54251a33509dc63cbacf73d" +checksum = "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" dependencies = [ - "base64 0.22.1", - "rustls-pki-types", + "web-time", + "zeroize", ] -[[package]] -name = "rustls-pki-types" -version = "1.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976295e77ce332211c0d24d92c0e83e50f5c5f046d11082cea19f3df13a3562d" - [[package]] name = "rustls-webpki" -version = "0.102.4" +version = "0.103.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff448f7e92e913c4b7d4c6d8e4540a1724b319b4152b8aef6d4cf8339712b33e" +checksum = "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" dependencies = [ "ring", "rustls-pki-types", @@ -2574,15 +3323,15 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.17" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "955d28af4278de8121b7ebeb796b6a45735dc01436d898801014aced2773a3d6" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" -version = "1.0.18" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"f3cb5ba0dc43242ce17de99c180e96db90b235b8a9fdc9543c96d2209116bd9f" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -2595,18 +3344,18 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.23" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbc91545643bcf3a0bbb6569265615222618bdf33ce4ffbbd13c4bbd4c093534" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "schemars" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09c024468a378b7e36765cd36702b7a90cc3cba11654f6685c8f233408e89e92" +checksum = "3fbf2ae1b8bc8e02df939598064d22402220cd5bbcca1c76f7d6a310974d5615" dependencies = [ "dyn-clone", "schemars_derive", @@ -2616,14 +3365,14 @@ dependencies = [ [[package]] name = "schemars_derive" -version = "0.8.21" +version = "0.8.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1eee588578aff73f856ab961cd2f79e36bc45d7ded33a7562adba4667aecc0e" +checksum = "32e265784ad618884abaea0600a9adf15393368d840e0222d101a072f3f7534d" dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] @@ -2634,11 +3383,11 @@ checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" [[package]] name = "security-framework" -version = "2.11.0" +version = "3.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c627723fd09706bacdb5cf41499e95098555af3c3c29d014dc3c458ef6be11c0" +checksum = "d17b898a6d6948c3a8ee4372c17cb384f90d2e6e912ef00895b14fd7ab54ec38" dependencies = [ - "bitflags 2.5.0", + "bitflags", "core-foundation", "core-foundation-sys", "libc", @@ -2647,9 +3396,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.11.0" +version = "2.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "317936bbbd05227752583946b9e66d7ce3b489f84e11a94a510b4437fef407d7" +checksum = "321c8673b092a9a42605034a9879d73cb79101ed5fd117bc9a597b89b4e9e61a" dependencies = [ "core-foundation-sys", "libc", @@ -2657,37 +3406,58 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.23" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61697e0a1c7e512e84a621326239844a24d8207b4669b41bc18b32ea5cbf988b" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" dependencies = [ "serde", + "serde_core", ] [[package]] name = "seq-macro" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3f0bf26fd526d2a95683cd0f87bf103b8539e2ca1ef48ce002d67aad59aa0b4" +checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" [[package]] name = "serde" -version = "1.0.203" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_bytes" +version = "0.11.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a5d440709e79d88e51ac01c4b72fc6cb7314017bb7da9eeff678aa94c10e3ea8" +dependencies = [ + "serde", + "serde_core", +] + +[[package]] +name = "serde_core" +version = "1.0.228" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "7253ab4de971e72fb7be983802300c30b5a7f0c2e56fab8abfc6a214307c0094" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.203" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "500cbc0ebeb6f46627f50f3f5811ccf6bf00643be300b4c3eabc0ef55dc5b5ba" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] @@ -2698,30 +3468,32 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "serde_json" -version = "1.0.117" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "455182ea6142b14f93f4bc5320a2b31c1f266b66a4a5c858b013302a5d8cbfc3" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", - "ryu", + "memchr", "serde", + "serde_core", + "zmij", ] [[package]] name = "serde_tokenstream" -version = "0.2.0" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a00ffd23fd882d096f09fcaae2a9de8329a328628e86027e049ee051dc1621f" +checksum = "64060d864397305347a78851c51588fd283767e7e7589829e8121d65512340f1" dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] @@ -2751,9 +3523,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -2761,47 +3533,40 @@ dependencies = [ ] [[package]] -name = "siphasher" -version = "0.3.11" +name = "shlex" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] -name = "slab" -version = "0.4.9" +name = "simd-adler32" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" [[package]] -name = "smallvec" -version = "1.13.2" +name = "simdutf8" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" [[package]] -name = "snafu" -version = "0.7.5" +name = "siphasher" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4de37ad025c587a29e8f3f5605c00f70b98715ef90b9061a815b9e59e9042d6" -dependencies = [ - "doc-comment", - "snafu-derive", -] +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" [[package]] -name = "snafu-derive" -version = "0.7.5" +name = "slab" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"990079665f075b699031e9c08fd3ab99be5029b96f3b78dc0709e8f77e4efebf" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "syn 1.0.109", -] +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "snap" @@ -2811,95 +3576,86 @@ checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" [[package]] name = "socket2" -version = "0.5.7" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce305eb0b4296696835b71df73eb912e0f1ffd2556a501fcede6e0c50349191c" +checksum = "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.60.2", ] -[[package]] -name = "spin" -version = "0.9.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6980e8d7511241f8acf4aebddbb1ff938df5eebe98691418c4468d0b72a96a67" - [[package]] name = "sqlparser" -version = "0.47.0" +version = "0.61.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "295e9930cd7a97e58ca2a070541a3ca502b17f5d1fa7157376d0fabd85324f25" +checksum = "dbf5ea8d4d7c808e1af1cbabebca9a2abe603bcefc22294c5b95018d53200cb7" dependencies = [ "log", + "recursive", "sqlparser_derive", ] [[package]] name = "sqlparser_derive" -version = "0.2.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01b2e185515564f15375f593fb966b5718bc624ba77fe49fa4616ad619690554" +checksum = "a6dd45d8fc1c79299bfbb7190e42ccbbdf6a5f52e4a6ad98d92357ea965bd289" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] -name = "static_assertions" -version = "1.1.0" +name = "stable_deref_trait" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" [[package]] -name = "strum" -version = "0.25.0" +name = "stacker" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290d54ea6f91c969195bdbcd7442c8c2a2ba87da8bf60a7ee86a235d4bc1e125" +checksum = "08d74a23609d509411d10e2176dc2a4346e3b4aea2e7b1869f19fdedbc71c013" +dependencies = [ + "cc", + "cfg-if", + "libc", + "psm", + "windows-sys 0.59.0", +] [[package]] -name = "strum" -version = "0.26.2" +name = "strsim" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d8cec3501a5194c432b2b7976db6b7d10ec95c253208b45f83f7136aa985e29" -dependencies = [ - "strum_macros 0.26.4", -] +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] -name = "strum_macros" -version = "0.25.3" +name = "strum" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23dc1fa9ac9c169a78ba62f0b841814b7abae11bdd047b9c58f893439e309ea0" -dependencies = [ - "heck 0.4.1", - "proc-macro2", - "quote", - "rustversion", - "syn 2.0.68", -] +checksum = "af23d6f6c1a224baef9d3f61e287d2761385a5b88fdab4eb4c6f11aeb54c4bcf" [[package]] name = "strum_macros" -version = "0.26.4" +version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c6bee85a5a24955dc440386795aa378cd9cf82acd5f764469152d2270e581be" 
+checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ - "heck 0.5.0", + "heck", "proc-macro2", "quote", - "rustversion", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "substrait" -version = "0.34.1" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04c77dec9b6c4e48ac828937bbe7cf473b0933168c5d76d51a5816ace7046be9" +checksum = "62fc4b483a129b9772ccb9c3f7945a472112fdd9140da87f8a4e7f1d44e045d0" dependencies = [ - "heck 0.5.0", + "heck", "pbjson", "pbjson-build", "pbjson-types", @@ -2908,21 +3664,22 @@ dependencies = [ "prost-build", "prost-types", "protobuf-src", + "regress", "schemars", "semver", "serde", "serde_json", "serde_yaml", - "syn 2.0.68", + "syn 2.0.117", "typify", "walkdir", ] [[package]] name = "subtle" -version = "2.5.0" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81cdd64d312baedb58e21336b31bc043b77e01cc99033ce76ef539f78e965ebc" +checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "syn" @@ -2937,9 +3694,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.68" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "901fa70d88b9d6c98022e23b4136f9f3e54e4662c3bc1bd1d84a42a9a0f0c1e9" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -2948,46 +3705,61 @@ dependencies = [ [[package]] name = "sync_wrapper" -version = "0.1.2" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" +dependencies = [ + "futures-core", +] + +[[package]] +name = "synstructure" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] [[package]] name = "target-lexicon" -version = "0.12.14" +version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1fc403891a21bcfb7c37834ba66a547a8f402146eba7265b5a6d88059c9ff2f" +checksum = "adb6935a6f5c20170eeceb1a3835a49e12e19d792f6dd344ccc76a985ca5a6ca" [[package]] name = "tempfile" -version = "3.10.1" +version = "3.25.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" dependencies = [ - "cfg-if", "fastrand", + "getrandom 0.4.1", + "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] [[package]] name = "thiserror" -version = "1.0.61" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c546c80d6be4bc6a00c0f01730c08df82eaa7a7a61f11d656526506112cc1709" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.61" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "46c3384250002a6d5af4d114f2845d37b57521033f30d5c3f46c4d70e1197533" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] 
[[package]] @@ -3010,11 +3782,21 @@ dependencies = [ "crunchy", ] +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + "displaydoc", + "zerovec", +] + [[package]] name = "tinyvec" -version = "1.6.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -3027,48 +3809,57 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.38.0" +version = "1.49.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba4f4a02a7a80d6f274636f0aa95c7e383b912d41fe721a31f29e29698585a4a" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" dependencies = [ - "backtrace", "bytes", "libc", "mio", - "num_cpus", "pin-project-lite", "socket2", "tokio-macros", - "windows-sys 0.48.0", + "windows-sys 0.61.2", ] [[package]] name = "tokio-macros" -version = "2.3.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f5ae998a069d4b5aba8ee9dad856af7d520c3699e6159b185c2acd48155d39a" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "tokio-rustls" -version = "0.25.0" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ "rustls", - "rustls-pki-types", "tokio", ] +[[package]] +name = "tokio-stream" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + [[package]] name = "tokio-util" -version = "0.7.11" +version = "0.7.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9cf6b47b3771c49ac75ad09a6162f53ad4b8088b76ac60e8ec1455b31a189fe1" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" dependencies = [ "bytes", "futures-core", @@ -3079,36 +3870,54 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.13" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +checksum = "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" dependencies = [ "futures-core", "futures-util", - "pin-project", "pin-project-lite", + "sync_wrapper", "tokio", "tower-layer", "tower-service", ] +[[package]] +name = "tower-http" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" +dependencies = [ + "bitflags", + "bytes", + "futures-util", + "http", + "http-body", + "iri-string", + "pin-project-lite", + "tower", + "tower-layer", + "tower-service", +] + [[package]] name = "tower-layer" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c20c8dbed6283a09604c3e69b4b7eeb54e298b8a600d4d5ecb5ad39de609f1d0" +checksum = "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" [[package]] name = "tower-service" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" +checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.40" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" dependencies = [ "pin-project-lite", "tracing-attributes", @@ -3117,20 +3926,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.27" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", ] [[package]] name = "tracing-core" -version = "0.1.32" +version = "0.1.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" dependencies = [ "once_cell", ] @@ -3139,49 +3948,52 @@ dependencies = [ name = "try-lock" version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" +checksum = "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + +[[package]] +name = "tstr" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f8e0294f14baae476d0dd0a2d780b2e24d66e349a9de876f5126777a37bdba7" +dependencies = [ + "tstr_proc_macros", +] + +[[package]] +name = "tstr_proc_macros" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78122066b0cb818b8afd08f7ed22f7fdbc3e90815035726f0840d0d26c0747a" [[package]] name = "twox-hash" -version = "1.6.3" +version = "2.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" -dependencies = [ - "cfg-if", - "static_assertions", -] +checksum = "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" [[package]] -name = "typed-builder" -version = "0.16.2" +name = "typed-arena" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34085c17941e36627a879208083e25d357243812c30e7d7387c3b954f30ade16" -dependencies = [ - "typed-builder-macro", -] +checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" [[package]] -name = "typed-builder-macro" -version = "0.16.2" +name = "typenum" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f03ca4cb38206e2bef0700092660bb74d696f808514dae47fa1467cbfe26e96e" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.68", -] +checksum = "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" [[package]] -name = "typenum" -version = "1.17.0" +name = "typewit" +version = "1.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" +checksum = "f8c1ae7cc0fdb8b842d65d127cb981574b0d2b249b74d1c7a2986863dc134f71" [[package]] name = "typify" -version = "0.1.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adb6beec125971dda80a086f90b4a70f60f222990ce4d63ad0fc140492f53444" +checksum = "e6d5bcc6f62eb1fa8aa4098f39b29f93dcb914e17158b76c50360911257aa629" dependencies = [ "typify-impl", "typify-macro", @@ -3189,11 +4001,11 @@ dependencies = [ [[package]] name = "typify-impl" -version = "0.1.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93bbb24e990654aff858d80fee8114f4322f7d7a1b1ecb45129e2fcb0d0ad5ae" +checksum = "a1eb359f7ffa4f9ebe947fa11a1b2da054564502968db5f317b7e37693cb2240" dependencies = [ - "heck 0.5.0", + "heck", "log", "proc-macro2", "quote", @@ -3202,16 +4014,16 @@ dependencies = [ "semver", "serde", "serde_json", - "syn 2.0.68", + "syn 2.0.117", "thiserror", "unicode-ident", ] [[package]] name = "typify-macro" -version = "0.1.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f8e6491896e955692d68361c68db2b263e3bec317ec0b684e0e2fa882fb6e31e" +checksum = "911c32f3c8514b048c1b228361bebb5e6d73aeec01696e8cc0e82e2ffef8ab7a" dependencies = [ "proc-macro2", "quote", @@ -3220,48 +4032,33 @@ dependencies = [ "serde", "serde_json", "serde_tokenstream", - "syn 2.0.68", + "syn 2.0.117", "typify-impl", ] -[[package]] -name = "unicode-bidi" -version = "0.3.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08f95100a766bf4f8f28f90d77e0a5461bbdb219042e7679bebe79004fed8d75" - [[package]] name = "unicode-ident" -version = "1.0.12" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" - -[[package]] -name = "unicode-normalization" -version = "0.1.23" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56d1686db2308d901306f92a263857ef59ea39678a5458e7cb17f01415101f5" -dependencies = [ - "tinyvec", -] +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-segmentation" -version = "1.11.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4c87d22b6e3f4a18d4d40ef354e97c90fcb14dd91d7dc0aa9d8a1172ebf7202" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" [[package]] name = "unicode-width" -version = "0.1.13" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0336d538f7abc86d282a4189614dfaa90810dfc2c6f6427eaf88e16311dd225d" +checksum = "b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" [[package]] -name = "unindent" -version = "0.2.3" +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7de7d73e1754487cb58364ee906a499937a0dfabd86bcb980fa99ec8c8fa2ce" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "unsafe-libyaml" @@ -3277,30 +4074,39 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.0" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e6302e3bb753d46e83516cae55ae196fc0c309407cf11ab35cc51a4c2a4633" +checksum = 
"ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", "percent-encoding", + "serde", ] +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + [[package]] name = "uuid" -version = "1.9.1" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5de17fd2f7da591098415cff336e12965a28061ddace43b59cb3c430179c9439" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" dependencies = [ - "getrandom", - "serde", + "getrandom 0.4.1", + "js-sys", + "serde_core", + "wasm-bindgen", ] [[package]] name = "version_check" -version = "0.9.4" +version = "0.9.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" [[package]] name = "walkdir" @@ -3323,52 +4129,60 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasm-bindgen" -version = "0.2.92" +name = "wasip2" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ - "cfg-if", - "wasm-bindgen-macro", + "wit-bindgen", ] [[package]] -name = "wasm-bindgen-backend" -version = "0.2.92" +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" dependencies = [ - "bumpalo", - "log", + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +dependencies = [ + "cfg-if", "once_cell", - "proc-macro2", - "quote", - "syn 2.0.68", + "rustversion", + "wasm-bindgen-macro", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.42" +version = "0.4.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" dependencies = [ "cfg-if", + "futures-util", "js-sys", + "once_cell", "wasm-bindgen", "web-sys", ] [[package]] name = "wasm-bindgen-macro" -version = "0.2.92" +version = "0.2.108" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3376,28 +4190,53 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.92" +version = "0.2.108" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" dependencies = [ + "bumpalo", "proc-macro2", "quote", - "syn 2.0.68", - "wasm-bindgen-backend", + "syn 2.0.117", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.92" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] [[package]] name = "wasm-streams" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b65dc4c90b63b118468cf747d8bf3566c1913ef60be765b5730ead9e0a3ba129" +checksum = "15053d8d85c7eccdbefef60f06769760a563c7f0a9d6902a13d35c7800b0ad65" dependencies = [ "futures-util", "js-sys", @@ -3406,41 +4245,126 @@ dependencies = [ "web-sys", ] +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + [[package]] name = "web-sys" -version = "0.3.69" +version = "0.3.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" dependencies = [ "js-sys", "wasm-bindgen", ] +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + [[package]] name = "winapi-util" -version = "0.1.8" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d4cc384e1e73b93bafa6fb4f1df8c41695c8a91cf9c4c64358067d15a7b6c6b" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.52.0", + "windows-sys 0.61.2", ] +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + [[package]] name = "windows-core" -version = "0.52.0" +version = "0.62.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" dependencies = [ - "windows-targets 0.52.5", + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", ] [[package]] -name = "windows-sys" -version = "0.48.0" +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" dependencies = [ - "windows-targets 0.48.5", + "windows-link", ] [[package]] @@ -3449,217 +4373,397 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.5", + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.59.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +dependencies = [ + "windows-targets 0.52.6", +] + +[[package]] +name = "windows-sys" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +dependencies = [ + "windows-targets 0.53.5", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", ] [[package]] name = "windows-targets" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" dependencies = [ - "windows_aarch64_gnullvm 0.48.5", - "windows_aarch64_msvc 0.48.5", - "windows_i686_gnu 0.48.5", - "windows_i686_msvc 0.48.5", - "windows_x86_64_gnu 0.48.5", - "windows_x86_64_gnullvm 0.48.5", - "windows_x86_64_msvc 0.48.5", + "windows_aarch64_gnullvm 0.52.6", + "windows_aarch64_msvc 0.52.6", + "windows_i686_gnu 
0.52.6", + "windows_i686_gnullvm 0.52.6", + "windows_i686_msvc 0.52.6", + "windows_x86_64_gnu 0.52.6", + "windows_x86_64_gnullvm 0.52.6", + "windows_x86_64_msvc 0.52.6", ] [[package]] name = "windows-targets" -version = "0.52.5" +version = "0.53.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f0713a46559409d202e70e28227288446bf7841d3211583a4b53e3f6d96e7eb" +checksum = "4945f9f551b88e0d65f3db0bc25c33b8acea4d9e41163edf90dcd0b19f9069f3" dependencies = [ - "windows_aarch64_gnullvm 0.52.5", - "windows_aarch64_msvc 0.52.5", - "windows_i686_gnu 0.52.5", - "windows_i686_gnullvm", - "windows_i686_msvc 0.52.5", - "windows_x86_64_gnu 0.52.5", - "windows_x86_64_gnullvm 0.52.5", - "windows_x86_64_msvc 0.52.5", + "windows-link", + "windows_aarch64_gnullvm 0.53.1", + "windows_aarch64_msvc 0.53.1", + "windows_i686_gnu 0.53.1", + "windows_i686_gnullvm 0.53.1", + "windows_i686_msvc 0.53.1", + "windows_x86_64_gnu 0.53.1", + "windows_x86_64_gnullvm 0.53.1", + "windows_x86_64_msvc 0.53.1", ] [[package]] name = "windows_aarch64_gnullvm" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" +checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7088eed71e8b8dda258ecc8bac5fb1153c5cffaf2578fc8ff5d61e23578d3263" +checksum = "a9d8416fa8b42f5c947f8482c43e7d89e73a173cead56d044f6a56104a6d1b53" [[package]] name = "windows_aarch64_msvc" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" +checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" [[package]] name = "windows_aarch64_msvc" -version = "0.52.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9985fd1504e250c615ca5f281c3f7a6da76213ebd5ccc9561496568a2752afb6" +checksum = "b9d782e804c2f632e395708e99a94275910eb9100b2114651e04744e9b125006" [[package]] name = "windows_i686_gnu" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" +checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" [[package]] name = "windows_i686_gnu" -version = "0.52.5" +version = "0.53.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "960e6da069d81e09becb0ca57a65220ddff016ff2d6af6a223cf372a506593a3" + +[[package]] +name = "windows_i686_gnullvm" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88ba073cf16d5372720ec942a8ccbf61626074c6d4dd2e745299726ce8b89670" +checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" [[package]] name = "windows_i686_gnullvm" -version = "0.52.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f4261229030a858f36b459e748ae97545d6f1ec60e5e0d6a3d32e0dc232ee9" +checksum = "fa7359d10048f68ab8b09fa71c3daccfb0e9b559aed648a8f95469c27057180c" [[package]] name = "windows_i686_msvc" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" +checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" [[package]] name = "windows_i686_msvc" -version = "0.52.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db3c2bf3d13d5b658be73463284eaf12830ac9a26a90c717b7f771dfe97487bf" +checksum = "1e7ac75179f18232fe9c285163565a57ef8d3c89254a30685b57d83a38d326c2" [[package]] name = "windows_x86_64_gnu" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" +checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" [[package]] name = "windows_x86_64_gnu" -version = "0.52.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4246f76bdeff09eb48875a0fd3e2af6aada79d409d33011886d3e1581517d9" +checksum = "9c3842cdd74a865a8066ab39c8a7a473c0778a3f29370b5fd6b4b9aa7df4a499" [[package]] name = "windows_x86_64_gnullvm" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" +checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "852298e482cd67c356ddd9570386e2862b5673c85bd5f88df9ab6802b334c596" +checksum = "0ffa179e2d07eee8ad8f57493436566c7cc30ac536a3379fdf008f47f6bb7ae1" [[package]] name = "windows_x86_64_msvc" -version = "0.48.5" +version = "0.52.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" +checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" [[package]] name = "windows_x86_64_msvc" -version = "0.52.5" +version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bec47e5bfd1bff0eeaf6d8b485cc1074891a197ab4225d504cb7a1ab88b02bf0" +checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] -name = "winreg" -version = "0.52.0" +name = "wit-bindgen" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a277a57398d4bfa075df44f501a17cfdf8542d224f0d36095a2adc7aee4ef0a5" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" dependencies = [ - "cfg-if", - "windows-sys 0.48.0", + "wit-bindgen-rust-macro", ] [[package]] -name = "xz2" -version = "0.1.7" +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies 
= [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388c44dc09d76f1536602ead6d325eb532f5c122f17782bd57fb47baeeb767e2" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ - "lzma-sys", + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.34" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae87e3fcd617500e5d106f0380cf7b77f3c6092aae37191433159dda23cfb087" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.34" +version = "0.8.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "15e934569e47891f7d9411f1a451d947a60e000ab3bd24fbb970f000387d1b3b" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.68", + "syn 2.0.117", + "synstructure", ] [[package]] name = "zeroize" -version = "1.8.1" +version = "1.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +checksum = "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" [[package]] -name = "zstd" -version = "0.12.4" +name = "zerotrie" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a27595e173641171fc74a1232b7b1c7a7cb6e18222c11e9dfb9888fa424c53c" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" 
dependencies = [ - "zstd-safe 6.0.6", + "displaydoc", + "yoke", + "zerofrom", ] [[package]] -name = "zstd" -version = "0.13.0" +name = "zerovec" +version = "0.11.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bffb3309596d527cfcba7dfc6ed6052f1d39dfbd7c867aa2e865e4a449c10110" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" dependencies = [ - "zstd-safe 7.0.0", + "yoke", + "zerofrom", + "zerovec-derive", ] [[package]] -name = "zstd-safe" -version = "6.0.6" +name = "zerovec-derive" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee98ffd0b48ee95e6c5168188e44a54550b1564d9d530ee21d5f0eaed1069581" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ - "libc", - "zstd-sys", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zlib-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c745c48e1007337ed136dc99df34128b9faa6ed542d80a1c673cf55a6d7236c8" + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", ] [[package]] name = "zstd-safe" -version = "7.0.0" +version = "7.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43747c7422e2924c11144d5229878b98180ef8b06cca4ab5af37afc8a8d8ea3e" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" dependencies = [ "zstd-sys", ] [[package]] name = "zstd-sys" -version = "2.0.9+zstd.1.5.5" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e16efa8a874a0481a574084d34cc26fdb3b99627480f785888deb6386506656" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/Cargo.toml b/Cargo.toml index a77eca0c3..b584470d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -17,49 +17,72 @@ [package] name = "datafusion-python" -version = "39.0.0" +version = "52.0.0" homepage = "https://datafusion.apache.org/python" repository = "https://github.com/apache/datafusion-python" authors = ["Apache DataFusion "] description = "Apache DataFusion DataFrame and SQL Query Engine" readme = "README.md" license = "Apache-2.0" -edition = "2021" -rust-version = "1.64" -include = ["/src", "/datafusion", "/LICENSE.txt", "pyproject.toml", "Cargo.toml", "Cargo.lock"] +edition = "2024" +rust-version = "1.88" +include = [ + "/src", + "/datafusion", + "/LICENSE.txt", + "build.rs", + "pyproject.toml", + "Cargo.toml", + "Cargo.lock", +] [features] default = ["mimalloc"] -protoc = [ "datafusion-substrait/protoc" ] +protoc = ["datafusion-substrait/protoc"] substrait = ["dep:datafusion-substrait"] [dependencies] -tokio = { version = "1.35", features = ["macros", "rt", "rt-multi-thread", "sync"] } -rand = "0.8" -pyo3 = { version = "0.21", features = ["extension-module", "abi3", "abi3-py38"] } -arrow = { version = "52", feature = ["pyarrow"] } -datafusion = { version = "39.0.0", features = ["pyarrow", "avro", "unicode_expressions"] } -datafusion-common = { version = "39.0.0", features = ["pyarrow"] } -datafusion-expr = "39.0.0" 
-datafusion-functions-array = "39.0.0" -datafusion-optimizer = "39.0.0" -datafusion-sql = "39.0.0" -datafusion-substrait = { version = "39.0.0", optional = true } -prost = "0.12" -prost-types = "0.12" -uuid = { version = "1.9", features = ["v4"] } -mimalloc = { version = "0.1", optional = true, default-features = false, features = ["local_dynamic_tls"] } -async-trait = "0.1" +tokio = { version = "1.49", features = [ + "macros", + "rt", + "rt-multi-thread", + "sync", +] } +pyo3 = { version = "0.28", features = [ + "extension-module", + "abi3", + "abi3-py310", +] } +pyo3-async-runtimes = { version = "0.28", features = ["tokio-runtime"] } +pyo3-log = "0.13.3" +arrow = { version = "58", features = ["pyarrow"] } +arrow-select = { version = "58" } +datafusion = { version = "53", features = ["avro", "unicode_expressions"] } +datafusion-substrait = { version = "53", optional = true } +datafusion-proto = { version = "53" } +datafusion-ffi = { version = "53" } +prost = "0.14.3" # keep in line with `datafusion-substrait` +serde_json = "1" +uuid = { version = "1.21", features = ["v4"] } +mimalloc = { version = "0.1", optional = true, default-features = false, features = [ + "local_dynamic_tls", +] } +async-trait = "0.1.89" futures = "0.3" -object_store = { version = "0.10.1", features = ["aws", "gcp", "azure"] } +cstr = "0.2" +object_store = { version = "0.13.1", features = [ + "aws", + "gcp", + "azure", + "http", +] } +url = "2" +log = "0.4.29" parking_lot = "0.12" -regex-syntax = "0.8.1" -syn = "2.0.68" -url = "2.2" -sqlparser = "0.47.0" [build-dependencies] -pyo3-build-config = "0.21" +prost-types = "0.14.3" # keep in line with `datafusion-substrait` +pyo3-build-config = "0.28" [lib] name = "datafusion_python" @@ -68,4 +91,11 @@ crate-type = ["cdylib", "rlib"] [profile.release] lto = true codegen-units = 1 - \ No newline at end of file + +# We cannot publish to crates.io with any patches in the below section. Developers +# must remove any entries in this section before creating a release candidate. +[patch.crates-io] +datafusion = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-substrait = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-proto = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-ffi = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } diff --git a/README.md b/README.md index b1d5397ef..810ac8710 100644 --- a/README.md +++ b/README.md @@ -30,10 +30,8 @@ DataFusion's Python bindings can be used as a foundation for building new data s planning, and logical plan optimizations, and then transpiles the logical plan to Dask operations for execution. - [DataFusion Ballista](https://github.com/apache/datafusion-ballista) is a distributed SQL query engine that extends DataFusion's Python bindings for distributed use cases. - -It is also possible to use these Python bindings directly for DataFrame and SQL operations, but you may find that -[Polars](http://pola.rs/) and [DuckDB](http://www.duckdb.org/) are more suitable for this use case, since they have -more of an end-user focus and are more actively maintained than these Python bindings. +- [DataFusion Ray](https://github.com/apache/datafusion-ray) is another distributed query engine that uses + DataFusion's Python bindings. 
## Features @@ -44,6 +42,10 @@ more of an end-user focus and are more actively maintained than these Python bin - Serialize and deserialize query plans in Substrait format. - Experimental support for transpiling SQL queries to DataFrame calls with Polars, Pandas, and cuDF. +For tips on tuning parallelism, see +[Maximizing CPU Usage](docs/source/user-guide/configuration.rst#maximizing-cpu-usage) +in the configuration guide. + ## Example Usage The following example demonstrates running a SQL query against a Parquet file using DataFusion, storing the results @@ -81,13 +83,53 @@ This produces the following chart: ![Chart](examples/chart.png) +## Registering a DataFrame as a View + +You can use SessionContext's `register_view` method to convert a DataFrame into a view and register it with the context. + +```python +from datafusion import SessionContext, col, literal + +# Create a DataFusion context +ctx = SessionContext() + +# Create sample data +data = {"a": [1, 2, 3, 4, 5], "b": [10, 20, 30, 40, 50]} + +# Create a DataFrame from the dictionary +df = ctx.from_pydict(data, "my_table") + +# Filter the DataFrame (for example, keep rows where a > 2) +df_filtered = df.filter(col("a") > literal(2)) + +# Register the dataframe as a view with the context +ctx.register_view("view1", df_filtered) + +# Now run a SQL query against the registered view +df_view = ctx.sql("SELECT * FROM view1") + +# Collect the results +results = df_view.collect() + +# Convert results to a list of dictionaries for display +result_dicts = [batch.to_pydict() for batch in results] + +print(result_dicts) +``` + +This will output: + +```python +[{'a': [3, 4, 5], 'b': [30, 40, 50]}] +``` + ## Configuration It is possible to configure runtime (memory and disk settings) and configuration settings when creating a context. ```python runtime = ( - RuntimeConfig() + RuntimeEnvBuilder() .with_disk_manager_os() .with_fair_spill_pool(10000000) ) @@ -114,6 +156,11 @@ Printing the context will show the current configuration settings. print(ctx) ``` +## Extensions + +For information about how to extend DataFusion Python, please see the extensions page of the +[online documentation](https://datafusion.apache.org/python/). + ## More Examples See [examples](examples/README.md) for more information. @@ -138,7 +185,13 @@ See [examples](examples/README.md) for more information. - [Serialize query plans using Substrait](https://github.com/apache/datafusion-python/blob/main/examples/substrait.py) -## How to install (from pip) +## How to install + +### uv + +```bash +uv add datafusion +``` ### Pip @@ -164,62 +217,80 @@ You can verify the installation by running: ## How to develop -This assumes that you have rust and cargo installed. We use the workflow recommended by [pyo3](https://github.com/PyO3/pyo3) and [maturin](https://github.com/PyO3/maturin). +This assumes that you have rust and cargo installed. We use the workflow recommended by [pyo3](https://github.com/PyO3/pyo3) and [maturin](https://github.com/PyO3/maturin). The Maturin tools used in this workflow can be installed either via `uv` or `pip`. Both approaches should offer the same experience. It is recommended to use `uv` since it has significant performance improvements +over `pip`. -The Maturin tools used in this workflow can be installed either via Conda or Pip. Both approaches should offer the same experience. Multiple approaches are only offered to appease developer preference. Bootstrapping for both Conda and Pip are as follows. 
+Currently, protobuf support requires either [protobuf](https://protobuf.dev/installation/) or cmake to be installed. -Bootstrap (Conda): +Bootstrap (`uv`): + +By default, `uv` will attempt to build the datafusion python package. For development we prefer to build manually. This means +that when creating your virtual environment using `uv sync` you need to pass the additional flag `--no-install-package datafusion`, +and for `uv run` commands the additional parameter `--no-project`. ```bash # fetch this repo git clone git@github.com:apache/datafusion-python.git -# create the conda environment for dev -conda env create -f ./conda/environments/datafusion-dev.yaml -n datafusion-dev -# activate the conda environment -conda activate datafusion-dev +# cd to the repo root +cd datafusion-python/ +# create the virtual environment +uv sync --dev --no-install-package datafusion +# activate the environment +source .venv/bin/activate ``` -Bootstrap (Pip): +Bootstrap (`pip`): ```bash # fetch this repo git clone git@github.com:apache/datafusion-python.git +# cd to the repo root +cd datafusion-python/ # prepare development environment (used to build wheel / install in development) -python3 -m venv venv +python3 -m venv .venv # activate the venv -source venv/bin/activate +source .venv/bin/activate # update pip itself if necessary python -m pip install -U pip -# install dependencies (for Python 3.8+) -python -m pip install -r requirements.in +# install dependencies +python -m pip install -r pyproject.toml ``` The tests rely on test data in git submodules. ```bash -git submodule init -git submodule update +git submodule update --init ``` Whenever rust code changes (your changes or via `git pull`): ```bash # make sure you activate the venv using "source venv/bin/activate" first -maturin develop +maturin develop --uv python -m pytest ``` +Alternatively, if you are using `uv` you can do the following without +needing to activate the virtual environment: + +```bash +uv run --no-project maturin develop --uv +uv run --no-project pytest . +``` + ### Running & Installing pre-commit hooks -arrow-datafusion-python takes advantage of [pre-commit](https://pre-commit.com/) to assist developers with code linting to help reduce +`datafusion-python` takes advantage of [pre-commit](https://pre-commit.com/) to assist developers with code linting to help reduce the number of commits that ultimately fail in CI due to linter errors. Using the pre-commit hooks is optional for the developer but certainly helpful for keeping PRs clean and concise. Our pre-commit hooks can be installed by running `pre-commit install`, which will install the configurations in -your ARROW_DATAFUSION_PYTHON_ROOT/.github directory and run each time you perform a commit, failing to complete +your DATAFUSION_PYTHON_ROOT/.github directory and run each time you perform a commit, failing to complete the commit if an offending lint is found, allowing you to make changes locally before pushing. -The pre-commit hooks can also be run adhoc without installing them by simply running `pre-commit run --all-files` +The pre-commit hooks can also be run ad hoc without installing them by simply running `pre-commit run --all-files`. + +NOTE: the current `pre-commit` hooks require Docker and cmake. See the note on protobuf above. ## Running linters without using pre-commit @@ -234,18 +305,8 @@ There are scripts in `ci/scripts` for running Rust and Python linters. 
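Once `maturin develop` (or the `uv run` equivalent above) succeeds, a quick one-line query confirms that the build is importable and functional. This is a hedged sketch rather than part of the repository's tooling; it relies only on the `SessionContext.sql`, `collect`, and `to_pydict` APIs shown in the README examples above.

```python
# Minimal smoke test for a local development build (a sketch, not a repo script).
from datafusion import SessionContext

ctx = SessionContext()
# SELECT without FROM is valid DataFusion SQL; collect() returns pyarrow RecordBatches.
batches = ctx.sql("SELECT 1 AS one").collect()
print([batch.to_pydict() for batch in batches])  # expected: [{'one': [1]}]
```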
## How to update dependencies -To change test dependencies, change the `requirements.in` and run +To change test dependencies, edit `pyproject.toml` and run ```bash -# install pip-tools (this can be done only once), also consider running in venv -python -m pip install pip-tools -python -m piptools compile --generate-hashes -o requirements-310.txt +uv sync --dev --no-install-package datafusion ``` - -To update dependencies, run with `-U` - -```bash -python -m piptools compile -U --generate-hashes -o requirements-310.txt -``` - -More details [here](https://github.com/jazzband/pip-tools) diff --git a/benchmarks/db-benchmark/db-benchmark.dockerfile b/benchmarks/db-benchmark/db-benchmark.dockerfile index d8842b250..af2edd0f4 100644 --- a/benchmarks/db-benchmark/db-benchmark.dockerfile +++ b/benchmarks/db-benchmark/db-benchmark.dockerfile @@ -58,7 +58,7 @@ RUN cd pandas && \ RUN cd modin && \ virtualenv py-modin --python=/usr/bin/python3.10 -RUN Rscript -e 'install.packages(c("jsonlite","bit64","devtools","rmarkdown"), dependecies=TRUE, repos="https://cloud.r-project.org")' +RUN Rscript -e 'install.packages(c("jsonlite","bit64","devtools","rmarkdown"), dependencies=TRUE, repos="https://cloud.r-project.org")' SHELL ["/bin/bash", "-c"] diff --git a/benchmarks/db-benchmark/groupby-datafusion.py b/benchmarks/db-benchmark/groupby-datafusion.py index 3a4399f7d..533166695 100644 --- a/benchmarks/db-benchmark/groupby-datafusion.py +++ b/benchmarks/db-benchmark/groupby-datafusion.py @@ -15,27 +15,30 @@ # specific language governing permissions and limitations # under the License. -import os import gc +import os import timeit +from pathlib import Path + import datafusion as df +import pyarrow as pa from datafusion import ( - col, - functions as f, - RuntimeConfig, + RuntimeEnvBuilder, SessionConfig, SessionContext, + col, +) +from datafusion import ( + functions as f, ) -import pyarrow from pyarrow import csv as pacsv - print("# groupby-datafusion.py", flush=True) -exec(open("./_helpers/helpers.py").read()) +exec(Path("./_helpers/helpers.py").read_text()) -def ans_shape(batches): +def ans_shape(batches) -> tuple[int, int]: rows, cols = 0, 0 for batch in batches: rows += batch.num_rows @@ -46,7 +49,7 @@ def ans_shape(batches): return rows, cols -def execute(df): +def execute(df) -> list: print(df.execution_plan().display_indent()) return df.collect() @@ -63,17 +66,17 @@ def execute(df): sql = True data_name = os.environ["SRC_DATANAME"] -src_grp = os.path.join("data", data_name + ".csv") +src_grp = Path("data") / f"{data_name}.csv" print("loading dataset %s" % src_grp, flush=True) -schema = pyarrow.schema( +schema = pa.schema( [ - ("id4", pyarrow.int32()), - ("id5", pyarrow.int32()), - ("id6", pyarrow.int32()), - ("v1", pyarrow.int32()), - ("v2", pyarrow.int32()), - ("v3", pyarrow.float64()), + ("id4", pa.int32()), + ("id5", pa.int32()), + ("id6", pa.int32()), + ("v1", pa.int32()), + ("v2", pa.int32()), + ("v3", pa.float64()), ] ) @@ -85,12 +88,14 @@ def execute(df): # create a session context with explicit runtime and config settings runtime = ( - RuntimeConfig().with_disk_manager_os().with_fair_spill_pool(64 * 1024 * 1024 * 1024) + RuntimeEnvBuilder() + .with_disk_manager_os() + .with_fair_spill_pool(64 * 1024 * 1024 * 1024) ) config = ( SessionConfig() - .with_repartition_joins(False) - .with_repartition_aggregations(False) + .with_repartition_joins(enabled=False) + .with_repartition_aggregations(enabled=False) .set("datafusion.execution.coalesce_batches", "false") ) ctx = SessionContext(config, runtime) 
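The `groupby-datafusion.py` changes above replace the removed `RuntimeConfig` builder with `RuntimeEnvBuilder` and pass the `SessionConfig` repartition toggles as keyword arguments. Below is a minimal sketch of the resulting session setup, using only calls that appear in this diff; the 64 GiB spill pool mirrors the benchmark's setting.

```python
from datafusion import RuntimeEnvBuilder, SessionConfig, SessionContext

# Spill to local disk through the OS disk manager, bounded by a fair spill pool.
runtime = (
    RuntimeEnvBuilder()
    .with_disk_manager_os()
    .with_fair_spill_pool(64 * 1024 * 1024 * 1024)
)

# The repartition switches are now keyword-only booleans.
config = (
    SessionConfig()
    .with_repartition_joins(enabled=False)
    .with_repartition_aggregations(enabled=False)
    .set("datafusion.execution.coalesce_batches", "false")
)

ctx = SessionContext(config, runtime)
```

Passing both objects at construction keeps the runtime and planner settings explicit, which is what the benchmark relies on when it disables repartitioning.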
diff --git a/benchmarks/db-benchmark/join-datafusion.py b/benchmarks/db-benchmark/join-datafusion.py index 4d59c7dc2..3be296c81 100755 --- a/benchmarks/db-benchmark/join-datafusion.py +++ b/benchmarks/db-benchmark/join-datafusion.py @@ -15,21 +15,22 @@ # specific language governing permissions and limitations # under the License. -import os import gc +import os import timeit +from pathlib import Path + import datafusion as df -from datafusion import functions as f from datafusion import col +from datafusion import functions as f from pyarrow import csv as pacsv - print("# join-datafusion.py", flush=True) -exec(open("./_helpers/helpers.py").read()) +exec(Path("./_helpers/helpers.py").read_text()) -def ans_shape(batches): +def ans_shape(batches) -> tuple[int, int]: rows, cols = 0, 0 for batch in batches: rows += batch.num_rows @@ -49,15 +50,16 @@ def ans_shape(batches): on_disk = "FALSE" data_name = os.environ["SRC_DATANAME"] -src_jn_x = os.path.join("data", data_name + ".csv") +src_jn_x = Path("data") / f"{data_name}.csv" y_data_name = join_to_tbls(data_name) src_jn_y = [ - os.path.join("data", y_data_name[0] + ".csv"), - os.path.join("data", y_data_name[1] + ".csv"), - os.path.join("data", y_data_name[2] + ".csv"), + Path("data") / f"{y_data_name[0]}.csv", + Path("data") / f"{y_data_name[1]}.csv", + Path("data") / f"{y_data_name[2]}.csv", ] if len(src_jn_y) != 3: - raise Exception("Something went wrong in preparing files used for join") + error_msg = "Something went wrong in preparing files used for join" + raise Exception(error_msg) print( "loading datasets " @@ -74,7 +76,8 @@ def ans_shape(batches): ctx = df.SessionContext() print(ctx) -# TODO we should be applying projections to these table reads to crete relations of different sizes +# TODO we should be applying projections to these table reads to create relations +# of different sizes x_data = pacsv.read_csv( src_jn_x, convert_options=pacsv.ConvertOptions(auto_dict_encode=True) diff --git a/benchmarks/max_cpu_usage.py b/benchmarks/max_cpu_usage.py new file mode 100644 index 000000000..ae73baad6 --- /dev/null +++ b/benchmarks/max_cpu_usage.py @@ -0,0 +1,107 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""Benchmark script showing how to maximize CPU usage. + +This script demonstrates one example of tuning DataFusion for improved parallelism +and CPU utilization. It uses synthetic in-memory data and performs simple aggregation +operations to showcase the impact of partitioning configuration. + +IMPORTANT: This is a simplified example designed to illustrate partitioning concepts. +Actual performance in your applications may vary significantly based on many factors: + +- Type of table providers (Parquet files, CSV, databases, etc.) 
+- I/O operations and storage characteristics (local disk, network, cloud storage) +- Query complexity and operation types (joins, window functions, complex expressions) +- Data distribution and size characteristics +- Memory available and hardware specifications +- Network latency for distributed data sources + +It is strongly recommended that you create similar benchmarks tailored to your specific: +- Hardware configuration +- Data sources and formats +- Typical query patterns and workloads +- Performance requirements + +This will give you more accurate insights into how DataFusion configuration options +will affect your particular use case. +""" + +from __future__ import annotations + +import argparse +import multiprocessing +import time + +import pyarrow as pa +from datafusion import SessionConfig, SessionContext, col +from datafusion import functions as f + + +def main(num_rows: int, partitions: int) -> None: + """Run a simple aggregation after repartitioning. + + This function demonstrates basic partitioning concepts using synthetic data. + Real-world performance will depend on your specific data sources, query types, + and system configuration. + """ + # Create some example data (synthetic in-memory data for demonstration) + # Note: Real applications typically work with files, databases, or other + # data sources that have different I/O and distribution characteristics + array = pa.array(range(num_rows)) + batch = pa.record_batch([array], names=["a"]) + + # Configure the session to use a higher target partition count and + # enable automatic repartitioning. + config = ( + SessionConfig() + .with_target_partitions(partitions) + .with_repartition_joins(enabled=True) + .with_repartition_aggregations(enabled=True) + .with_repartition_windows(enabled=True) + ) + ctx = SessionContext(config) + + # Register the input data and repartition manually to ensure that all + # partitions are used. 
+ df = ctx.create_dataframe([[batch]]).repartition(partitions) + + start = time.time() + df = df.aggregate([], [f.sum(col("a"))]) + df.collect() + end = time.time() + + print( + f"Processed {num_rows} rows using {partitions} partitions in {end - start:.3f}s" + ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser(description=__doc__) + parser.add_argument( + "--rows", + type=int, + default=1_000_000, + help="Number of rows in the generated dataset", + ) + parser.add_argument( + "--partitions", + type=int, + default=multiprocessing.cpu_count(), + help="Target number of partitions to use", + ) + args = parser.parse_args() + main(args.rows, args.partitions) diff --git a/benchmarks/tpch/create_tables.sql b/benchmarks/tpch/create_tables.sql index 4b2209c4b..9f3aeea20 100644 --- a/benchmarks/tpch/create_tables.sql +++ b/benchmarks/tpch/create_tables.sql @@ -13,7 +13,10 @@ CREATE EXTERNAL TABLE customer ( c_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/customer.csv'; CREATE EXTERNAL TABLE lineitem ( @@ -36,7 +39,10 @@ CREATE EXTERNAL TABLE lineitem ( l_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/lineitem.csv'; CREATE EXTERNAL TABLE nation ( @@ -47,7 +53,10 @@ CREATE EXTERNAL TABLE nation ( n_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/nation.csv'; CREATE EXTERNAL TABLE orders ( @@ -63,7 +72,10 @@ CREATE EXTERNAL TABLE orders ( o_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/orders.csv'; CREATE EXTERNAL TABLE part ( @@ -79,7 +91,10 @@ CREATE EXTERNAL TABLE part ( p_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/part.csv'; CREATE EXTERNAL TABLE partsupp ( @@ -91,7 +106,10 @@ CREATE EXTERNAL TABLE partsupp ( ps_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/partsupp.csv'; CREATE EXTERNAL TABLE region ( @@ -101,7 +119,10 @@ CREATE EXTERNAL TABLE region ( r_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/region.csv'; CREATE EXTERNAL TABLE supplier ( @@ -115,5 +136,8 @@ CREATE EXTERNAL TABLE supplier ( s_extra VARCHAR NOT NULL, ) STORED AS CSV -WITH HEADER ROW DELIMITER '|' +OPTIONS ( + format.delimiter '|', + format.has_header true +) LOCATION '$PATH/supplier.csv'; \ No newline at end of file diff --git a/benchmarks/tpch/tpch.py b/benchmarks/tpch/tpch.py index 7f104a4cb..ffee5554c 100644 --- a/benchmarks/tpch/tpch.py +++ b/benchmarks/tpch/tpch.py @@ -16,19 +16,21 @@ # under the License. 
import argparse -from datafusion import SessionContext import time +from pathlib import Path + +from datafusion import SessionContext -def bench(data_path, query_path): - with open("results.csv", "w") as results: +def bench(data_path, query_path) -> None: + with Path("results.csv").open("w") as results: # register tables start = time.time() total_time_millis = 0 # create context # runtime = ( - # RuntimeConfig() + # RuntimeEnvBuilder() # .with_disk_manager_os() # .with_fair_spill_pool(10000000) # ) @@ -44,7 +46,7 @@ def bench(data_path, query_path): print("Configuration:\n", ctx) # register tables - with open("create_tables.sql") as f: + with Path("create_tables.sql").open() as f: sql = "" for line in f.readlines(): if line.startswith("--"): @@ -58,19 +60,16 @@ def bench(data_path, query_path): end = time.time() time_millis = (end - start) * 1000 total_time_millis += time_millis - print("setup,{}".format(round(time_millis, 1))) - results.write("setup,{}\n".format(round(time_millis, 1))) + print(f"setup,{round(time_millis, 1)}") + results.write(f"setup,{round(time_millis, 1)}\n") results.flush() # run queries for query in range(1, 23): - with open("{}/q{}.sql".format(query_path, query)) as f: + with Path(f"{query_path}/q{query}.sql").open() as f: text = f.read() tmp = text.split(";") - queries = [] - for str in tmp: - if len(str.strip()) > 0: - queries.append(str.strip()) + queries = [s.strip() for s in tmp if len(s.strip()) > 0] try: start = time.time() @@ -82,14 +81,14 @@ def bench(data_path, query_path): end = time.time() time_millis = (end - start) * 1000 total_time_millis += time_millis - print("q{},{}".format(query, round(time_millis, 1))) - results.write("q{},{}\n".format(query, round(time_millis, 1))) + print(f"q{query},{round(time_millis, 1)}") + results.write(f"q{query},{round(time_millis, 1)}\n") results.flush() except Exception as e: print("query", query, "failed", e) - print("total,{}".format(round(total_time_millis, 1))) - results.write("total,{}\n".format(round(total_time_millis, 1))) + print(f"total,{round(total_time_millis, 1)}") + results.write(f"total,{round(total_time_millis, 1)}\n") if __name__ == "__main__": diff --git a/ci/scripts/rust_fmt.sh b/ci/scripts/rust_fmt.sh index 9d8325877..05cb6b208 100755 --- a/ci/scripts/rust_fmt.sh +++ b/ci/scripts/rust_fmt.sh @@ -18,4 +18,4 @@ # under the License. set -ex -cargo fmt --all -- --check +cargo +nightly fmt --all -- --check diff --git a/conda/recipes/build.sh b/conda/recipes/build.sh deleted file mode 100644 index 259894313..000000000 --- a/conda/recipes/build.sh +++ /dev/null @@ -1,84 +0,0 @@ -#!/bin/bash -# -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-# - -set -ex - -# See https://github.com/conda-forge/rust-feedstock/blob/master/recipe/build.sh for cc env explanation -if [ "$c_compiler" = gcc ] ; then - case "$target_platform" in - linux-64) rust_env_arch=X86_64_UNKNOWN_LINUX_GNU ;; - linux-aarch64) rust_env_arch=AARCH64_UNKNOWN_LINUX_GNU ;; - linux-ppc64le) rust_env_arch=POWERPC64LE_UNKNOWN_LINUX_GNU ;; - *) echo "unknown target_platform $target_platform" ; exit 1 ;; - esac - - export CARGO_TARGET_${rust_env_arch}_LINKER=$CC -fi - -declare -a _xtra_maturin_args - -mkdir -p $SRC_DIR/.cargo - -if [ "$target_platform" = "osx-64" ] ; then - cat <<EOF >> $SRC_DIR/.cargo/config -[target.x86_64-apple-darwin] -linker = "$CC" -rustflags = [ - "-C", "link-arg=-undefined", - "-C", "link-arg=dynamic_lookup", -] - -EOF - - _xtra_maturin_args+=(--target=x86_64-apple-darwin) - -elif [ "$target_platform" = "osx-arm64" ] ; then - cat <<EOF >> $SRC_DIR/.cargo/config -# Required for intermediate codegen stuff -[target.x86_64-apple-darwin] -linker = "$CC_FOR_BUILD" - -# Required for final binary artifacts for target -[target.aarch64-apple-darwin] -linker = "$CC" -rustflags = [ - "-C", "link-arg=-undefined", - "-C", "link-arg=dynamic_lookup", -] - -EOF - _xtra_maturin_args+=(--target=aarch64-apple-darwin) - - # This variable must be set to the directory containing the target's libpython DSO - export PYO3_CROSS_LIB_DIR=$PREFIX/lib - - # xref: https://github.com/PyO3/pyo3/commit/7beb2720 - export PYO3_PYTHON_VERSION=${PY_VER} - - # xref: https://github.com/conda-forge/python-feedstock/issues/621 - sed -i.bak 's,aarch64,arm64,g' $BUILD_PREFIX/venv/lib/os-patch.py - sed -i.bak 's,aarch64,arm64,g' $BUILD_PREFIX/venv/lib/platform-patch.py -fi - -maturin build -vv -j "${CPU_COUNT}" --release --strip --features substrait --manylinux off --interpreter="${PYTHON}" "${_xtra_maturin_args[@]}" - -"${PYTHON}" -m pip install $SRC_DIR/target/wheels/datafusion*.whl --no-deps -vv - -cargo-bundle-licenses --format yaml --output THIRDPARTY.yml diff --git a/conda/recipes/meta.yaml b/conda/recipes/meta.yaml deleted file mode 100644 index 72ac7f501..000000000 --- a/conda/recipes/meta.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -{% set name = "datafusion" %} -{% set major_minor_patch = environ.get('GIT_DESCRIBE_TAG', '0.0.0.dev').split('.') %} -{% set new_patch = major_minor_patch[2] | int + 1 %} -{% set version = (major_minor_patch[:2] + [new_patch]) | join('.') + environ.get('VERSION_SUFFIX', '') %} - - -package: - name: {{ name|lower }} - version: {{ version }} - -source: - git_url: ../.. 
- -build: - number: {{ GIT_DESCRIBE_NUMBER }} - string: py{{ python | replace(".", "") }}_{{ GIT_DESCRIBE_HASH }}_{{ GIT_DESCRIBE_NUMBER }} - -requirements: - build: - - python # [build_platform != target_platform] - - cross-python_{{ target_platform }} # [build_platform != target_platform] - - zlib # [build_platform != target_platform] - - {{ compiler('c') }} - - {{ compiler('rust') }} - - cargo-bundle-licenses - - maturin >=1.5.1,<1.6.0 - - libprotobuf =3 - host: - - python - - maturin >=1.5.1,<1.6.0 - - pip - - zlib - - xz # [linux64] - run: - - python - - pyarrow >=11.0.0 - -test: - imports: - - datafusion - commands: - - pip check - requires: - - pip - -about: - home: https://arrow.apache.org/datafusion - license: Apache-2.0 - license_family: APACHE - license_file: - - LICENSE.txt - - THIRDPARTY.yml - description: | - DataFusion is an extensible query execution framework, written in Rust, - that uses Apache Arrow as its in-memory format. - doc_url: https://arrow.apache.org/datafusion - dev_url: https://github.com/apache/arrow-datafusion diff --git a/conda/environments/datafusion-dev.yaml b/conftest.py similarity index 62% rename from conda/environments/datafusion-dev.yaml rename to conftest.py index 1f6f23942..1c89f92bc 100644 --- a/conda/environments/datafusion-dev.yaml +++ b/conftest.py @@ -15,30 +15,15 @@ # specific language governing permissions and limitations # under the License. -channels: - - conda-forge -dependencies: - - black - - flake8 - - isort - - maturin>=1.5.1 - - mypy - - numpy - - pyarrow>=11.0.0 - - pytest - - toml - - importlib_metadata - - python>=3.10 - # Packages useful for building distributions and releasing - - mamba - - conda-build - - anaconda-client - # Packages for documentation building - - sphinx - - pydata-sphinx-theme==0.8.0 - - myst-parser - - jinja2 - # GPU packages - - cudf - - cudatoolkit=11.8 -name: datafusion-dev +"""Pytest configuration for doctest namespace injection.""" + +import datafusion as dfn +import numpy as np +import pytest + + +@pytest.fixture(autouse=True) +def _doctest_namespace(doctest_namespace: dict) -> None: + """Add common imports to the doctest namespace.""" + doctest_namespace["dfn"] = dfn + doctest_namespace["np"] = np diff --git a/dev/changelog/43.0.0.md b/dev/changelog/43.0.0.md new file mode 100644 index 000000000..bbb766910 --- /dev/null +++ b/dev/changelog/43.0.0.md @@ -0,0 +1,73 @@ + + +# Apache DataFusion Python 43.0.0 Changelog + +This release consists of 26 commits from 7 contributors. See credits at the end of this changelog for more information. 
+ +**Implemented enhancements:** + +- feat: expose `drop` method [#913](https://github.com/apache/datafusion-python/pull/913) (ion-elgreco) +- feat: expose `join_on` [#914](https://github.com/apache/datafusion-python/pull/914) (ion-elgreco) +- feat: add fill_null/nan expressions [#919](https://github.com/apache/datafusion-python/pull/919) (ion-elgreco) +- feat: add `with_columns` [#909](https://github.com/apache/datafusion-python/pull/909) (ion-elgreco) +- feat: add `cast` to DataFrame [#916](https://github.com/apache/datafusion-python/pull/916) (ion-elgreco) +- feat: add `head`, `tail` methods [#915](https://github.com/apache/datafusion-python/pull/915) (ion-elgreco) + +**Fixed bugs:** + +- fix: remove use of deprecated `make_scalar_function` [#906](https://github.com/apache/datafusion-python/pull/906) (Michael-J-Ward) +- fix: udwf example [#948](https://github.com/apache/datafusion-python/pull/948) (mesejo) + +**Other:** + +- Ts/minor updates release process [#903](https://github.com/apache/datafusion-python/pull/903) (timsaucer) +- build(deps): bump pyo3 from 0.22.3 to 0.22.4 [#910](https://github.com/apache/datafusion-python/pull/910) (dependabot[bot]) +- refactor: `from_arrow` use protocol typehints [#917](https://github.com/apache/datafusion-python/pull/917) (ion-elgreco) +- Change requires-python version in pyproject.toml [#924](https://github.com/apache/datafusion-python/pull/924) (kosiew) +- chore: deprecate `select_columns` [#911](https://github.com/apache/datafusion-python/pull/911) (ion-elgreco) +- build(deps): bump uuid from 1.10.0 to 1.11.0 [#927](https://github.com/apache/datafusion-python/pull/927) (dependabot[bot]) +- Add array_empty scalar function [#931](https://github.com/apache/datafusion-python/pull/931) (kosiew) +- add `cardinality` function to calculate total distinct elements in an array [#937](https://github.com/apache/datafusion-python/pull/937) (kosiew) +- Add empty scalar function (alias of array_empty), fix a small typo [#938](https://github.com/apache/datafusion-python/pull/938) (kosiew) +- README How to develop section now also works on Apple M1 [#940](https://github.com/apache/datafusion-python/pull/940) (drauschenbach) +- refactor: dataframe `join` params [#912](https://github.com/apache/datafusion-python/pull/912) (ion-elgreco) +- Upgrade to Datafusion 43 [#905](https://github.com/apache/datafusion-python/pull/905) (Michael-J-Ward) +- build(deps): bump tokio from 1.40.0 to 1.41.1 [#946](https://github.com/apache/datafusion-python/pull/946) (dependabot[bot]) +- Add list_cat, list_concat, list_repeat [#942](https://github.com/apache/datafusion-python/pull/942) (kosiew) +- Add foreign table providers [#921](https://github.com/apache/datafusion-python/pull/921) (timsaucer) +- Add make_list and tests for make_list, make_array [#949](https://github.com/apache/datafusion-python/pull/949) (kosiew) +- Documentation updates: simplify examples and add section on data sources [#955](https://github.com/apache/datafusion-python/pull/955) (timsaucer) +- Add datafusion.extract [#959](https://github.com/apache/datafusion-python/pull/959) (kosiew) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 9 Ion Koutsouris + 7 kosiew + 3 Tim Saucer + 3 dependabot[bot] + 2 Michael J Ward + 1 Daniel Mesejo + 1 David Rauschenbach +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. 
diff --git a/dev/changelog/44.0.0.md b/dev/changelog/44.0.0.md new file mode 100644 index 000000000..c5ed4bdb0 --- /dev/null +++ b/dev/changelog/44.0.0.md @@ -0,0 +1,58 @@ + + +# Apache DataFusion Python 44.0.0 Changelog + +This release consists of 12 commits from 5 contributors. See credits at the end of this changelog for more information. + +**Implemented enhancements:** + +- feat: support enable_url_table config [#980](https://github.com/apache/datafusion-python/pull/980) (chenkovsky) +- feat: remove DataFusion pyarrow feat [#1000](https://github.com/apache/datafusion-python/pull/1000) (timsaucer) + +**Fixed bugs:** + +- fix: correct LZ0 to LZO in compression options [#995](https://github.com/apache/datafusion-python/pull/995) (kosiew) + +**Other:** + +- Add arrow cast [#962](https://github.com/apache/datafusion-python/pull/962) (kosiew) +- Fix small issues in pyproject.toml [#976](https://github.com/apache/datafusion-python/pull/976) (kylebarron) +- chore: set validation and type hint for ffi tableprovider [#983](https://github.com/apache/datafusion-python/pull/983) (ion-elgreco) +- Support async iteration of RecordBatchStream [#975](https://github.com/apache/datafusion-python/pull/975) (kylebarron) +- Chore/upgrade datafusion 44 [#973](https://github.com/apache/datafusion-python/pull/973) (timsaucer) +- Default to ZSTD compression when writing Parquet [#981](https://github.com/apache/datafusion-python/pull/981) (kosiew) +- Feat/use uv python management [#994](https://github.com/apache/datafusion-python/pull/994) (timsaucer) +- minor: Update dependencies prior to release [#999](https://github.com/apache/datafusion-python/pull/999) (timsaucer) +- Apply import ordering in ruff check [#1001](https://github.com/apache/datafusion-python/pull/1001) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 5 Tim Saucer + 3 kosiew + 2 Kyle Barron + 1 Chongchen Chen + 1 Ion Koutsouris +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/45.0.0.md b/dev/changelog/45.0.0.md new file mode 100644 index 000000000..93659b171 --- /dev/null +++ b/dev/changelog/45.0.0.md @@ -0,0 +1,42 @@ + + +# Apache DataFusion Python 45.0.0 Changelog + +This release consists of 2 commits from 2 contributors. See credits at the end of this changelog for more information. + +**Fixed bugs:** + +- fix: add to_timestamp_nanos [#1020](https://github.com/apache/datafusion-python/pull/1020) (chenkovsky) + +**Other:** + +- Chore/upgrade datafusion 45 [#1010](https://github.com/apache/datafusion-python/pull/1010) (kevinjqliu) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 1 Kevin Liu + 1 Tim Saucer +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/46.0.0.md b/dev/changelog/46.0.0.md new file mode 100644 index 000000000..3e5768099 --- /dev/null +++ b/dev/changelog/46.0.0.md @@ -0,0 +1,73 @@ + + +# Apache DataFusion Python 46.0.0 Changelog + +This release consists of 21 commits from 11 contributors. See credits at the end of this changelog for more information. 
+ +**Implemented enhancements:** + +- feat: reads using global ctx [#982](https://github.com/apache/datafusion-python/pull/982) (ion-elgreco) +- feat: Implementation of udf and udaf decorator [#1040](https://github.com/apache/datafusion-python/pull/1040) (CrystalZhou0529) +- feat: expose regex_count function [#1066](https://github.com/apache/datafusion-python/pull/1066) (nirnayroy) +- feat: Update DataFusion dependency to 46 [#1079](https://github.com/apache/datafusion-python/pull/1079) (timsaucer) + +**Fixed bugs:** + +- fix: add to_timestamp_nanos [#1020](https://github.com/apache/datafusion-python/pull/1020) (chenkovsky) +- fix: type checking [#993](https://github.com/apache/datafusion-python/pull/993) (chenkovsky) + +**Other:** + +- [infra] Fail Clippy on rust build warnings [#1029](https://github.com/apache/datafusion-python/pull/1029) (kevinjqliu) +- Add user documentation for the FFI approach [#1031](https://github.com/apache/datafusion-python/pull/1031) (timsaucer) +- build(deps): bump arrow from 54.1.0 to 54.2.0 [#1035](https://github.com/apache/datafusion-python/pull/1035) (dependabot[bot]) +- Chore: Release datafusion-python 45 [#1024](https://github.com/apache/datafusion-python/pull/1024) (timsaucer) +- Enable Dataframe to be converted into views which can be used in register_table [#1016](https://github.com/apache/datafusion-python/pull/1016) (kosiew) +- Add ruff check for missing futures import [#1052](https://github.com/apache/datafusion-python/pull/1052) (timsaucer) +- Enable take comments to assign issues to users [#1058](https://github.com/apache/datafusion-python/pull/1058) (timsaucer) +- Update python min version to 3.9 [#1043](https://github.com/apache/datafusion-python/pull/1043) (kevinjqliu) +- feat/improve ruff test coverage [#1055](https://github.com/apache/datafusion-python/pull/1055) (timsaucer) +- feat/making global context accessible for users [#1060](https://github.com/apache/datafusion-python/pull/1060) (jsai28) +- Renaming Internal Structs [#1059](https://github.com/apache/datafusion-python/pull/1059) (Spaarsh) +- test: add pytest asyncio tests [#1063](https://github.com/apache/datafusion-python/pull/1063) (jsai28) +- Add decorator for udwf [#1061](https://github.com/apache/datafusion-python/pull/1061) (kosiew) +- Add additional ruff suggestions [#1062](https://github.com/apache/datafusion-python/pull/1062) (Spaarsh) +- Improve collection during repr and repr_html [#1036](https://github.com/apache/datafusion-python/pull/1036) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 7 Tim Saucer + 2 Kevin Liu + 2 Spaarsh + 2 jsai28 + 2 kosiew + 1 Chen Chongchen + 1 Chongchen Chen + 1 Crystal Zhou + 1 Ion Koutsouris + 1 Nirnay Roy + 1 dependabot[bot] +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/47.0.0.md b/dev/changelog/47.0.0.md new file mode 100644 index 000000000..a7ed90313 --- /dev/null +++ b/dev/changelog/47.0.0.md @@ -0,0 +1,64 @@ + + +# Apache DataFusion Python 47.0.0 Changelog + +This release consists of 23 commits from 5 contributors. See credits at the end of this changelog for more information. 
+ +**Implemented enhancements:** + +- feat: support unparser [#1088](https://github.com/apache/datafusion-python/pull/1088) (chenkovsky) +- feat: update datafusion dependency 47 [#1107](https://github.com/apache/datafusion-python/pull/1107) (timsaucer) +- feat: alias with metadata [#1111](https://github.com/apache/datafusion-python/pull/1111) (chenkovsky) +- feat: add missing PyLogicalPlan to_variant [#1085](https://github.com/apache/datafusion-python/pull/1085) (chenkovsky) +- feat: add user defined table function support [#1113](https://github.com/apache/datafusion-python/pull/1113) (timsaucer) + +**Fixed bugs:** + +- fix: recursive import [#1117](https://github.com/apache/datafusion-python/pull/1117) (chenkovsky) + +**Other:** + +- Update changelog and version number [#1089](https://github.com/apache/datafusion-python/pull/1089) (timsaucer) +- Documentation updates: mention correct dataset on basics page [#1081](https://github.com/apache/datafusion-python/pull/1081) (floscha) +- Add Configurable HTML Table Formatter for DataFusion DataFrames in Python [#1100](https://github.com/apache/datafusion-python/pull/1100) (kosiew) +- Add DataFrame usage guide with HTML rendering customization options [#1108](https://github.com/apache/datafusion-python/pull/1108) (kosiew) +- 1075/enhancement/Make col class with __getattr__ [#1076](https://github.com/apache/datafusion-python/pull/1076) (deanm0000) +- 1064/enhancement/add functions to Expr class [#1074](https://github.com/apache/datafusion-python/pull/1074) (deanm0000) +- ci: require approving review [#1122](https://github.com/apache/datafusion-python/pull/1122) (timsaucer) +- Partial fix for 1078: Enhance DataFrame Formatter Configuration with Memory and Display Controls [#1119](https://github.com/apache/datafusion-python/pull/1119) (kosiew) +- Add fill_null method to DataFrame API for handling missing values [#1019](https://github.com/apache/datafusion-python/pull/1019) (kosiew) +- minor: reduce error size [#1126](https://github.com/apache/datafusion-python/pull/1126) (timsaucer) +- Move the udf module to user_defined [#1112](https://github.com/apache/datafusion-python/pull/1112) (timsaucer) +- add unit tests for expression functions [#1121](https://github.com/apache/datafusion-python/pull/1121) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 12 Tim Saucer + 4 Chen Chongchen + 4 kosiew + 2 deanm0000 + 1 Florian Schäfer +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/48.0.0.md b/dev/changelog/48.0.0.md new file mode 100644 index 000000000..80bc61aca --- /dev/null +++ b/dev/changelog/48.0.0.md @@ -0,0 +1,59 @@ + + +# Apache DataFusion Python 48.0.0 Changelog + +This release consists of 15 commits from 6 contributors. See credits at the end of this changelog for more information. 
+ +**Implemented enhancements:** + +- feat: upgrade df48 dependency [#1143](https://github.com/apache/datafusion-python/pull/1143) (timsaucer) +- feat: Support Parquet writer options [#1123](https://github.com/apache/datafusion-python/pull/1123) (nuno-faria) +- feat: dataframe string formatter [#1170](https://github.com/apache/datafusion-python/pull/1170) (timsaucer) +- feat: collect once during display() in jupyter notebooks [#1167](https://github.com/apache/datafusion-python/pull/1167) (timsaucer) +- feat: python based catalog and schema provider [#1156](https://github.com/apache/datafusion-python/pull/1156) (timsaucer) +- feat: add FFI support for user defined functions [#1145](https://github.com/apache/datafusion-python/pull/1145) (timsaucer) + +**Other:** + +- Release DataFusion 47.0.0 [#1130](https://github.com/apache/datafusion-python/pull/1130) (timsaucer) +- Add a documentation build step in CI [#1139](https://github.com/apache/datafusion-python/pull/1139) (crystalxyz) +- Add DataFrame API Documentation for DataFusion Python [#1132](https://github.com/apache/datafusion-python/pull/1132) (kosiew) +- Add Interruptible Query Execution in Jupyter via KeyboardInterrupt Support [#1141](https://github.com/apache/datafusion-python/pull/1141) (kosiew) +- Support types other than String and Int for partition columns [#1154](https://github.com/apache/datafusion-python/pull/1154) (miclegr) +- Fix signature of `__arrow_c_stream__` [#1168](https://github.com/apache/datafusion-python/pull/1168) (kylebarron) +- Consolidate DataFrame Docs: Merge HTML Rendering Section as Subpage [#1161](https://github.com/apache/datafusion-python/pull/1161) (kosiew) +- Add compression_level support to ParquetWriterOptions and enhance write_parquet to accept full options object [#1169](https://github.com/apache/datafusion-python/pull/1169) (kosiew) +- Simplify HTML Formatter Style Handling Using Script Injection [#1177](https://github.com/apache/datafusion-python/pull/1177) (kosiew) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 6 Tim Saucer + 5 kosiew + 1 Crystal Zhou + 1 Kyle Barron + 1 Michele Gregori + 1 Nuno Faria +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/49.0.0.md b/dev/changelog/49.0.0.md new file mode 100644 index 000000000..008bd43bc --- /dev/null +++ b/dev/changelog/49.0.0.md @@ -0,0 +1,61 @@ + + +# Apache DataFusion Python 49.0.0 Changelog + +This release consists of 16 commits from 7 contributors. See credits at the end of this changelog for more information. 
+ +**Fixed bugs:** + +- fix(build): Include build.rs in published crates [#1199](https://github.com/apache/datafusion-python/pull/1199) (colinmarc) + +**Other:** + +- 48.0.0 Release [#1175](https://github.com/apache/datafusion-python/pull/1175) (timsaucer) +- Update CI rules [#1188](https://github.com/apache/datafusion-python/pull/1188) (timsaucer) +- Fix Python UDAF Accumulator Interface example to Properly Handle State and Updates with List[Array] Types [#1192](https://github.com/apache/datafusion-python/pull/1192) (kosiew) +- chore: Upgrade datafusion to version 49 [#1200](https://github.com/apache/datafusion-python/pull/1200) (nuno-faria) +- Update how to dev instructions [#1179](https://github.com/apache/datafusion-python/pull/1179) (ntjohnson1) +- build(deps): bump object_store from 0.12.2 to 0.12.3 [#1189](https://github.com/apache/datafusion-python/pull/1189) (dependabot[bot]) +- build(deps): bump uuid from 1.17.0 to 1.18.0 [#1202](https://github.com/apache/datafusion-python/pull/1202) (dependabot[bot]) +- build(deps): bump async-trait from 0.1.88 to 0.1.89 [#1203](https://github.com/apache/datafusion-python/pull/1203) (dependabot[bot]) +- build(deps): bump slab from 0.4.10 to 0.4.11 [#1205](https://github.com/apache/datafusion-python/pull/1205) (dependabot[bot]) +- Improved window and aggregate function signature [#1187](https://github.com/apache/datafusion-python/pull/1187) (timsaucer) +- Optional improvements in verification instructions [#1183](https://github.com/apache/datafusion-python/pull/1183) (paleolimbot) +- Improve `show()` output for empty DataFrames [#1208](https://github.com/apache/datafusion-python/pull/1208) (kosiew) +- build(deps): bump actions/download-artifact from 4 to 5 [#1201](https://github.com/apache/datafusion-python/pull/1201) (dependabot[bot]) +- build(deps): bump url from 2.5.4 to 2.5.7 [#1210](https://github.com/apache/datafusion-python/pull/1210) (dependabot[bot]) +- build(deps): bump actions/checkout from 4 to 5 [#1204](https://github.com/apache/datafusion-python/pull/1204) (dependabot[bot]) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 7 dependabot[bot] + 3 Tim Saucer + 2 kosiew + 1 Colin Marc + 1 Dewey Dunnington + 1 Nick + 1 Nuno Faria +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/50.0.0.md b/dev/changelog/50.0.0.md new file mode 100644 index 000000000..c3f09d180 --- /dev/null +++ b/dev/changelog/50.0.0.md @@ -0,0 +1,60 @@ + + +# Apache DataFusion Python 50.0.0 Changelog + +This release consists of 12 commits from 7 contributors. See credits at the end of this changelog for more information. 
+
+**Implemented enhancements:**
+
+- feat: allow passing a slice to an expression with the [] indexing [#1215](https://github.com/apache/datafusion-python/pull/1215) (timsaucer)
+
+**Documentation updates:**
+
+- docs: fix CaseBuilder documentation example [#1225](https://github.com/apache/datafusion-python/pull/1225) (IndexSeek)
+- docs: update link to user example for custom table provider [#1224](https://github.com/apache/datafusion-python/pull/1224) (IndexSeek)
+- docs: add apache iceberg as datafusion data source [#1240](https://github.com/apache/datafusion-python/pull/1240) (kevinjqliu)
+
+**Other:**
+
+- 49.0.0 release [#1211](https://github.com/apache/datafusion-python/pull/1211) (timsaucer)
+- Update development guide in README.md [#1213](https://github.com/apache/datafusion-python/pull/1213) (YKoustubhRao)
+- Add benchmark script and documentation for maximizing CPU usage in DataFusion Python [#1216](https://github.com/apache/datafusion-python/pull/1216) (kosiew)
+- Fixing a few Typos [#1220](https://github.com/apache/datafusion-python/pull/1220) (ntjohnson1)
+- Set fail on warning for documentation generation [#1218](https://github.com/apache/datafusion-python/pull/1218) (timsaucer)
+- chore: remove redundant error transformation [#1232](https://github.com/apache/datafusion-python/pull/1232) (mesejo)
+- Support string column identifiers for sort/aggregate/window and stricter Expr validation [#1221](https://github.com/apache/datafusion-python/pull/1221) (kosiew)
+- Prepare for DF50 [#1231](https://github.com/apache/datafusion-python/pull/1231) (timsaucer)
+
+## Credits
+
+Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor.
+
+```
+ 4 Tim Saucer
+ 2 Tyler White
+ 2 kosiew
+ 1 Daniel Mesejo
+ 1 Kevin Liu
+ 1 Koustubh Rao
+ 1 Nick
+```
+
+Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release.
+
diff --git a/dev/changelog/50.1.0.md b/dev/changelog/50.1.0.md
new file mode 100644
index 000000000..3b9ff84ff
--- /dev/null
+++ b/dev/changelog/50.1.0.md
@@ -0,0 +1,57 @@
+
+
+# Apache DataFusion Python 50.1.0 Changelog
+
+This release consists of 11 commits from 7 contributors. See credits at the end of this changelog for more information.
+
+**Breaking changes:**
+
+- Unify Table representations [#1256](https://github.com/apache/datafusion-python/pull/1256) (timsaucer)
+
+**Implemented enhancements:**
+
+- feat: expose DataFrame.write_table [#1264](https://github.com/apache/datafusion-python/pull/1264) (timsaucer)
+- feat: expose `DataFrame.parse_sql_expr` [#1274](https://github.com/apache/datafusion-python/pull/1274) (milenkovicm)
+
+**Other:**
+
+- Update version number, add changelog [#1249](https://github.com/apache/datafusion-python/pull/1249) (timsaucer)
+- Fix drop() method to handle quoted column names consistently [#1242](https://github.com/apache/datafusion-python/pull/1242) (H0TB0X420)
+- Make Session Context `pyclass` frozen so interior mutability is only managed by rust [#1248](https://github.com/apache/datafusion-python/pull/1248) (ntjohnson1)
+- macos-13 is deprecated [#1259](https://github.com/apache/datafusion-python/pull/1259) (kevinjqliu)
+- Freeze PyO3 wrappers & introduce interior mutability to avoid PyO3 borrow errors [#1253](https://github.com/apache/datafusion-python/pull/1253) (kosiew)
+- chore: update dependencies [#1269](https://github.com/apache/datafusion-python/pull/1269) (timsaucer)
+
+## Credits
+
+Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor.
+
+```
+ 4 Tim Saucer
+ 2 Siew Kam Onn
+ 1 H0TB0X420
+ 1 Kevin Liu
+ 1 Marko Milenković
+ 1 Nick
+ 1 kosiew
+```
+
+Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release.
+
diff --git a/dev/changelog/51.0.0.md b/dev/changelog/51.0.0.md
new file mode 100644
index 000000000..cc157eb0d
--- /dev/null
+++ b/dev/changelog/51.0.0.md
@@ -0,0 +1,74 @@
+
+
+# Apache DataFusion Python 51.0.0 Changelog
+
+This release consists of 23 commits from 7 contributors. See credits at the end of this changelog for more information.
+ +**Breaking changes:** + +- feat: reduce duplicate fields on join [#1184](https://github.com/apache/datafusion-python/pull/1184) (timsaucer) + +**Implemented enhancements:** + +- feat: expose `select_exprs` method on DataFrame [#1271](https://github.com/apache/datafusion-python/pull/1271) (milenkovicm) +- feat: allow DataFrame.filter to accept SQL strings [#1276](https://github.com/apache/datafusion-python/pull/1276) (K-dash) +- feat: add temporary view option for into_view [#1267](https://github.com/apache/datafusion-python/pull/1267) (timsaucer) +- feat: support session token parameter for AmazonS3 [#1275](https://github.com/apache/datafusion-python/pull/1275) (GCHQDeveloper028) +- feat: `with_column` supports SQL expression [#1284](https://github.com/apache/datafusion-python/pull/1284) (milenkovicm) +- feat: Add SQL expression for `repartition_by_hash` [#1285](https://github.com/apache/datafusion-python/pull/1285) (milenkovicm) +- feat: Add SQL expression support for `with_columns` [#1286](https://github.com/apache/datafusion-python/pull/1286) (milenkovicm) + +**Fixed bugs:** + +- fix: use coalesce instead of drop_duplicate_keys for join [#1318](https://github.com/apache/datafusion-python/pull/1318) (mesejo) +- fix: Inconsistent schemas when converting to pyarrow [#1315](https://github.com/apache/datafusion-python/pull/1315) (nuno-faria) + +**Other:** + +- Release 50.1 [#1281](https://github.com/apache/datafusion-python/pull/1281) (timsaucer) +- Update python minimum version to 3.10 [#1296](https://github.com/apache/datafusion-python/pull/1296) (timsaucer) +- chore: update datafusion minor version [#1297](https://github.com/apache/datafusion-python/pull/1297) (timsaucer) +- Enable remaining pylints [#1298](https://github.com/apache/datafusion-python/pull/1298) (timsaucer) +- Add Arrow C streaming, DataFrame iteration, and OOM-safe streaming execution [#1222](https://github.com/apache/datafusion-python/pull/1222) (kosiew) +- Add PyCapsule Type Support and Type Hint Enhancements for AggregateUDF in DataFusion Python Bindings [#1277](https://github.com/apache/datafusion-python/pull/1277) (kosiew) +- Add collect_column to dataframe [#1302](https://github.com/apache/datafusion-python/pull/1302) (timsaucer) +- chore: apply cargo fmt with import organization [#1303](https://github.com/apache/datafusion-python/pull/1303) (timsaucer) +- Feat/parameterized sql queries [#964](https://github.com/apache/datafusion-python/pull/964) (timsaucer) +- Upgrade to Datafusion 51 [#1311](https://github.com/apache/datafusion-python/pull/1311) (nuno-faria) +- minor: resolve build errors after latest merge into main [#1325](https://github.com/apache/datafusion-python/pull/1325) (timsaucer) +- Update build workflow link [#1330](https://github.com/apache/datafusion-python/pull/1330) (timsaucer) +- Do not convert pyarrow scalar values to plain python types when passing as `lit` [#1319](https://github.com/apache/datafusion-python/pull/1319) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 12 Tim Saucer + 4 Marko Milenković + 2 Nuno Faria + 2 kosiew + 1 Daniel Mesejo + 1 GCHQDeveloper028 + 1 𝕂 +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. 
+ diff --git a/dev/changelog/52.0.0.md b/dev/changelog/52.0.0.md new file mode 100644 index 000000000..3f848bb47 --- /dev/null +++ b/dev/changelog/52.0.0.md @@ -0,0 +1,78 @@ + + +# Apache DataFusion Python 52.0.0 Changelog + +This release consists of 26 commits from 9 contributors. See credits at the end of this changelog for more information. + +**Implemented enhancements:** + +- feat: add CatalogProviderList support [#1363](https://github.com/apache/datafusion-python/pull/1363) (timsaucer) +- feat: add support for generating JSON formatted substrait plan [#1376](https://github.com/apache/datafusion-python/pull/1376) (Prathamesh9284) +- feat: add regexp_instr function [#1382](https://github.com/apache/datafusion-python/pull/1382) (mesejo) + +**Fixed bugs:** + +- fix: mangled errors [#1377](https://github.com/apache/datafusion-python/pull/1377) (mesejo) + +**Documentation updates:** + +- docs: Clarify first_value usage in select vs aggregate [#1348](https://github.com/apache/datafusion-python/pull/1348) (AdMub) + +**Other:** + +- Release 51.0.0 [#1333](https://github.com/apache/datafusion-python/pull/1333) (timsaucer) +- Use explicit timer in unit test [#1338](https://github.com/apache/datafusion-python/pull/1338) (timsaucer) +- Add use_fabric_endpoint parameter to MicrosoftAzure class [#1357](https://github.com/apache/datafusion-python/pull/1357) (djouallah) +- Prepare for DF52 release [#1337](https://github.com/apache/datafusion-python/pull/1337) (timsaucer) +- build(deps): bump actions/checkout from 5 to 6 [#1310](https://github.com/apache/datafusion-python/pull/1310) (dependabot[bot]) +- build(deps): bump actions/download-artifact from 5 to 7 [#1321](https://github.com/apache/datafusion-python/pull/1321) (dependabot[bot]) +- build(deps): bump actions/upload-artifact from 4 to 6 [#1322](https://github.com/apache/datafusion-python/pull/1322) (dependabot[bot]) +- build(deps): bump actions/cache from 4 to 5 [#1323](https://github.com/apache/datafusion-python/pull/1323) (dependabot[bot]) +- Pass Field information back and forth when using scalar UDFs [#1299](https://github.com/apache/datafusion-python/pull/1299) (timsaucer) +- Update dependency minor versions to prepare for DF52 release [#1368](https://github.com/apache/datafusion-python/pull/1368) (timsaucer) +- Improve displayed error by using `DataFusionError`'s `Display` trait [#1370](https://github.com/apache/datafusion-python/pull/1370) (abey79) +- Enforce DataFrame display memory limits with `max_rows` + `min_rows` constraint (deprecate `repr_rows`) [#1367](https://github.com/apache/datafusion-python/pull/1367) (kosiew) +- Implement all CSV reader options [#1361](https://github.com/apache/datafusion-python/pull/1361) (timsaucer) +- chore: add confirmation before tarball is released [#1372](https://github.com/apache/datafusion-python/pull/1372) (milenkovicm) +- Build in debug mode for PRs [#1375](https://github.com/apache/datafusion-python/pull/1375) (timsaucer) +- minor: remove ffi test wheel from distribution artifact [#1378](https://github.com/apache/datafusion-python/pull/1378) (timsaucer) +- chore: update rust 2024 edition [#1371](https://github.com/apache/datafusion-python/pull/1371) (timsaucer) +- Fix Python UDAF list-of-timestamps return by enforcing list-valued scalars and caching PyArrow types [#1347](https://github.com/apache/datafusion-python/pull/1347) (kosiew) +- minor: update cargo dependencies [#1383](https://github.com/apache/datafusion-python/pull/1383) (timsaucer) +- chore: bump Python version for RAT checking 
[#1386](https://github.com/apache/datafusion-python/pull/1386) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 13 Tim Saucer + 4 dependabot[bot] + 2 Daniel Mesejo + 2 kosiew + 1 Adisa Mubarak (AdMub) + 1 Antoine Beyeler + 1 Dhanashri Prathamesh Iranna + 1 Marko Milenković + 1 Mimoune +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + diff --git a/dev/changelog/pre-43.0.0.md b/dev/changelog/pre-43.0.0.md new file mode 100644 index 000000000..ae3a2348a --- /dev/null +++ b/dev/changelog/pre-43.0.0.md @@ -0,0 +1,715 @@ + + +# DataFusion Python Changelog + +## [42.0.0](https://github.com/apache/datafusion-python/tree/42.0.0) (2024-10-06) + +This release consists of 20 commits from 6 contributors. See credits at the end of this changelog for more information. + +**Implemented enhancements:** + +- feat: expose between [#868](https://github.com/apache/datafusion-python/pull/868) (mesejo) +- feat: make register_csv accept a list of paths [#883](https://github.com/apache/datafusion-python/pull/883) (mesejo) +- feat: expose http object store [#885](https://github.com/apache/datafusion-python/pull/885) (mesejo) + +**Fixed bugs:** + +- fix: Calling `count` on a pyarrow dataset results in an error [#843](https://github.com/apache/datafusion-python/pull/843) (Michael-J-Ward) + +**Other:** + +- Upgrade datafusion [#867](https://github.com/apache/datafusion-python/pull/867) (emgeee) +- Feature/aggregates as windows [#871](https://github.com/apache/datafusion-python/pull/871) (timsaucer) +- Fix regression on register_udaf [#878](https://github.com/apache/datafusion-python/pull/878) (timsaucer) +- build(deps): upgrade setup-protoc action and protoc version number [#873](https://github.com/apache/datafusion-python/pull/873) (Michael-J-Ward) +- build(deps): bump prost-types from 0.13.2 to 0.13.3 [#881](https://github.com/apache/datafusion-python/pull/881) (dependabot[bot]) +- build(deps): bump prost from 0.13.2 to 0.13.3 [#882](https://github.com/apache/datafusion-python/pull/882) (dependabot[bot]) +- chore: remove XFAIL from passing tests [#884](https://github.com/apache/datafusion-python/pull/884) (Michael-J-Ward) +- Add user defined window function support [#880](https://github.com/apache/datafusion-python/pull/880) (timsaucer) +- build(deps): bump syn from 2.0.77 to 2.0.79 [#886](https://github.com/apache/datafusion-python/pull/886) (dependabot[bot]) +- fix example of reading parquet from s3 [#896](https://github.com/apache/datafusion-python/pull/896) (sir-sigurd) +- release-testing [#889](https://github.com/apache/datafusion-python/pull/889) (Michael-J-Ward) +- chore(bench): fix create_tables.sql for tpch benchmark [#897](https://github.com/apache/datafusion-python/pull/897) (Michael-J-Ward) +- Add physical and logical plan conversion to and from protobuf [#892](https://github.com/apache/datafusion-python/pull/892) (timsaucer) +- Feature/instance udfs [#890](https://github.com/apache/datafusion-python/pull/890) (timsaucer) +- chore(ci): remove Mambaforge variant from CI [#894](https://github.com/apache/datafusion-python/pull/894) (Michael-J-Ward) +- Use OnceLock to store TokioRuntime [#895](https://github.com/apache/datafusion-python/pull/895) (Michael-J-Ward) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. 
+ +``` + 7 Michael J Ward + 5 Tim Saucer + 3 Daniel Mesejo + 3 dependabot[bot] + 1 Matt Green + 1 Sergey Fedoseev +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + +## [41.0.0](https://github.com/apache/datafusion-python/tree/41.0.0) (2024-09-09) + +This release consists of 19 commits from 6 contributors. See credits at the end of this changelog for more information. + +**Implemented enhancements:** + +- feat: enable list of paths for read_csv [#824](https://github.com/apache/datafusion-python/pull/824) (mesejo) +- feat: better exception and message for table not found [#851](https://github.com/apache/datafusion-python/pull/851) (mesejo) +- feat: make cast accept built-in Python types [#858](https://github.com/apache/datafusion-python/pull/858) (mesejo) + +**Other:** + +- chore: Prepare for 40.0.0 release [#801](https://github.com/apache/datafusion-python/pull/801) (andygrove) +- Add typing-extensions dependency to pyproject [#805](https://github.com/apache/datafusion-python/pull/805) (timsaucer) +- Upgrade deps to datafusion 41 [#802](https://github.com/apache/datafusion-python/pull/802) (Michael-J-Ward) +- Fix SessionContext init with only SessionConfig [#827](https://github.com/apache/datafusion-python/pull/827) (jcrist) +- build(deps): upgrade actions/{upload,download}-artifact@v3 to v4 [#829](https://github.com/apache/datafusion-python/pull/829) (Michael-J-Ward) +- Run ruff format in CI [#837](https://github.com/apache/datafusion-python/pull/837) (timsaucer) +- Add PyCapsule support for Arrow import and export [#825](https://github.com/apache/datafusion-python/pull/825) (timsaucer) +- Feature/expose when function [#836](https://github.com/apache/datafusion-python/pull/836) (timsaucer) +- Add Window Functions for use with function builder [#808](https://github.com/apache/datafusion-python/pull/808) (timsaucer) +- chore: fix typos [#844](https://github.com/apache/datafusion-python/pull/844) (mesejo) +- build(ci): use proper mac runners [#841](https://github.com/apache/datafusion-python/pull/841) (Michael-J-Ward) +- Set of small features [#839](https://github.com/apache/datafusion-python/pull/839) (timsaucer) +- chore: fix docstrings, typos [#852](https://github.com/apache/datafusion-python/pull/852) (mesejo) +- chore: Use datafusion re-exported dependencies [#856](https://github.com/apache/datafusion-python/pull/856) (emgeee) +- add guidelines on separating python and rust code [#860](https://github.com/apache/datafusion-python/pull/860) (Michael-J-Ward) +- Update Aggregate functions to take builder parameters [#859](https://github.com/apache/datafusion-python/pull/859) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 7 Tim Saucer + 5 Daniel Mesejo + 4 Michael J Ward + 1 Andy Grove + 1 Jim Crist-Harif + 1 Matt Green +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. + +## [40.0.0](https://github.com/apache/datafusion-python/tree/40.0.0) (2024-08-09) + +This release consists of 18 commits from 4 contributors. See credits at the end of this changelog for more information. 
+
+**Other:**
+ +- Update changelog for 39.0.0 [#742](https://github.com/apache/datafusion-python/pull/742) (andygrove) +- build(deps): bump uuid from 1.8.0 to 1.9.1 [#744](https://github.com/apache/datafusion-python/pull/744) (dependabot[bot]) +- build(deps): bump mimalloc from 0.1.42 to 0.1.43 [#745](https://github.com/apache/datafusion-python/pull/745) (dependabot[bot]) +- build(deps): bump syn from 2.0.67 to 2.0.68 [#746](https://github.com/apache/datafusion-python/pull/746) (dependabot[bot]) +- Tsaucer/find window fn [#747](https://github.com/apache/datafusion-python/pull/747) (timsaucer) +- Python wrapper classes for all user interfaces [#750](https://github.com/apache/datafusion-python/pull/750) (timsaucer) +- Expose array sort [#764](https://github.com/apache/datafusion-python/pull/764) (timsaucer) +- Upgrade protobuf and remove GH Action googletest-installer [#773](https://github.com/apache/datafusion-python/pull/773) (Michael-J-Ward) +- Upgrade Datafusion 40 [#771](https://github.com/apache/datafusion-python/pull/771) (Michael-J-Ward) +- Bugfix: Calling count with None arguments [#768](https://github.com/apache/datafusion-python/pull/768) (timsaucer) +- Add in user example that compares a two different approaches to UDFs [#770](https://github.com/apache/datafusion-python/pull/770) (timsaucer) +- Add missing exports for wrapper modules [#782](https://github.com/apache/datafusion-python/pull/782) (timsaucer) +- Add PyExpr to_variant conversions [#793](https://github.com/apache/datafusion-python/pull/793) (Michael-J-Ward) +- Add missing expressions to wrapper export [#795](https://github.com/apache/datafusion-python/pull/795) (timsaucer) +- Doc/cross reference [#791](https://github.com/apache/datafusion-python/pull/791) (timsaucer) +- Re-Enable `num_centroids` to `approx_percentile_cont` [#798](https://github.com/apache/datafusion-python/pull/798) (Michael-J-Ward) +- UDAF process all state variables [#799](https://github.com/apache/datafusion-python/pull/799) (timsaucer) + +## Credits + +Thank you to everyone who contributed to this release. Here is a breakdown of commits (PRs merged) per contributor. + +``` + 9 Tim Saucer + 4 Michael J Ward + 3 dependabot[bot] + 2 Andy Grove +``` + +Thank you also to everyone who contributed in other ways such as filing issues, reviewing PRs, and providing feedback on this release. 
+ +## [39.0.0](https://github.com/apache/datafusion-python/tree/39.0.0) (2024-06-25) + +**Merged pull requests:** + +- ci: add substrait feature to linux builds [#720](https://github.com/apache/datafusion-python/pull/720) (Michael-J-Ward) +- Docs deploy action [#721](https://github.com/apache/datafusion-python/pull/721) (Michael-J-Ward) +- update deps [#723](https://github.com/apache/datafusion-python/pull/723) (Michael-J-Ward) +- Upgrade maturin [#725](https://github.com/apache/datafusion-python/pull/725) (Michael-J-Ward) +- Upgrade datafusion 39 [#728](https://github.com/apache/datafusion-python/pull/728) (Michael-J-Ward) +- use ScalarValue::to_pyarrow to convert to python object [#731](https://github.com/apache/datafusion-python/pull/731) (Michael-J-Ward) +- Pyo3 `Bound<'py, T>` api [#734](https://github.com/apache/datafusion-python/pull/734) (Michael-J-Ward) +- github test action: drop python 3.7, add python 3.12 [#736](https://github.com/apache/datafusion-python/pull/736) (Michael-J-Ward) +- Pyarrow filter pushdowns [#735](https://github.com/apache/datafusion-python/pull/735) (Michael-J-Ward) +- build(deps): bump syn from 2.0.66 to 2.0.67 [#738](https://github.com/apache/datafusion-python/pull/738) (dependabot[bot]) +- Pyo3 refactorings [#740](https://github.com/apache/datafusion-python/pull/740) (Michael-J-Ward) +- UDAF `sum` workaround [#741](https://github.com/apache/datafusion-python/pull/741) (Michael-J-Ward) + +## [38.0.1](https://github.com/apache/datafusion-python/tree/38.0.1) (2024-05-25) + +**Implemented enhancements:** + +- feat: add python bindings for ends_with function [#693](https://github.com/apache/datafusion-python/pull/693) (richtia) +- feat: expose `named_struct` in python [#700](https://github.com/apache/datafusion-python/pull/700) (Michael-J-Ward) + +**Merged pull requests:** + +- Add document about basics of working with expressions [#668](https://github.com/apache/datafusion-python/pull/668) (timsaucer) +- chore: Update Python release process now that DataFusion is TLP [#674](https://github.com/apache/datafusion-python/pull/674) (andygrove) +- Fix Docs [#676](https://github.com/apache/datafusion-python/pull/676) (Michael-J-Ward) +- Add examples from TPC-H [#666](https://github.com/apache/datafusion-python/pull/666) (timsaucer) +- fix conda nightly builds, attempt 2 [#689](https://github.com/apache/datafusion-python/pull/689) (Michael-J-Ward) +- Upgrade to datafusion 38 [#691](https://github.com/apache/datafusion-python/pull/691) (Michael-J-Ward) +- chore: update to maturin's recommended project layout for rust/python… [#695](https://github.com/apache/datafusion-python/pull/695) (Michael-J-Ward) +- chore: update cargo deps [#698](https://github.com/apache/datafusion-python/pull/698) (Michael-J-Ward) +- feat: add python bindings for ends_with function [#693](https://github.com/apache/datafusion-python/pull/693) (richtia) +- feat: expose `named_struct` in python [#700](https://github.com/apache/datafusion-python/pull/700) (Michael-J-Ward) +- Website fixes [#702](https://github.com/apache/datafusion-python/pull/702) (Michael-J-Ward) + +## [37.1.0](https://github.com/apache/datafusion-python/tree/37.1.0) (2024-05-08) + +**Implemented enhancements:** + +- feat: add execute_stream and execute_stream_partitioned [#610](https://github.com/apache/datafusion-python/pull/610) (mesejo) + +**Documentation updates:** + +- docs: update docs CI to install python-311 requirements [#661](https://github.com/apache/datafusion-python/pull/661) (Michael-J-Ward) + +**Merged pull 
requests:** + +- Switch to Ruff for Python linting [#529](https://github.com/apache/datafusion-python/pull/529) (andygrove) +- Remove sql-on-pandas/polars/cudf examples [#602](https://github.com/apache/datafusion-python/pull/602) (andygrove) +- build(deps): bump object_store from 0.9.0 to 0.9.1 [#611](https://github.com/apache/datafusion-python/pull/611) (dependabot[bot]) +- More missing array funcs [#605](https://github.com/apache/datafusion-python/pull/605) (judahrand) +- feat: add execute_stream and execute_stream_partitioned [#610](https://github.com/apache/datafusion-python/pull/610) (mesejo) +- build(deps): bump uuid from 1.7.0 to 1.8.0 [#615](https://github.com/apache/datafusion-python/pull/615) (dependabot[bot]) +- Bind SQLOptions and relative ctx method #567 [#588](https://github.com/apache/datafusion-python/pull/588) (giacomorebecchi) +- bugfix: no panic on empty table [#613](https://github.com/apache/datafusion-python/pull/613) (mesejo) +- Expose `register_listing_table` [#618](https://github.com/apache/datafusion-python/pull/618) (henrifroese) +- Expose unnest feature [#641](https://github.com/apache/datafusion-python/pull/641) (timsaucer) +- Update domain names and paths in asf yaml [#643](https://github.com/apache/datafusion-python/pull/643) (andygrove) +- use python 3.11 to publish docs [#645](https://github.com/apache/datafusion-python/pull/645) (andygrove) +- docs: update docs CI to install python-311 requirements [#661](https://github.com/apache/datafusion-python/pull/661) (Michael-J-Ward) +- Upgrade Datafusion to v37.1.0 [#669](https://github.com/apache/datafusion-python/pull/669) (Michael-J-Ward) + +## [36.0.0](https://github.com/apache/datafusion-python/tree/36.0.0) (2024-03-02) + +**Implemented enhancements:** + +- feat: Add `flatten` array function [#562](https://github.com/apache/datafusion-python/pull/562) (mobley-trent) + +**Documentation updates:** + +- docs: Add ASF attribution [#580](https://github.com/apache/datafusion-python/pull/580) (simicd) + +**Merged pull requests:** + +- Allow PyDataFrame to be used from other projects [#582](https://github.com/apache/datafusion-python/pull/582) (andygrove) +- docs: Add ASF attribution [#580](https://github.com/apache/datafusion-python/pull/580) (simicd) +- Add array functions [#560](https://github.com/apache/datafusion-python/pull/560) (ongchi) +- feat: Add `flatten` array function [#562](https://github.com/apache/datafusion-python/pull/562) (mobley-trent) + +## [35.0.0](https://github.com/apache/datafusion-python/tree/35.0.0) (2024-01-20) + +**Merged pull requests:** + +- build(deps): bump syn from 2.0.41 to 2.0.43 [#559](https://github.com/apache/datafusion-python/pull/559) (dependabot[bot]) +- build(deps): bump tokio from 1.35.0 to 1.35.1 [#558](https://github.com/apache/datafusion-python/pull/558) (dependabot[bot]) +- build(deps): bump async-trait from 0.1.74 to 0.1.77 [#556](https://github.com/apache/datafusion-python/pull/556) (dependabot[bot]) +- build(deps): bump pyo3 from 0.20.0 to 0.20.2 [#557](https://github.com/apache/datafusion-python/pull/557) (dependabot[bot]) + +## [34.0.0](https://github.com/apache/datafusion-python/tree/34.0.0) (2023-12-28) + +**Merged pull requests:** + +- Adjust visibility of crate private members & Functions [#537](https://github.com/apache/datafusion-python/pull/537) (jdye64) +- Update json.rst [#538](https://github.com/apache/datafusion-python/pull/538) (ray-andrew) +- Enable mimalloc local_dynamic_tls feature [#540](https://github.com/apache/datafusion-python/pull/540) (jdye64) 
+- Enable substrait feature to be built by default in CI, for nightlies … [#544](https://github.com/apache/datafusion-python/pull/544) (jdye64) + +## [33.0.0](https://github.com/apache/datafusion-python/tree/33.0.0) (2023-11-16) + +**Merged pull requests:** + +- First pass at getting architectured builds working [#350](https://github.com/apache/datafusion-python/pull/350) (charlesbluca) +- Remove libprotobuf dep [#527](https://github.com/apache/datafusion-python/pull/527) (jdye64) + +## [32.0.0](https://github.com/apache/datafusion-python/tree/32.0.0) (2023-10-21) + +**Implemented enhancements:** + +- feat: expose PyWindowFrame [#509](https://github.com/apache/datafusion-python/pull/509) (dlovell) +- add Binary String Functions;encode,decode [#494](https://github.com/apache/datafusion-python/pull/494) (jiangzhx) +- add bit_and,bit_or,bit_xor,bool_add,bool_or [#496](https://github.com/apache/datafusion-python/pull/496) (jiangzhx) +- add first_value last_value [#498](https://github.com/apache/datafusion-python/pull/498) (jiangzhx) +- add regr\_\* functions [#499](https://github.com/apache/datafusion-python/pull/499) (jiangzhx) +- Add random missing bindings [#522](https://github.com/apache/datafusion-python/pull/522) (jdye64) +- Allow for multiple input files per table instead of a single file [#519](https://github.com/apache/datafusion-python/pull/519) (jdye64) +- Add support for window function bindings [#521](https://github.com/apache/datafusion-python/pull/521) (jdye64) + +**Merged pull requests:** + +- Prepare 31.0.0 release [#500](https://github.com/apache/datafusion-python/pull/500) (andygrove) +- Improve release process documentation [#505](https://github.com/apache/datafusion-python/pull/505) (andygrove) +- add Binary String Functions;encode,decode [#494](https://github.com/apache/datafusion-python/pull/494) (jiangzhx) +- build(deps): bump mimalloc from 0.1.38 to 0.1.39 [#502](https://github.com/apache/datafusion-python/pull/502) (dependabot[bot]) +- build(deps): bump syn from 2.0.32 to 2.0.35 [#503](https://github.com/apache/datafusion-python/pull/503) (dependabot[bot]) +- build(deps): bump syn from 2.0.35 to 2.0.37 [#506](https://github.com/apache/datafusion-python/pull/506) (dependabot[bot]) +- Use latest DataFusion [#511](https://github.com/apache/datafusion-python/pull/511) (andygrove) +- add bit_and,bit_or,bit_xor,bool_add,bool_or [#496](https://github.com/apache/datafusion-python/pull/496) (jiangzhx) +- use DataFusion 32 [#515](https://github.com/apache/datafusion-python/pull/515) (andygrove) +- add first_value last_value [#498](https://github.com/apache/datafusion-python/pull/498) (jiangzhx) +- build(deps): bump regex-syntax from 0.7.5 to 0.8.1 [#517](https://github.com/apache/datafusion-python/pull/517) (dependabot[bot]) +- build(deps): bump pyo3-build-config from 0.19.2 to 0.20.0 [#516](https://github.com/apache/datafusion-python/pull/516) (dependabot[bot]) +- add regr\_\* functions [#499](https://github.com/apache/datafusion-python/pull/499) (jiangzhx) +- Add random missing bindings [#522](https://github.com/apache/datafusion-python/pull/522) (jdye64) +- build(deps): bump rustix from 0.38.18 to 0.38.19 [#523](https://github.com/apache/datafusion-python/pull/523) (dependabot[bot]) +- Allow for multiple input files per table instead of a single file [#519](https://github.com/apache/datafusion-python/pull/519) (jdye64) +- Add support for window function bindings [#521](https://github.com/apache/datafusion-python/pull/521) (jdye64) +- Small clippy fix 
[#524](https://github.com/apache/datafusion-python/pull/524) (andygrove) + +## [31.0.0](https://github.com/apache/datafusion-python/tree/31.0.0) (2023-09-12) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/28.0.0...31.0.0) + +**Implemented enhancements:** + +- feat: add case function (#447) [#448](https://github.com/apache/datafusion-python/pull/448) (mesejo) +- feat: add compression options [#456](https://github.com/apache/datafusion-python/pull/456) (mesejo) +- feat: add register_json [#458](https://github.com/apache/datafusion-python/pull/458) (mesejo) +- feat: add basic compression configuration to write_parquet [#459](https://github.com/apache/datafusion-python/pull/459) (mesejo) +- feat: add example of reading parquet from s3 [#460](https://github.com/apache/datafusion-python/pull/460) (mesejo) +- feat: add register_avro and read_table [#461](https://github.com/apache/datafusion-python/pull/461) (mesejo) +- feat: add missing scalar math functions [#465](https://github.com/apache/datafusion-python/pull/465) (mesejo) + +**Documentation updates:** + +- docs: include pre-commit hooks section in contributor guide [#455](https://github.com/apache/datafusion-python/pull/455) (mesejo) + +**Merged pull requests:** + +- Build Linux aarch64 wheel [#443](https://github.com/apache/datafusion-python/pull/443) (gokselk) +- feat: add case function (#447) [#448](https://github.com/apache/datafusion-python/pull/448) (mesejo) +- enhancement(docs): Add user guide (#432) [#445](https://github.com/apache/datafusion-python/pull/445) (mesejo) +- docs: include pre-commit hooks section in contributor guide [#455](https://github.com/apache/datafusion-python/pull/455) (mesejo) +- feat: add compression options [#456](https://github.com/apache/datafusion-python/pull/456) (mesejo) +- Upgrade to DF 28.0.0-rc1 [#457](https://github.com/apache/datafusion-python/pull/457) (andygrove) +- feat: add register_json [#458](https://github.com/apache/datafusion-python/pull/458) (mesejo) +- feat: add basic compression configuration to write_parquet [#459](https://github.com/apache/datafusion-python/pull/459) (mesejo) +- feat: add example of reading parquet from s3 [#460](https://github.com/apache/datafusion-python/pull/460) (mesejo) +- feat: add register_avro and read_table [#461](https://github.com/apache/datafusion-python/pull/461) (mesejo) +- feat: add missing scalar math functions [#465](https://github.com/apache/datafusion-python/pull/465) (mesejo) +- build(deps): bump arduino/setup-protoc from 1 to 2 [#452](https://github.com/apache/datafusion-python/pull/452) (dependabot[bot]) +- Revert "build(deps): bump arduino/setup-protoc from 1 to 2 (#452)" [#474](https://github.com/apache/datafusion-python/pull/474) (viirya) +- Minor: fix wrongly copied function description [#497](https://github.com/apache/datafusion-python/pull/497) (viirya) +- Upgrade to Datafusion 31.0.0 [#491](https://github.com/apache/datafusion-python/pull/491) (judahrand) +- Add `isnan` and `iszero` [#495](https://github.com/apache/datafusion-python/pull/495) (judahrand) + +## 30.0.0 + +- Skipped due to a breaking change in DataFusion + +## 29.0.0 + +- Skipped + +## [28.0.0](https://github.com/apache/datafusion-python/tree/28.0.0) (2023-07-25) + +**Implemented enhancements:** + +- feat: expose offset in python API [#437](https://github.com/apache/datafusion-python/pull/437) (cpcloud) + +**Merged pull requests:** + +- File based input utils [#433](https://github.com/apache/datafusion-python/pull/433) (jdye64) +- Upgrade to 28.0.0-rc1 
[#434](https://github.com/apache/datafusion-python/pull/434) (andygrove) +- Introduces utility for obtaining SqlTable information from a file like location [#398](https://github.com/apache/datafusion-python/pull/398) (jdye64) +- feat: expose offset in python API [#437](https://github.com/apache/datafusion-python/pull/437) (cpcloud) +- Use DataFusion 28 [#439](https://github.com/apache/datafusion-python/pull/439) (andygrove) + +## [27.0.0](https://github.com/apache/datafusion-python/tree/27.0.0) (2023-07-03) + +**Merged pull requests:** + +- LogicalPlan.to_variant() make public [#412](https://github.com/apache/datafusion-python/pull/412) (jdye64) +- Prepare 27.0.0 release [#423](https://github.com/apache/datafusion-python/pull/423) (andygrove) + +## [26.0.0](https://github.com/apache/datafusion-python/tree/26.0.0) (2023-06-11) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/25.0.0...26.0.0) + +**Merged pull requests:** + +- Add Expr::Case when_then_else support to rex_call_operands function [#388](https://github.com/apache/datafusion-python/pull/388) (jdye64) +- Introduce BaseSessionContext abstract class [#390](https://github.com/apache/datafusion-python/pull/390) (jdye64) +- CRUD Schema support for `BaseSessionContext` [#392](https://github.com/apache/datafusion-python/pull/392) (jdye64) +- CRUD Table support for `BaseSessionContext` [#394](https://github.com/apache/datafusion-python/pull/394) (jdye64) + +## [25.0.0](https://github.com/apache/datafusion-python/tree/25.0.0) (2023-05-23) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/24.0.0...25.0.0) + +**Merged pull requests:** + +- Prepare 24.0.0 Release [#376](https://github.com/apache/datafusion-python/pull/376) (andygrove) +- build(deps): bump uuid from 1.3.1 to 1.3.2 [#359](https://github.com/apache/datafusion-python/pull/359) (dependabot[bot]) +- build(deps): bump mimalloc from 0.1.36 to 0.1.37 [#361](https://github.com/apache/datafusion-python/pull/361) (dependabot[bot]) +- build(deps): bump regex-syntax from 0.6.29 to 0.7.1 [#334](https://github.com/apache/datafusion-python/pull/334) (dependabot[bot]) +- upgrade maturin to 0.15.1 [#379](https://github.com/apache/datafusion-python/pull/379) (Jimexist) +- Expand Expr to include RexType basic support [#378](https://github.com/apache/datafusion-python/pull/378) (jdye64) +- Add Python script for generating changelog [#383](https://github.com/apache/datafusion-python/pull/383) (andygrove) + +## [24.0.0](https://github.com/apache/datafusion-python/tree/24.0.0) (2023-05-09) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/23.0.0...24.0.0) + +**Documentation updates:** + +- Fix link to user guide [#354](https://github.com/apache/datafusion-python/pull/354) (andygrove) + +**Merged pull requests:** + +- Add interface to serialize Substrait plans to Python Bytes. [#344](https://github.com/apache/datafusion-python/pull/344) (kylebrooks-8451) +- Add partition_count property to ExecutionPlan. [#346](https://github.com/apache/datafusion-python/pull/346) (kylebrooks-8451) +- Remove unsendable from all Rust pyclass types. [#348](https://github.com/apache/datafusion-python/pull/348) (kylebrooks-8451) +- Fix link to user guide [#354](https://github.com/apache/datafusion-python/pull/354) (andygrove) +- Fix SessionContext execute. 
[#353](https://github.com/apache/datafusion-python/pull/353) (kylebrooks-8451) +- Pub mod expr in lib.rs [#357](https://github.com/apache/datafusion-python/pull/357) (jdye64) +- Add benchmark derived from TPC-H [#355](https://github.com/apache/datafusion-python/pull/355) (andygrove) +- Add db-benchmark [#365](https://github.com/apache/datafusion-python/pull/365) (andygrove) +- First pass of documentation in mdBook [#364](https://github.com/apache/datafusion-python/pull/364) (MrPowers) +- Add 'pub' and '#[pyo3(get, set)]' to DataTypeMap [#371](https://github.com/apache/datafusion-python/pull/371) (jdye64) +- Fix db-benchmark [#369](https://github.com/apache/datafusion-python/pull/369) (andygrove) +- Docs explaining how to view query plans [#373](https://github.com/apache/datafusion-python/pull/373) (andygrove) +- Improve db-benchmark [#372](https://github.com/apache/datafusion-python/pull/372) (andygrove) +- Make expr member of PyExpr public [#375](https://github.com/apache/datafusion-python/pull/375) (jdye64) + +## [23.0.0](https://github.com/apache/datafusion-python/tree/23.0.0) (2023-04-23) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/22.0.0...23.0.0) + +**Merged pull requests:** + +- Improve API docs, README, and examples for configuring context [#321](https://github.com/apache/datafusion-python/pull/321) (andygrove) +- Osx build linker args [#330](https://github.com/apache/datafusion-python/pull/330) (jdye64) +- Add requirements file for python 3.11 [#332](https://github.com/apache/datafusion-python/pull/332) (r4ntix) +- mac arm64 build [#338](https://github.com/apache/datafusion-python/pull/338) (andygrove) +- Add conda.yaml baseline workflow file [#281](https://github.com/apache/datafusion-python/pull/281) (jdye64) +- Prepare for 23.0.0 release [#335](https://github.com/apache/datafusion-python/pull/335) (andygrove) +- Reuse the Tokio Runtime [#341](https://github.com/apache/datafusion-python/pull/341) (kylebrooks-8451) + +## [22.0.0](https://github.com/apache/datafusion-python/tree/22.0.0) (2023-04-10) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/21.0.0...22.0.0) + +**Merged pull requests:** + +- Fix invalid build yaml [#308](https://github.com/apache/datafusion-python/pull/308) (andygrove) +- Try fix release build [#309](https://github.com/apache/datafusion-python/pull/309) (andygrove) +- Fix release build [#310](https://github.com/apache/datafusion-python/pull/310) (andygrove) +- Enable datafusion-substrait protoc feature, to remove compile-time dependency on protoc [#312](https://github.com/apache/datafusion-python/pull/312) (andygrove) +- Fix Mac/Win release builds in CI [#313](https://github.com/apache/datafusion-python/pull/313) (andygrove) +- install protoc in docs workflow [#314](https://github.com/apache/datafusion-python/pull/314) (andygrove) +- Fix documentation generation in CI [#315](https://github.com/apache/datafusion-python/pull/315) (andygrove) +- Source wheel fix [#319](https://github.com/apache/datafusion-python/pull/319) (andygrove) + +## [21.0.0](https://github.com/apache/datafusion-python/tree/21.0.0) (2023-03-30) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/20.0.0...21.0.0) + +**Merged pull requests:** + +- minor: Fix minor warning on unused import [#289](https://github.com/apache/datafusion-python/pull/289) (viirya) +- feature: Implement `describe()` method [#293](https://github.com/apache/datafusion-python/pull/293) (simicd) +- fix: Printed results not visible in debugger & 
notebooks [#296](https://github.com/apache/datafusion-python/pull/296) (simicd) +- add package.include and remove wildcard dependency [#295](https://github.com/apache/datafusion-python/pull/295) (andygrove) +- Update main branch name in docs workflow [#303](https://github.com/apache/datafusion-python/pull/303) (andygrove) +- Upgrade to DF 21 [#301](https://github.com/apache/datafusion-python/pull/301) (andygrove) + +## [20.0.0](https://github.com/apache/datafusion-python/tree/20.0.0) (2023-03-17) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/0.8.0...20.0.0) + +**Implemented enhancements:** + +- Empty relation bindings [#208](https://github.com/apache/datafusion-python/pull/208) (jdye64) +- wrap display_name and canonical_name functions [#214](https://github.com/apache/datafusion-python/pull/214) (jdye64) +- Add PyAlias bindings [#216](https://github.com/apache/datafusion-python/pull/216) (jdye64) +- Add bindings for scalar_variable [#218](https://github.com/apache/datafusion-python/pull/218) (jdye64) +- Bindings for LIKE type expressions [#220](https://github.com/apache/datafusion-python/pull/220) (jdye64) +- Bool expr bindings [#223](https://github.com/apache/datafusion-python/pull/223) (jdye64) +- Between bindings [#229](https://github.com/apache/datafusion-python/pull/229) (jdye64) +- Add bindings for GetIndexedField [#227](https://github.com/apache/datafusion-python/pull/227) (jdye64) +- Add bindings for case, cast, and trycast [#232](https://github.com/apache/datafusion-python/pull/232) (jdye64) +- add remaining expr bindings [#233](https://github.com/apache/datafusion-python/pull/233) (jdye64) +- feature: Additional export methods [#236](https://github.com/apache/datafusion-python/pull/236) (simicd) +- Add Python wrapper for LogicalPlan::Union [#240](https://github.com/apache/datafusion-python/pull/240) (iajoiner) +- feature: Create dataframe from pandas, polars, dictionary, list or pyarrow Table [#242](https://github.com/apache/datafusion-python/pull/242) (simicd) +- Add Python wrappers for `LogicalPlan::Join` and `LogicalPlan::CrossJoin` [#246](https://github.com/apache/datafusion-python/pull/246) (iajoiner) +- feature: Set table name from ctx functions [#260](https://github.com/apache/datafusion-python/pull/260) (simicd) +- Explain bindings [#264](https://github.com/apache/datafusion-python/pull/264) (jdye64) +- Extension bindings [#266](https://github.com/apache/datafusion-python/pull/266) (jdye64) +- Subquery alias bindings [#269](https://github.com/apache/datafusion-python/pull/269) (jdye64) +- Create memory table [#271](https://github.com/apache/datafusion-python/pull/271) (jdye64) +- Create view bindings [#273](https://github.com/apache/datafusion-python/pull/273) (jdye64) +- Re-export Datafusion dependencies [#277](https://github.com/apache/datafusion-python/pull/277) (jdye64) +- Distinct bindings [#275](https://github.com/apache/datafusion-python/pull/275) (jdye64) +- Drop table bindings [#283](https://github.com/apache/datafusion-python/pull/283) (jdye64) +- Bindings for LogicalPlan::Repartition [#285](https://github.com/apache/datafusion-python/pull/285) (jdye64) +- Expand Rust return type support for Arrow DataTypes in ScalarValue [#287](https://github.com/apache/datafusion-python/pull/287) (jdye64) + +**Documentation updates:** + +- docs: Example of calling Python UDF & UDAF in SQL [#258](https://github.com/apache/datafusion-python/pull/258) (simicd) + +**Merged pull requests:** + +- Minor docs updates 
[#210](https://github.com/apache/datafusion-python/pull/210) (andygrove) +- Empty relation bindings [#208](https://github.com/apache/datafusion-python/pull/208) (jdye64) +- wrap display_name and canonical_name functions [#214](https://github.com/apache/datafusion-python/pull/214) (jdye64) +- Add PyAlias bindings [#216](https://github.com/apache/datafusion-python/pull/216) (jdye64) +- Add bindings for scalar_variable [#218](https://github.com/apache/datafusion-python/pull/218) (jdye64) +- Bindings for LIKE type expressions [#220](https://github.com/apache/datafusion-python/pull/220) (jdye64) +- Bool expr bindings [#223](https://github.com/apache/datafusion-python/pull/223) (jdye64) +- Between bindings [#229](https://github.com/apache/datafusion-python/pull/229) (jdye64) +- Add bindings for GetIndexedField [#227](https://github.com/apache/datafusion-python/pull/227) (jdye64) +- Add bindings for case, cast, and trycast [#232](https://github.com/apache/datafusion-python/pull/232) (jdye64) +- add remaining expr bindings [#233](https://github.com/apache/datafusion-python/pull/233) (jdye64) +- Pre-commit hooks [#228](https://github.com/apache/datafusion-python/pull/228) (jdye64) +- Implement new release process [#149](https://github.com/apache/datafusion-python/pull/149) (andygrove) +- feature: Additional export methods [#236](https://github.com/apache/datafusion-python/pull/236) (simicd) +- Add Python wrapper for LogicalPlan::Union [#240](https://github.com/apache/datafusion-python/pull/240) (iajoiner) +- feature: Create dataframe from pandas, polars, dictionary, list or pyarrow Table [#242](https://github.com/apache/datafusion-python/pull/242) (simicd) +- Fix release instructions [#238](https://github.com/apache/datafusion-python/pull/238) (andygrove) +- Add Python wrappers for `LogicalPlan::Join` and `LogicalPlan::CrossJoin` [#246](https://github.com/apache/datafusion-python/pull/246) (iajoiner) +- docs: Example of calling Python UDF & UDAF in SQL [#258](https://github.com/apache/datafusion-python/pull/258) (simicd) +- feature: Set table name from ctx functions [#260](https://github.com/apache/datafusion-python/pull/260) (simicd) +- Upgrade to DataFusion 19 [#262](https://github.com/apache/datafusion-python/pull/262) (andygrove) +- Explain bindings [#264](https://github.com/apache/datafusion-python/pull/264) (jdye64) +- Extension bindings [#266](https://github.com/apache/datafusion-python/pull/266) (jdye64) +- Subquery alias bindings [#269](https://github.com/apache/datafusion-python/pull/269) (jdye64) +- Create memory table [#271](https://github.com/apache/datafusion-python/pull/271) (jdye64) +- Create view bindings [#273](https://github.com/apache/datafusion-python/pull/273) (jdye64) +- Re-export Datafusion dependencies [#277](https://github.com/apache/datafusion-python/pull/277) (jdye64) +- Distinct bindings [#275](https://github.com/apache/datafusion-python/pull/275) (jdye64) +- build(deps): bump actions/checkout from 2 to 3 [#244](https://github.com/apache/datafusion-python/pull/244) (dependabot[bot]) +- build(deps): bump actions/upload-artifact from 2 to 3 [#245](https://github.com/apache/datafusion-python/pull/245) (dependabot[bot]) +- build(deps): bump actions/download-artifact from 2 to 3 [#243](https://github.com/apache/datafusion-python/pull/243) (dependabot[bot]) +- Use DataFusion 20 [#278](https://github.com/apache/datafusion-python/pull/278) (andygrove) +- Drop table bindings [#283](https://github.com/apache/datafusion-python/pull/283) (jdye64) +- Bindings for 
LogicalPlan::Repartition [#285](https://github.com/apache/datafusion-python/pull/285) (jdye64) +- Expand Rust return type support for Arrow DataTypes in ScalarValue [#287](https://github.com/apache/datafusion-python/pull/287) (jdye64) + +## [0.8.0](https://github.com/apache/datafusion-python/tree/0.8.0) (2023-02-22) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/0.8.0-rc1...0.8.0) + +**Implemented enhancements:** + +- Add support for cuDF physical execution engine [\#202](https://github.com/apache/datafusion-python/issues/202) +- Make it easier to create a Pandas dataframe from DataFusion query results [\#139](https://github.com/apache/datafusion-python/issues/139) + +**Fixed bugs:** + +- Build error: could not compile `thiserror` due to 2 previous errors [\#69](https://github.com/apache/datafusion-python/issues/69) + +**Closed issues:** + +- Integrate with the new `object_store` crate [\#22](https://github.com/apache/datafusion-python/issues/22) + +**Merged pull requests:** + +- Update README in preparation for 0.8 release [\#206](https://github.com/apache/datafusion-python/pull/206) ([andygrove](https://github.com/andygrove)) +- Add support for cudf as a physical execution engine [\#205](https://github.com/apache/datafusion-python/pull/205) ([jdye64](https://github.com/jdye64)) +- Run `maturin develop` instead of `cargo build` in verification script [\#200](https://github.com/apache/datafusion-python/pull/200) ([andygrove](https://github.com/andygrove)) +- Add tests for recently added functionality [\#199](https://github.com/apache/datafusion-python/pull/199) ([andygrove](https://github.com/andygrove)) +- Implement `to_pandas()` [\#197](https://github.com/apache/datafusion-python/pull/197) ([simicd](https://github.com/simicd)) +- Add Python wrapper for LogicalPlan::Sort [\#196](https://github.com/apache/datafusion-python/pull/196) ([andygrove](https://github.com/andygrove)) +- Add Python wrapper for LogicalPlan::Aggregate [\#195](https://github.com/apache/datafusion-python/pull/195) ([andygrove](https://github.com/andygrove)) +- Add Python wrapper for LogicalPlan::Limit [\#193](https://github.com/apache/datafusion-python/pull/193) ([andygrove](https://github.com/andygrove)) +- Add Python wrapper for LogicalPlan::Filter [\#192](https://github.com/apache/datafusion-python/pull/192) ([andygrove](https://github.com/andygrove)) +- Add experimental support for executing SQL with Polars and Pandas [\#190](https://github.com/apache/datafusion-python/pull/190) ([andygrove](https://github.com/andygrove)) +- Update changelog for 0.8 release [\#188](https://github.com/apache/datafusion-python/pull/188) ([andygrove](https://github.com/andygrove)) +- Add ability to execute ExecutionPlan and get a stream of RecordBatch [\#186](https://github.com/apache/datafusion-python/pull/186) ([andygrove](https://github.com/andygrove)) +- Dffield bindings [\#185](https://github.com/apache/datafusion-python/pull/185) ([jdye64](https://github.com/jdye64)) +- Add bindings for DFSchema [\#183](https://github.com/apache/datafusion-python/pull/183) ([jdye64](https://github.com/jdye64)) +- test: Window functions [\#182](https://github.com/apache/datafusion-python/pull/182) ([simicd](https://github.com/simicd)) +- Add bindings for Projection [\#180](https://github.com/apache/datafusion-python/pull/180) ([jdye64](https://github.com/jdye64)) +- Table scan bindings [\#178](https://github.com/apache/datafusion-python/pull/178) ([jdye64](https://github.com/jdye64)) +- Make session configurable 
[\#176](https://github.com/apache/datafusion-python/pull/176) ([andygrove](https://github.com/andygrove)) +- Upgrade to DataFusion 18.0.0 [\#175](https://github.com/apache/datafusion-python/pull/175) ([andygrove](https://github.com/andygrove)) +- Use latest DataFusion rev in preparation for DF 18 release [\#174](https://github.com/apache/datafusion-python/pull/174) ([andygrove](https://github.com/andygrove)) +- Arrow type bindings [\#173](https://github.com/apache/datafusion-python/pull/173) ([jdye64](https://github.com/jdye64)) +- Pyo3 bump [\#171](https://github.com/apache/datafusion-python/pull/171) ([jdye64](https://github.com/jdye64)) +- feature: Add additional aggregation functions [\#170](https://github.com/apache/datafusion-python/pull/170) ([simicd](https://github.com/simicd)) +- Make from_substrait_plan return DataFrame instead of LogicalPlan [\#164](https://github.com/apache/datafusion-python/pull/164) ([andygrove](https://github.com/andygrove)) +- feature: Implement count method [\#163](https://github.com/apache/datafusion-python/pull/163) ([simicd](https://github.com/simicd)) +- CI Fixes [\#162](https://github.com/apache/datafusion-python/pull/162) ([jdye64](https://github.com/jdye64)) +- Upgrade to DataFusion 17 [\#160](https://github.com/apache/datafusion-python/pull/160) ([andygrove](https://github.com/andygrove)) +- feature: Improve string representation of datafusion classes [\#159](https://github.com/apache/datafusion-python/pull/159) ([simicd](https://github.com/simicd)) +- Make PyExecutionPlan.plan public [\#156](https://github.com/apache/datafusion-python/pull/156) ([andygrove](https://github.com/andygrove)) +- Expose methods on logical and execution plans [\#155](https://github.com/apache/datafusion-python/pull/155) ([andygrove](https://github.com/andygrove)) +- Fix clippy for new Rust version [\#154](https://github.com/apache/datafusion-python/pull/154) ([andygrove](https://github.com/andygrove)) +- Add DataFrame methods for accessing plans [\#153](https://github.com/apache/datafusion-python/pull/153) ([andygrove](https://github.com/andygrove)) +- Use DataFusion rev 5238e8c97f998b4d2cb9fab85fb182f325a1a7fb [\#150](https://github.com/apache/datafusion-python/pull/150) ([andygrove](https://github.com/andygrove)) +- build\(deps\): bump async-trait from 0.1.61 to 0.1.62 [\#148](https://github.com/apache/datafusion-python/pull/148) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Rename default branch from master to main [\#147](https://github.com/apache/datafusion-python/pull/147) ([andygrove](https://github.com/andygrove)) +- Substrait bindings [\#145](https://github.com/apache/datafusion-python/pull/145) ([jdye64](https://github.com/jdye64)) +- build\(deps\): bump uuid from 0.8.2 to 1.2.2 [\#143](https://github.com/apache/datafusion-python/pull/143) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Prepare for 0.8.0 release [\#141](https://github.com/apache/datafusion-python/pull/141) ([andygrove](https://github.com/andygrove)) +- Improve README and add more examples [\#137](https://github.com/apache/datafusion-python/pull/137) ([andygrove](https://github.com/andygrove)) +- test: Expand tests for built-in functions [\#129](https://github.com/apache/datafusion-python/pull/129) ([simicd](https://github.com/simicd)) +- build\(deps\): bump object_store from 0.5.2 to 0.5.3 [\#126](https://github.com/apache/datafusion-python/pull/126) ([dependabot[bot]](https://github.com/apps/dependabot)) +- build\(deps\): bump mimalloc from 0.1.32 to 0.1.34 
[\#125](https://github.com/apache/datafusion-python/pull/125) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Introduce conda directory containing datafusion-dev.yaml conda enviro… [\#124](https://github.com/apache/datafusion-python/pull/124) ([jdye64](https://github.com/jdye64)) +- build\(deps\): bump bzip2 from 0.4.3 to 0.4.4 [\#121](https://github.com/apache/datafusion-python/pull/121) ([dependabot[bot]](https://github.com/apps/dependabot)) +- build\(deps\): bump tokio from 1.23.0 to 1.24.1 [\#119](https://github.com/apache/datafusion-python/pull/119) ([dependabot[bot]](https://github.com/apps/dependabot)) +- build\(deps\): bump async-trait from 0.1.60 to 0.1.61 [\#118](https://github.com/apache/datafusion-python/pull/118) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Upgrade to DataFusion 16.0.0 [\#115](https://github.com/apache/datafusion-python/pull/115) ([andygrove](https://github.com/andygrove)) +- Bump async-trait from 0.1.57 to 0.1.60 [\#114](https://github.com/apache/datafusion-python/pull/114) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump object_store from 0.5.1 to 0.5.2 [\#112](https://github.com/apache/datafusion-python/pull/112) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Bump tokio from 1.21.2 to 1.23.0 [\#109](https://github.com/apache/datafusion-python/pull/109) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Add entries for publishing production \(asf-site\) and staging docs [\#107](https://github.com/apache/datafusion-python/pull/107) ([martin-g](https://github.com/martin-g)) +- Add a workflow that builds the docs and deploys them at staged or production [\#104](https://github.com/apache/datafusion-python/pull/104) ([martin-g](https://github.com/martin-g)) +- Upgrade to DataFusion 15.0.0 [\#103](https://github.com/apache/datafusion-python/pull/103) ([andygrove](https://github.com/andygrove)) +- build\(deps\): bump futures from 0.3.24 to 0.3.25 [\#102](https://github.com/apache/datafusion-python/pull/102) ([dependabot[bot]](https://github.com/apps/dependabot)) +- build\(deps\): bump pyo3 from 0.17.2 to 0.17.3 [\#101](https://github.com/apache/datafusion-python/pull/101) ([dependabot[bot]](https://github.com/apps/dependabot)) +- build\(deps\): bump mimalloc from 0.1.30 to 0.1.32 [\#98](https://github.com/apache/datafusion-python/pull/98) ([dependabot[bot]](https://github.com/apps/dependabot)) +- build\(deps\): bump rand from 0.7.3 to 0.8.5 [\#97](https://github.com/apache/datafusion-python/pull/97) ([dependabot[bot]](https://github.com/apps/dependabot)) +- Fix GitHub actions warnings [\#95](https://github.com/apache/datafusion-python/pull/95) ([martin-g](https://github.com/martin-g)) +- Fixes \#81 - Add CI workflow for source distribution [\#93](https://github.com/apache/datafusion-python/pull/93) ([martin-g](https://github.com/martin-g)) +- post-release updates [\#91](https://github.com/apache/datafusion-python/pull/91) ([andygrove](https://github.com/andygrove)) +- Build for manylinux 2014 [\#88](https://github.com/apache/datafusion-python/pull/88) ([martin-g](https://github.com/martin-g)) +- update release readme tag [\#86](https://github.com/apache/datafusion-python/pull/86) ([Jimexist](https://github.com/Jimexist)) +- Upgrade Maturin to 0.14.2 [\#85](https://github.com/apache/datafusion-python/pull/85) ([martin-g](https://github.com/martin-g)) +- Update release instructions [\#83](https://github.com/apache/datafusion-python/pull/83) ([andygrove](https://github.com/andygrove)) +- \[Functions\] - Add 
python function binding to `functions` [\#73](https://github.com/apache/datafusion-python/pull/73) ([francis-du](https://github.com/francis-du)) + +## [0.8.0-rc1](https://github.com/apache/datafusion-python/tree/0.8.0-rc1) (2023-02-17) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/0.7.0-rc2...0.8.0-rc1) + +**Implemented enhancements:** + +- Add bindings for datafusion_common::DFField [\#184](https://github.com/apache/datafusion-python/issues/184) +- Add bindings for DFSchema/DFSchemaRef [\#181](https://github.com/apache/datafusion-python/issues/181) +- Add bindings for datafusion_expr Projection [\#179](https://github.com/apache/datafusion-python/issues/179) +- Add bindings for `TableScan` struct from `datafusion_expr::TableScan` [\#177](https://github.com/apache/datafusion-python/issues/177) +- Add a "mapping" struct for types [\#172](https://github.com/apache/datafusion-python/issues/172) +- Improve string representation of datafusion classes \(dataframe, context, expression, ...\) [\#158](https://github.com/apache/datafusion-python/issues/158) +- Add DataFrame count method [\#151](https://github.com/apache/datafusion-python/issues/151) +- \[REQUEST\] Github Actions Improvements [\#146](https://github.com/apache/datafusion-python/issues/146) +- Change default branch name from master to main [\#144](https://github.com/apache/datafusion-python/issues/144) +- Bump pyo3 to 0.18.0 [\#140](https://github.com/apache/datafusion-python/issues/140) +- Add script for Python linting [\#134](https://github.com/apache/datafusion-python/issues/134) +- Add Python bindings for substrait module [\#132](https://github.com/apache/datafusion-python/issues/132) +- Expand unit tests for built-in functions [\#128](https://github.com/apache/datafusion-python/issues/128) +- support creating arrow-datafusion-python conda environment [\#122](https://github.com/apache/datafusion-python/issues/122) +- Build Python source distribution in GitHub workflow [\#81](https://github.com/apache/datafusion-python/issues/81) +- EPIC: Add all functions to python binding `functions` [\#72](https://github.com/apache/datafusion-python/issues/72) + +**Fixed bugs:** + +- Build is broken [\#161](https://github.com/apache/datafusion-python/issues/161) +- Out of memory when sorting [\#157](https://github.com/apache/datafusion-python/issues/157) +- window_lead test appears to be non-deterministic [\#135](https://github.com/apache/datafusion-python/issues/135) +- Reading csv does not work [\#130](https://github.com/apache/datafusion-python/issues/130) +- Github actions produce a lot of warnings [\#94](https://github.com/apache/datafusion-python/issues/94) +- ASF source release tarball has wrong directory name [\#90](https://github.com/apache/datafusion-python/issues/90) +- Python Release Build failing after upgrading to maturin 14.2 [\#87](https://github.com/apache/datafusion-python/issues/87) +- Maturin build hangs on Linux ARM64 [\#84](https://github.com/apache/datafusion-python/issues/84) +- Cannot install on Mac M1 from source tarball from testpypi [\#82](https://github.com/apache/datafusion-python/issues/82) +- ImportPathMismatchError when running pytest locally [\#77](https://github.com/apache/datafusion-python/issues/77) + +**Closed issues:** + +- Publish documentation for Python bindings [\#39](https://github.com/apache/datafusion-python/issues/39) +- Add Python binding for `approx_median` [\#32](https://github.com/apache/datafusion-python/issues/32) +- Release version 0.7.0 
[\#7](https://github.com/apache/datafusion-python/issues/7) + +## [0.7.0-rc2](https://github.com/apache/datafusion-python/tree/0.7.0-rc2) (2022-11-26) + +[Full Changelog](https://github.com/apache/datafusion-python/compare/0.7.0...0.7.0-rc2) + +## [Unreleased](https://github.com/datafusion-contrib/datafusion-python/tree/HEAD) + +[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.1...HEAD) + +**Merged pull requests:** + +- use \_\_getitem\_\_ for df column selection [\#41](https://github.com/datafusion-contrib/datafusion-python/pull/41) ([Jimexist](https://github.com/Jimexist)) +- fix demo in readme [\#40](https://github.com/datafusion-contrib/datafusion-python/pull/40) ([Jimexist](https://github.com/Jimexist)) +- Implement select_columns [\#39](https://github.com/datafusion-contrib/datafusion-python/pull/39) ([andygrove](https://github.com/andygrove)) +- update readme and changelog [\#38](https://github.com/datafusion-contrib/datafusion-python/pull/38) ([Jimexist](https://github.com/Jimexist)) +- Add PyDataFrame.explain [\#36](https://github.com/datafusion-contrib/datafusion-python/pull/36) ([andygrove](https://github.com/andygrove)) +- Release 0.5.0 [\#34](https://github.com/datafusion-contrib/datafusion-python/pull/34) ([Jimexist](https://github.com/Jimexist)) +- disable nightly in workflow [\#33](https://github.com/datafusion-contrib/datafusion-python/pull/33) ([Jimexist](https://github.com/Jimexist)) +- update requirements to 37 and 310, update readme [\#32](https://github.com/datafusion-contrib/datafusion-python/pull/32) ([Jimexist](https://github.com/Jimexist)) +- Add custom global allocator [\#30](https://github.com/datafusion-contrib/datafusion-python/pull/30) ([matthewmturner](https://github.com/matthewmturner)) +- Remove pandas dependency [\#25](https://github.com/datafusion-contrib/datafusion-python/pull/25) ([matthewmturner](https://github.com/matthewmturner)) +- upgrade datafusion and pyo3 [\#20](https://github.com/datafusion-contrib/datafusion-python/pull/20) ([Jimexist](https://github.com/Jimexist)) +- update maturin 0.12+ [\#17](https://github.com/datafusion-contrib/datafusion-python/pull/17) ([Jimexist](https://github.com/Jimexist)) +- Update README.md [\#16](https://github.com/datafusion-contrib/datafusion-python/pull/16) ([Jimexist](https://github.com/Jimexist)) +- apply cargo clippy --fix [\#15](https://github.com/datafusion-contrib/datafusion-python/pull/15) ([Jimexist](https://github.com/Jimexist)) +- update test workflow to include rust clippy and check [\#14](https://github.com/datafusion-contrib/datafusion-python/pull/14) ([Jimexist](https://github.com/Jimexist)) +- use maturin 0.12.6 [\#13](https://github.com/datafusion-contrib/datafusion-python/pull/13) ([Jimexist](https://github.com/Jimexist)) +- apply cargo fmt [\#12](https://github.com/datafusion-contrib/datafusion-python/pull/12) ([Jimexist](https://github.com/Jimexist)) +- use stable not nightly [\#11](https://github.com/datafusion-contrib/datafusion-python/pull/11) ([Jimexist](https://github.com/Jimexist)) +- ci: test against more compilers, setup clippy and fix clippy lints [\#9](https://github.com/datafusion-contrib/datafusion-python/pull/9) ([cpcloud](https://github.com/cpcloud)) +- Fix use of importlib.metadata and unify requirements.txt [\#8](https://github.com/datafusion-contrib/datafusion-python/pull/8) ([cpcloud](https://github.com/cpcloud)) +- Ship the Cargo.lock file in the source distribution [\#7](https://github.com/datafusion-contrib/datafusion-python/pull/7) 
([cpcloud](https://github.com/cpcloud)) +- add \_\_version\_\_ attribute to datafusion object [\#3](https://github.com/datafusion-contrib/datafusion-python/pull/3) ([tfeda](https://github.com/tfeda)) +- fix ci by fixing directories [\#2](https://github.com/datafusion-contrib/datafusion-python/pull/2) ([Jimexist](https://github.com/Jimexist)) +- setup workflow [\#1](https://github.com/datafusion-contrib/datafusion-python/pull/1) ([Jimexist](https://github.com/Jimexist)) + +## [0.5.1](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.1) (2022-03-15) + +[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.1-rc1...0.5.1) + +## [0.5.1-rc1](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.1-rc1) (2022-03-15) + +[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.0...0.5.1-rc1) + +## [0.5.0](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.0) (2022-03-10) + +[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.0-rc2...0.5.0) + +## [0.5.0-rc2](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.0-rc2) (2022-03-10) + +[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/0.5.0-rc1...0.5.0-rc2) + +**Closed issues:** + +- Add support for Ballista [\#37](https://github.com/datafusion-contrib/datafusion-python/issues/37) +- Implement DataFrame.explain [\#35](https://github.com/datafusion-contrib/datafusion-python/issues/35) + +## [0.5.0-rc1](https://github.com/datafusion-contrib/datafusion-python/tree/0.5.0-rc1) (2022-03-09) + +[Full Changelog](https://github.com/datafusion-contrib/datafusion-python/compare/4c98b8e9c3c3f8e2e6a8f2d1ffcfefda344c4680...0.5.0-rc1) + +**Closed issues:** + +- Investigate exposing additional optimizations [\#28](https://github.com/datafusion-contrib/datafusion-python/issues/28) +- Use custom allocator in Python build [\#27](https://github.com/datafusion-contrib/datafusion-python/issues/27) +- Why is pandas a requirement? [\#24](https://github.com/datafusion-contrib/datafusion-python/issues/24) +- Unable to build [\#18](https://github.com/datafusion-contrib/datafusion-python/issues/18) +- Setup CI against multiple Python version [\#6](https://github.com/datafusion-contrib/datafusion-python/issues/6) diff --git a/dev/check_crates_patch.py b/dev/check_crates_patch.py new file mode 100644 index 000000000..74e489e1f --- /dev/null +++ b/dev/check_crates_patch.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Check that no Cargo.toml files contain [patch.crates-io] entries. + +Release builds must not depend on patched crates. 
During development it is +common to temporarily patch crates-io dependencies, but those patches must +be removed before creating a release. + +An empty [patch.crates-io] section is allowed. +""" + +import sys +from pathlib import Path + +import tomllib + + +def main() -> int: + errors: list[str] = [] + for cargo_toml in sorted(Path().rglob("Cargo.toml")): + if "target" in cargo_toml.parts: + continue + with Path.open(cargo_toml, "rb") as f: + data = tomllib.load(f) + patch = data.get("patch", {}).get("crates-io", {}) + if patch: + errors.append(str(cargo_toml)) + for name, spec in patch.items(): + errors.append(f" {name} = {spec}") + + if errors: + print("ERROR: Release builds must not contain [patch.crates-io] entries.") + print() + for line in errors: + print(line) + print() + print("Remove all [patch.crates-io] entries before creating a release.") + return 1 + + print("OK: No [patch.crates-io] entries found.") + return 0 + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/dev/clean.sh b/dev/clean.sh new file mode 100755 index 000000000..0d86680e8 --- /dev/null +++ b/dev/clean.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +# + +# This cleans up the project by removing build artifacts and other generated files. + +# Function to remove a directory and print the action +remove_dir() { + if [ -d "$1" ]; then + echo "Removing directory: $1" + rm -rf "$1" + fi +} + +# Function to remove a file and print the action +remove_file() { + if [ -f "$1" ]; then + echo "Removing file: $1" + rm -f "$1" + fi +} + +# Remove .pytest_cache directory +remove_dir .pytest_cache/ + +# Remove target directory +remove_dir target/ + +# Remove any __pycache__ directories +find python/ -type d -name "__pycache__" -print | while read -r dir; do + remove_dir "$dir" +done + +# Remove pytest-coverage.lcov file +# remove_file .coverage +# remove_file pytest-coverage.lcov + +# Remove rust-coverage.lcov file +# remove_file rust-coverage.lcov + +# Remove pyo3 files +find python/ -type f -name '_internal.*.so' -print | while read -r file; do + remove_file "$file" +done + +echo "Cleanup complete." 
\ No newline at end of file diff --git a/dev/create_license.py b/dev/create_license.py index 2a67cb8fd..acbf8587c 100644 --- a/dev/create_license.py +++ b/dev/create_license.py @@ -20,12 +20,11 @@ import json import subprocess +from pathlib import Path -subprocess.check_output(["cargo", "install", "cargo-license"]) data = subprocess.check_output( [ - "cargo", - "license", + "cargo-license", "--avoid-build-deps", "--avoid-dev-deps", "--do-not-bundle", @@ -248,5 +247,5 @@ result += "------------------\n\n" result += f"### {name} {version}\n* source: [{repository}]({repository})\n* license: {license}\n\n" -with open("LICENSE.txt", "w") as f: +with Path.open("LICENSE.txt", "w") as f: f.write(result) diff --git a/dev/python_lint.sh b/dev/python_lint.sh index 3bc67fb12..2d867f29d 100755 --- a/dev/python_lint.sh +++ b/dev/python_lint.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -21,6 +21,6 @@ # DataFusion CI does set -e -source venv/bin/activate +source .venv/bin/activate flake8 --exclude venv,benchmarks/db-benchmark --ignore=E501,W503 black --line-length 79 . diff --git a/dev/release/README.md b/dev/release/README.md index c4372c832..ed28f4aa6 100644 --- a/dev/release/README.md +++ b/dev/release/README.md @@ -56,6 +56,8 @@ Before creating a new release: - a PR should be created and merged to update the major version number of the project - A new release branch should be created, such as `branch-0.8` +## Preparing a Release Candidate + ### Change Log We maintain a `CHANGELOG.md` so our users know what has been changed between releases. @@ -63,7 +65,7 @@ We maintain a `CHANGELOG.md` so our users know what has been changed between rel The changelog is generated using a Python script: ```bash -$ GITHUB_TOKEN= ./dev/release/generate-changelog.py apache/datafusion-python 24.0.0 HEAD > dev/changelog/25.0.0.md +$ GITHUB_TOKEN= ./dev/release/generate-changelog.py 24.0.0 HEAD 25.0.0 > dev/changelog/25.0.0.md ``` This script creates a changelog from GitHub PRs based on the labels associated with them as well as looking for @@ -76,21 +78,20 @@ Categorizing pull requests Generating changelog content ``` -This process is not fully automated, so there are some additional manual steps: - -- Add the ASF header to the generated file -- Add a link to this changelog from the top-level `/datafusion/CHANGELOG.md` -- Add the following content (copy from the previous version's changelog and update as appropriate: +### Update the version number -``` -## [24.0.0](https://github.com/apache/datafusion-python/tree/24.0.0) (2023-05-06) +The only place you should need to update the version is in the root `Cargo.toml`. +After updating the toml file, run `cargo update` to update the cargo lock file. +If you do not want to update all the dependencies, you can instead run `cargo build` +which should only update the version number for `datafusion-python`. -[Full Changelog](https://github.com/apache/datafusion-python/compare/23.0.0...24.0.0) -``` +### Tag the Repository -### Preparing a Release Candidate +Commit the changes to the changelog and version. -### Tag the Repository +Assuming you have set up a remote to the `apache` repository rather than your personal fork, +you need to push a tag to start the CI process for release candidates. The following assumes +the upstream repository is called `apache`. 
```bash git tag 0.8.0-rc1 @@ -103,7 +104,7 @@ git push apache 0.8.0-rc1 ./dev/release/create-tarball.sh 0.8.0 1 ``` -This will also create the email template to send to the mailing list. +This will also create the email template to send to the mailing list. Create a draft email using this content, but do not send until after completing the next step. @@ -153,16 +154,62 @@ This will create a file named `dist/datafusion-0.7.0.tar.gz`. Upload this to tes python3 -m twine upload --repository testpypi dist/datafusion-0.7.0.tar.gz ``` +### Run Verify Release Candidate Workflow + +Before sending the vote email, run the manually triggered GitHub Actions workflow +"Verify Release Candidate" and confirm all matrix jobs pass across the OS/architecture matrix +(for example, Linux, macOS, and Windows runners): + +1. Go to https://github.com/apache/datafusion-python/actions/workflows/verify-release-candidate.yml +2. Click "Run workflow" +3. Set `version` to the release version (for example, `52.0.0`) +4. Set `rc_number` to the RC number (for example, `0`) +5. Wait for all jobs to complete successfully + +Include a short note in the vote email template that this workflow was run across all OS/architecture +matrix entries and that all jobs passed. + +```text +Verification note: The manually triggered "Verify Release Candidate" workflow was run for version and rc_number across all configured OS/architecture matrix entries, and all matrix jobs completed successfully. +``` + ### Send the Email Send the email to start the vote. ## Verifying a Release -Install the release from testpypi: +Releases may be verified using `verify-release-candidate.sh`: ```bash -pip install --extra-index-url https://test.pypi.org/simple/ datafusion==0.7.0 +git clone https://github.com/apache/datafusion-python.git +dev/release/verify-release-candidate.sh 48.0.0 1 +``` + +Alternatively, one can run unit tests against a testpypi release candidate: + +```bash +# clone a fresh repo +git clone https://github.com/apache/datafusion-python.git +cd datafusion-python + +# checkout the release commit +git fetch --tags +git checkout 40.0.0-rc1 +git submodule update --init --recursive + +# create the env +python3 -m venv .venv +source .venv/bin/activate + +# install release candidate +pip install --extra-index-url https://test.pypi.org/simple/ datafusion==40.0.0 + +# install test dependencies +pip install pytest numpy pytest-asyncio + +# run the tests +pytest --import-mode=importlib python/tests -vv ``` Try running one of the examples from the top-level README, or write some custom Python code to query some available @@ -198,28 +245,9 @@ uploading them using `twine`: twine upload --repository pypi dist-release/* ``` -### Publish Python Artifacts to Anaconda +### Publish Python Artifacts to conda-forge -Publishing artifacts to Anaconda is similar to PyPi. First, Download the source tarball created in the previous step and untar it. - -```bash -# Assuming you have an existing conda environment named `datafusion-dev` if not see root README for instructions -conda activate datafusion-dev -conda build . -``` - -This will setup a virtual conda environment and build the artifacts inside of that virtual env. This step can take a few minutes as the entire build, host, and runtime environments are setup. Once complete a local filesystem path will be emitted for the location of the resulting package. Observe that path and copy to your clipboard. 
-
-Ex: `/home/conda/envs/datafusion/conda-bld/linux-64/datafusion-0.7.0.tar.bz2`
-
-Now you are ready to publish this resulting package to anaconda.org. This can be accomplished in a few simple steps.
-
-```bash
-# First login to Anaconda with the datafusion credentials
-anaconda login
-# Upload the package
-anaconda upload /home/conda/envs/datafusion/conda-bld/linux-64/datafusion-0.7.0.tar.bz2
-```
+PyPI packages are automatically uploaded to conda-forge via the [datafusion feedstock](https://github.com/conda-forge/datafusion-feedstock).

### Push the Release Tag

@@ -234,7 +262,7 @@ git push apache 0.8.0

Add the release to https://reporter.apache.org/addrelease.html?datafusion with a version name prefixed with `DATAFUSION-PYTHON`, for example `DATAFUSION-PYTHON-31.0.0`.

-The release information is used to generate a template for a board report (see example from Apache Arrow
+The release information is used to generate a template for a board report (see example from Apache Arrow
[here](https://github.com/apache/arrow/pull/14357)).

### Delete old RCs and Releases

diff --git a/dev/release/check-rat-report.py b/dev/release/check-rat-report.py
index d3dd7c5dd..72a35212e 100644
--- a/dev/release/check-rat-report.py
+++ b/dev/release/check-rat-report.py
@@ -21,6 +21,7 @@
import re
import sys
import xml.etree.ElementTree as ET
+from pathlib import Path

if len(sys.argv) != 3:
    sys.stderr.write("Usage: %s exclude_globs.lst rat_report.xml\n" % sys.argv[0])
@@ -29,7 +30,7 @@
exclude_globs_filename = sys.argv[1]
xml_filename = sys.argv[2]

-globs = [line.strip() for line in open(exclude_globs_filename, "r")]
+globs = [line.strip() for line in Path.open(exclude_globs_filename)]

tree = ET.parse(xml_filename)
root = tree.getroot()

diff --git a/dev/release/generate-changelog.py b/dev/release/generate-changelog.py
index af097ce98..d86736773 100755
--- a/dev/release/generate-changelog.py
+++ b/dev/release/generate-changelog.py
@@ -16,27 +16,25 @@
# limitations under the License.
 import argparse
-import sys
-from github import Github
 import os
 import re
+import subprocess
+import sys
+from github import Github
-
-def print_pulls(repo_name, title, pulls):
+
+def print_pulls(repo_name, title, pulls) -> None:
     if len(pulls) > 0:
-        print("**{}:**".format(title))
+        print(f"**{title}:**")
         print()
         for pull, commit in pulls:
-            url = "https://github.com/{}/pull/{}".format(repo_name, pull.number)
-            print(
-                "- {} [#{}]({}) ({})".format(
-                    pull.title, pull.number, url, commit.author.login
-                )
-            )
+            url = f"https://github.com/{repo_name}/pull/{pull.number}"
+            print(f"- {pull.title} [#{pull.number}]({url}) ({commit.author.login})")
         print()


-def generate_changelog(repo, repo_name, tag1, tag2):
+def generate_changelog(repo, repo_name, tag1, tag2, version) -> None:
     # get a list of commits between two tags
     print(f"Fetching list of commits between {tag1} and {tag2}", file=sys.stderr)
     comparison = repo.compare(tag1, tag2)
@@ -55,18 +53,18 @@ def generate_changelog(repo, repo_name, tag1, tag2):
         all_pulls.append((pull, commit))

     # we split the pulls into categories
-    # TODO: make categories configurable
     breaking = []
     bugs = []
     docs = []
     enhancements = []
+    performance = []
+    other = []

     # categorize the pull requests based on GitHub labels
     print("Categorizing pull requests", file=sys.stderr)
     for pull, commit in all_pulls:
         # see if PR title uses Conventional Commits
         cc_type = ""
-        # cc_scope = ''
         cc_breaking = ""
         parts = re.findall(r"^([a-z]+)(\([a-z]+\))?(!)?:", pull.title)
         if len(parts) == 1:
@@ -76,43 +74,105 @@ def generate_changelog(repo, repo_name, tag1, tag2):
             cc_breaking = parts_tuple[2] == "!"

         labels = [label.name for label in pull.labels]
         if "api change" in labels or cc_breaking:
             breaking.append((pull, commit))
         elif "bug" in labels or cc_type == "fix":
             bugs.append((pull, commit))
+        elif "performance" in labels or cc_type == "perf":
+            performance.append((pull, commit))
         elif "enhancement" in labels or cc_type == "feat":
             enhancements.append((pull, commit))
-        elif "documentation" in labels or cc_type == "docs":
+        elif "documentation" in labels or cc_type == "docs" or cc_type == "doc":
             docs.append((pull, commit))
+        else:
+            other.append((pull, commit))

     # produce the changelog content
     print("Generating changelog content", file=sys.stderr)
+
+    # ASF header
+    print("""<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+-->\n""")
+
+    print(f"# Apache DataFusion Python {version} Changelog\n")
+
+    # get the number of commits
+    commit_count = subprocess.check_output(
+        f"git log --pretty=oneline {tag1}..{tag2} | wc -l", shell=True, text=True
+    ).strip()
+
+    # get number of contributors
+    contributor_count = subprocess.check_output(
+        f"git shortlog -sn {tag1}..{tag2} | wc -l", shell=True, text=True
+    ).strip()
+
+    print(
+        f"This release consists of {commit_count} commits from {contributor_count} contributors. "
+        f"See credits at the end of this changelog for more information.\n"
+    )
+
+    print_pulls(repo_name, "Breaking changes", breaking)
+    print_pulls(repo_name, "Performance related", performance)
     print_pulls(repo_name, "Implemented enhancements", enhancements)
     print_pulls(repo_name, "Fixed bugs", bugs)
     print_pulls(repo_name, "Documentation updates", docs)
-    print_pulls(repo_name, "Merged pull requests", all_pulls)
+    print_pulls(repo_name, "Other", other)
+
+    # show code contributions
+    credits = subprocess.check_output(
+        f"git shortlog -sn {tag1}..{tag2}", shell=True, text=True
+    ).rstrip()
+
+    print("## Credits\n")
+    print(
+        "Thank you to everyone who contributed to this release. 
Here is a breakdown of commits (PRs merged) " + "per contributor.\n" + ) + print("```") + print(credits) + print("```\n") + + print( + "Thank you also to everyone who contributed in other ways such as filing issues, reviewing " + "PRs, and providing feedback on this release.\n" + ) -def cli(args=None): +def cli(args=None) -> None: """Process command line arguments.""" if not args: args = sys.argv[1:] parser = argparse.ArgumentParser() + parser.add_argument("tag1", help="The previous commit or tag (e.g. 0.1.0)") + parser.add_argument("tag2", help="The current commit or tag (e.g. HEAD)") parser.add_argument( - "project", help="The project name e.g. apache/datafusion-python" + "version", help="The version number to include in the changelog" ) - parser.add_argument("tag1", help="The previous release tag") - parser.add_argument("tag2", help="The current release tag") args = parser.parse_args() token = os.getenv("GITHUB_TOKEN") + project = "apache/datafusion-python" g = Github(token) - repo = g.get_repo(args.project) - generate_changelog(repo, args.project, args.tag1, args.tag2) + repo = g.get_repo(project) + generate_changelog(repo, project, args.tag1, args.tag2, args.version) if __name__ == "__main__": diff --git a/dev/release/rat_exclude_files.txt b/dev/release/rat_exclude_files.txt index f65ddd06e..dcd5d9aac 100644 --- a/dev/release/rat_exclude_files.txt +++ b/dev/release/rat_exclude_files.txt @@ -45,4 +45,6 @@ Cargo.lock .github/* benchmarks/tpch/queries/q*.sql benchmarks/tpch/create_tables.sql -.cargo/config.toml \ No newline at end of file +.cargo/config.toml +**/.cargo/config.toml +uv.lock \ No newline at end of file diff --git a/dev/release/release-tarball.sh b/dev/release/release-tarball.sh index 8c305a676..2b82d1bac 100755 --- a/dev/release/release-tarball.sh +++ b/dev/release/release-tarball.sh @@ -43,6 +43,13 @@ fi version=$1 rc=$2 +read -r -p "Proceed to release tarball for ${version}-rc${rc}? [y/N]: " answer +answer=${answer:-no} +if [ "${answer}" != "y" ]; then + echo "Cancelled tarball release!" + exit 1 +fi + tmp_dir=tmp-apache-datafusion-python-dist echo "Recreate temporary directory: ${tmp_dir}" diff --git a/dev/release/verify-release-candidate.sh b/dev/release/verify-release-candidate.sh index 14c0baee8..9591e0335 100755 --- a/dev/release/verify-release-candidate.sh +++ b/dev/release/verify-release-candidate.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file @@ -106,14 +106,23 @@ setup_tempdir() { } test_source_distribution() { - # install rust toolchain in a similar fashion like test-miniconda + # install rust toolchain export RUSTUP_HOME=$PWD/test-rustup export CARGO_HOME=$PWD/test-rustup curl https://sh.rustup.rs -sSf | sh -s -- -y --no-modify-path - export PATH=$RUSTUP_HOME/bin:$PATH - source $RUSTUP_HOME/env + # On Unix, rustup creates an env file. On Windows GitHub runners (MSYS bash), + # that file may not exist, so fall back to adding Cargo bin directly. 
+ if [ -f "$CARGO_HOME/env" ]; then + # shellcheck disable=SC1090 + source "$CARGO_HOME/env" + elif [ -f "$RUSTUP_HOME/env" ]; then + # shellcheck disable=SC1090 + source "$RUSTUP_HOME/env" + else + export PATH="$CARGO_HOME/bin:$PATH" + fi # build and test rust @@ -125,11 +134,21 @@ test_source_distribution() { git clone https://github.com/apache/arrow-testing.git testing git clone https://github.com/apache/parquet-testing.git parquet-testing - python3 -m venv venv - source venv/bin/activate - python3 -m pip install -U pip - python3 -m pip install -r requirements-310.txt - maturin develop + python3 -m venv .venv + if [ -x ".venv/bin/python" ]; then + VENV_PYTHON=".venv/bin/python" + elif [ -x ".venv/Scripts/python.exe" ]; then + VENV_PYTHON=".venv/Scripts/python.exe" + elif [ -x ".venv/Scripts/python" ]; then + VENV_PYTHON=".venv/Scripts/python" + else + echo "Unable to find python executable in virtual environment" + exit 1 + fi + + "$VENV_PYTHON" -m pip install -U pip + "$VENV_PYTHON" -m pip install -U maturin + "$VENV_PYTHON" -m maturin develop #TODO: we should really run tests here as well #python3 -m pytest diff --git a/dev/rust_lint.sh b/dev/rust_lint.sh index b1285cbc3..eeb9e2302 100755 --- a/dev/rust_lint.sh +++ b/dev/rust_lint.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file diff --git a/docs/.gitignore b/docs/.gitignore new file mode 100644 index 000000000..6e8a53b6f --- /dev/null +++ b/docs/.gitignore @@ -0,0 +1,4 @@ +pokemon.csv +yellow_trip_data.parquet +yellow_tripdata_2021-01.parquet + diff --git a/docs/Makefile b/docs/Makefile index e65c8e250..49ebae372 100644 --- a/docs/Makefile +++ b/docs/Makefile @@ -35,4 +35,4 @@ help: # Catch-all target: route all unknown targets to Sphinx using the new # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). %: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) \ No newline at end of file + @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) --fail-on-warning \ No newline at end of file diff --git a/docs/README.md b/docs/README.md index b4b94120e..502f1c2a1 100644 --- a/docs/README.md +++ b/docs/README.md @@ -26,42 +26,32 @@ when changes are merged to the main branch. ## Dependencies It's recommended to install build dependencies and build the documentation -inside a Python `venv`. +inside a Python `venv` using `uv`. To prepare building the documentation run the following on the root level of the project: -1. Set up virtual environment if it was not already created - ```bash - python3 -m venv venv - ``` -1. Activate virtual environment - ```bash - source venv/bin/activate - ``` -1. Install Datafusion's Python dependencies - ```bash - pip install -r requirements-310.txt - ``` -1. Install documentation dependencies - ```bash - pip install -r docs/requirements.txt - ``` +```bash +# Set up a virtual environment with the documentation dependencies +uv sync --dev --group docs --no-install-package datafusion +``` ## Build & Preview Run the provided script to build the HTML pages. ```bash -cd docs -./build.sh +# Build the repository +uv run --no-project maturin develop --uv +# Build the documentation +uv run --no-project docs/build.sh ``` -The HTML will be generated into a `build` directory. +The HTML will be generated into a `build` directory in `docs`. Preview the site on Linux by running this command. 
```bash
-firefox build/html/index.html
+firefox docs/build/html/index.html
```

## Release Process

@@ -69,7 +59,7 @@ firefox build/html/index.html
This documentation is hosted at https://datafusion.apache.org/python

When the PR is merged to the `main` branch of the DataFusion
-repository, a [github workflow](https://github.com/apache/datafusion-python/blob/main/.github/workflows/docs.yaml) which:
+repository, a [github workflow](https://github.com/apache/datafusion-python/blob/main/.github/workflows/build.yml) runs which:

1. Builds the html content
2. Pushes the html content to the [`asf-site`](https://github.com/apache/datafusion-python/tree/asf-site) branch in this repository.

@@ -77,4 +67,4 @@ repository, a [github workflow](https://github.com/apache/datafusion-python/blob
The Apache Software Foundation provides https://arrow.apache.org/, which serves content
based on the configuration in [.asf.yaml](https://github.com/apache/datafusion-python/blob/main/.asf.yaml),
-which specifies the target as https://datafusion.apache.org/python. \ No newline at end of file
+which specifies the target as https://datafusion.apache.org/python.
diff --git a/docs/build.sh b/docs/build.sh
index 7e8bb0b54..f73330323 100755
--- a/docs/build.sh
+++ b/docs/build.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
@@ -19,8 +19,23 @@
#
set -e
+
+original_dir=$(pwd)
+script_dir=$(dirname "$(realpath "$0")")
+cd "$script_dir" || exit
+
+if [ ! -f pokemon.csv ]; then
+  curl -O https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv
+fi
+
+if [ ! -f yellow_tripdata_2021-01.parquet ]; then
+  curl -O https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_2021-01.parquet
+fi
+
rm -rf build 2> /dev/null
rm -rf temp 2> /dev/null
mkdir temp
cp -rf source/* temp/
-make SOURCEDIR=`pwd`/temp html \ No newline at end of file
+make SOURCEDIR=`pwd`/temp html
+
+cd "$original_dir" || exit
diff --git a/docs/mdbook/src/index.md b/docs/mdbook/src/index.md
index 3cd0fec1d..2c1d217f8 100644
--- a/docs/mdbook/src/index.md
+++ b/docs/mdbook/src/index.md
@@ -18,7 +18,7 @@
DataFusion is a blazing fast query engine that lets you run data analyses quickly and reliably.

-DataFusion is written in Rust, but also exposes Python and SQL bindings, so you can easily query data in your langauge of choice. You don't need to know any Rust to be a happy and productive user of DataFusion.
+DataFusion is written in Rust, but also exposes Python and SQL bindings, so you can easily query data in your language of choice. You don't need to know any Rust to be a happy and productive user of DataFusion.

DataFusion lets you run queries faster than pandas. Let's compare query runtimes for a 5GB CSV file with 100 million rows of data.
diff --git a/docs/mdbook/src/installation.md b/docs/mdbook/src/installation.md
index ba00c8b80..b29f3b66b 100644
--- a/docs/mdbook/src/installation.md
+++ b/docs/mdbook/src/installation.md
@@ -18,44 +18,45 @@
DataFusion is easy to install, just like any other Python library.

-## Using pip
+## Using uv

-``` bash
-pip install datafusion
-```
+If you do not yet have a virtual environment, create one:

-## Conda & JupyterLab setup
+```bash
+uv venv
+```

-This section explains how to install DataFusion in a conda environment with other libraries that allow for a nice Jupyter workflow. This setup is completely optional.
These steps are only needed if you'd like to run DataFusion in a Jupyter notebook and have an interface like this:
+You can add datafusion to your virtual environment with the usual:
-![DataFusion in Jupyter](https://github.com/MrPowers/datafusion-book/raw/main/src/images/datafusion-jupyterlab.png)
+```bash
+uv pip install datafusion
+```
-Create a conda environment with DataFusion, Jupyter, and other useful dependencies in the `datafusion-env.yml` file:
+Or, to add to a project:
+```bash
+uv add datafusion
```
-name: datafusion-env
-channels:
-  - conda-forge
-  - defaults
-dependencies:
-  - python=3.9
-  - ipykernel
-  - nb_conda
-  - jupyterlab
-  - jupyterlab_code_formatter
-  - isort
-  - black
-  - pip
-  - pip:
-    - datafusion
+## Using pip
+
+``` bash
+pip install datafusion
```
-Create the environment with `conda env create -f datafusion-env.yml`.
+## uv & JupyterLab setup
-Activate the environment with `conda activate datafusion-env`.
+This section explains how to install DataFusion in a uv environment with other libraries that allow for a nice Jupyter workflow. This setup is completely optional. These steps are only needed if you'd like to run DataFusion in a Jupyter notebook and have an interface like this:
-Run `jupyter lab` or open the [JupyterLab Desktop application](https://github.com/jupyterlab/jupyterlab-desktop) to start running DataFusion in a Jupyter notebook.
+![DataFusion in Jupyter](https://github.com/MrPowers/datafusion-book/raw/main/src/images/datafusion-jupyterlab.png)
+
+Create a virtual environment with DataFusion, Jupyter, and other useful dependencies and start the desktop application.
+
+```bash
+uv venv
+uv pip install datafusion jupyterlab jupyterlab_code_formatter
+uv run jupyter lab
+```

## Examples

diff --git a/docs/source/_static/theme_overrides.css b/docs/source/_static/theme_overrides.css
index 1e972cc6f..aaa40fba2 100644
--- a/docs/source/_static/theme_overrides.css
+++ b/docs/source/_static/theme_overrides.css
@@ -56,7 +56,7 @@ a.navbar-brand img {

/* This is the bootstrap CSS style for "table-striped". Since the theme does
-not yet provide an easy way to configure this globaly, it easier to simply
+not yet provide an easy way to configure this globally, it is easier to simply
include this snippet here than updating each table in all rst files to add
":class: table-striped" */

diff --git a/docs/source/api.rst b/docs/source/api.rst
deleted file mode 100644
index d9f4a09dd..000000000
--- a/docs/source/api.rst
+++ /dev/null
@@ -1,31 +0,0 @@
-.. Licensed to the Apache Software Foundation (ASF) under one
-.. or more contributor license agreements. See the NOTICE file
-.. distributed with this work for additional information
-.. regarding copyright ownership. The ASF licenses this file
-.. to you under the Apache License, Version 2.0 (the
-.. "License"); you may not use this file except in compliance
-.. with the License. You may obtain a copy of the License at
-
-.. http://www.apache.org/licenses/LICENSE-2.0
-
-.. Unless required by applicable law or agreed to in writing,
-.. software distributed under the License is distributed on an
-.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-.. KIND, either express or implied. See the License for the
-.. specific language governing permissions and limitations
-.. under the License.
-
-.. _api:
-
-*************
-API Reference
-*************
-
-.. 
toctree:: - :maxdepth: 2 - - api/dataframe - api/execution_context - api/expression - api/functions - api/object_store diff --git a/docs/source/api/dataframe.rst b/docs/source/api/dataframe.rst deleted file mode 100644 index 0a3c4c8b1..000000000 --- a/docs/source/api/dataframe.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, -.. software distributed under the License is distributed on an -.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -.. KIND, either express or implied. See the License for the -.. specific language governing permissions and limitations -.. under the License. - -.. _api.dataframe: -.. currentmodule:: datafusion - -DataFrame -========= - -.. autosummary:: - :toctree: ../generated/ - - DataFrame diff --git a/docs/source/api/execution_context.rst b/docs/source/api/execution_context.rst deleted file mode 100644 index a3bda76d7..000000000 --- a/docs/source/api/execution_context.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, -.. software distributed under the License is distributed on an -.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -.. KIND, either express or implied. See the License for the -.. specific language governing permissions and limitations -.. under the License. - -.. _api.execution_context: -.. currentmodule:: datafusion - -SessionContext -============== - -.. autosummary:: - :toctree: ../generated/ - - SessionConfig - RuntimeConfig - SessionContext diff --git a/docs/source/api/expression.rst b/docs/source/api/expression.rst deleted file mode 100644 index 30137d135..000000000 --- a/docs/source/api/expression.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, -.. software distributed under the License is distributed on an -.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -.. KIND, either express or implied. See the License for the -.. specific language governing permissions and limitations -.. under the License. - -.. _api.expression: -.. 
currentmodule:: datafusion - -Expr -========== - -.. autosummary:: - :toctree: ../generated/ - - Expr diff --git a/docs/source/api/functions.rst b/docs/source/api/functions.rst deleted file mode 100644 index 958606df2..000000000 --- a/docs/source/api/functions.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, -.. software distributed under the License is distributed on an -.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -.. KIND, either express or implied. See the License for the -.. specific language governing permissions and limitations -.. under the License. - -.. _api.functions: -.. currentmodule:: datafusion - -Functions -========= - -.. autosummary:: - :toctree: ../generated/ - - functions.functions diff --git a/docs/source/api/object_store.rst b/docs/source/api/object_store.rst deleted file mode 100644 index 8d78f0724..000000000 --- a/docs/source/api/object_store.rst +++ /dev/null @@ -1,27 +0,0 @@ -.. Licensed to the Apache Software Foundation (ASF) under one -.. or more contributor license agreements. See the NOTICE file -.. distributed with this work for additional information -.. regarding copyright ownership. The ASF licenses this file -.. to you under the Apache License, Version 2.0 (the -.. "License"); you may not use this file except in compliance -.. with the License. You may obtain a copy of the License at - -.. http://www.apache.org/licenses/LICENSE-2.0 - -.. Unless required by applicable law or agreed to in writing, -.. software distributed under the License is distributed on an -.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -.. KIND, either express or implied. See the License for the -.. specific language governing permissions and limitations -.. under the License. - -.. _api.object_store: -.. currentmodule:: datafusion.object_store - -ObjectStore -=========== - -.. autosummary:: - :toctree: ../generated/ - - object_store \ No newline at end of file diff --git a/docs/source/conf.py b/docs/source/conf.py index c0da8b2cc..01813b032 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -15,6 +15,8 @@ # specific language governing permissions and limitations # under the License. +"""Documentation generation.""" + # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full @@ -44,15 +46,11 @@ # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ - "sphinx.ext.autodoc", - "sphinx.ext.autosummary", - "sphinx.ext.doctest", - "sphinx.ext.ifconfig", "sphinx.ext.mathjax", - "sphinx.ext.viewcode", "sphinx.ext.napoleon", "myst_parser", "IPython.sphinxext.ipython_directive", + "autoapi.extension", ] source_suffix = { @@ -68,15 +66,47 @@ # This pattern also affects html_static_path and html_extra_path. exclude_patterns = [] -# Show members for classes in .. 
autosummary -autodoc_default_options = { - "members": None, - "undoc-members": None, - "show-inheritance": None, - "inherited-members": None, -} +autoapi_dirs = ["../../python"] +autoapi_ignore = ["*tests*"] +autoapi_member_order = "groupwise" +suppress_warnings = ["autoapi.python_import_resolution"] +autoapi_python_class_content = "both" +autoapi_keep_files = False # set to True for debugging generated files + + +def autoapi_skip_member_fn(app, what, name, obj, skip, options) -> bool: # noqa: ARG001 + skip_contents = [ + # Re-exports + ("class", "datafusion.DataFrame"), + ("class", "datafusion.SessionContext"), + ("module", "datafusion.common"), + # Duplicate modules (skip module-level docs to avoid duplication) + ("module", "datafusion.col"), + ("module", "datafusion.udf"), + # Deprecated + ("class", "datafusion.substrait.serde"), + ("class", "datafusion.substrait.plan"), + ("class", "datafusion.substrait.producer"), + ("class", "datafusion.substrait.consumer"), + ("method", "datafusion.context.SessionContext.tables"), + ("method", "datafusion.dataframe.DataFrame.unnest_column"), + ] + # Explicitly skip certain members listed above. These are either + # re-exports, duplicate module-level documentation, deprecated + # API surfaces, or private variables that would otherwise appear + # in the generated docs and cause confusing duplication. + # Keeping this explicit list avoids surprising entries in the + # AutoAPI output and gives us a single place to opt-out items + # when we intentionally hide them from the docs. + if (what, name) in skip_contents: + skip = True + + return skip + + +def setup(sphinx) -> None: + sphinx.connect("autoapi-skip-member", autoapi_skip_member_fn) -autosummary_generate = True # -- Options for HTML output ------------------------------------------------- @@ -85,9 +115,7 @@ # html_theme = "pydata_sphinx_theme" -html_theme_options = { - "use_edit_page_button": True, -} +html_theme_options = {"use_edit_page_button": False, "show_toc_level": 2} html_context = { "github_user": "apache", diff --git a/docs/source/contributor-guide/ffi.rst b/docs/source/contributor-guide/ffi.rst new file mode 100644 index 000000000..e0158e0a2 --- /dev/null +++ b/docs/source/contributor-guide/ffi.rst @@ -0,0 +1,278 @@ +.. Licensed to the Apache Software Foundation (ASF) under one +.. or more contributor license agreements. See the NOTICE file +.. distributed with this work for additional information +.. regarding copyright ownership. The ASF licenses this file +.. to you under the Apache License, Version 2.0 (the +.. "License"); you may not use this file except in compliance +.. with the License. You may obtain a copy of the License at + +.. http://www.apache.org/licenses/LICENSE-2.0 + +.. Unless required by applicable law or agreed to in writing, +.. software distributed under the License is distributed on an +.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +.. KIND, either express or implied. See the License for the +.. specific language governing permissions and limitations +.. under the License. + +.. _ffi: + +Python Extensions +================= + +The DataFusion in Python project is designed to allow users to extend its functionality in a few core +areas. Ideally many users would like to package their extensions as a Python package and easily +integrate that package with this project. This page serves to describe some of the challenges we face +when doing these integrations and the approach our project uses. 
+
+The Primary Issue
+-----------------
+
+Suppose you wish to use DataFusion and you have a custom data source that can produce tables that
+can then be queried against, similar to how you can register a :ref:`CSV ` or
+:ref:`Parquet ` file. In DataFusion terminology, you likely want to implement a
+:ref:`Custom Table Provider `. In an effort to make your data source
+as performant as possible and to utilize the features of DataFusion, you may decide to write
+your source in Rust and then expose it through `PyO3 `_ as a Python library.
+
+At first glance, it may appear the best way to do this is to add the ``datafusion-python``
+crate as a dependency, provide a ``PyTable``, and then to register it with the
+``SessionContext``. Unfortunately, this will not work.
+
+When you produce your code as a Python library and it needs to interact with the DataFusion
+library, at the lowest level they communicate through an Application Binary Interface (ABI).
+The acronym sounds similar to API (Application Programming Interface), but it is distinctly
+different.
+
+The ABI sets the standard for how these libraries can share data and functions between each
+other. One of the key differences between Rust and other programming languages is that Rust
+does not have a stable ABI. What this means in practice is that if you compile a Rust library
+with one version of the ``rustc`` compiler and I compile another library to interface with it
+but I use a different version of the compiler, there is no guarantee the interface will be
+the same.
+
+In practice, this means that a Python library built with ``datafusion-python`` as a Rust
+dependency will generally **not** be compatible with the DataFusion Python package, even
+if they reference the same version of ``datafusion-python``. If you attempt to do this, it may
+work on your local computer if you have built both packages with the same optimizations.
+This can sometimes lead to a false expectation that the code will work, but it frequently
+breaks the moment you try to use your package against the released packages.
+
+You can find more information about the Rust ABI in their
+`online documentation `_.
+
+The FFI Approach
+----------------
+
+Rust supports interacting with other programming languages through its Foreign Function
+Interface (FFI). The advantage of using the FFI is that it enables you to write data structures
+and functions that have a stable ABI. This allows you to use Rust code with C, Python, and
+other languages. In fact, the `PyO3 `_ library uses the FFI to share data
+and functions between Python and Rust.
+
+The approach we are taking in the DataFusion in Python project is to incrementally expose
+more portions of the DataFusion project via FFI interfaces. This allows users to write Rust
+code that does **not** require the ``datafusion-python`` crate as a dependency, expose their
+code in Python via PyO3, and have it interact with the DataFusion Python package.
+
+Early adopters of this approach include `delta-rs `_,
+which has adapted its Table Provider for use in ``datafusion-python`` with only a few lines
+of code. Also, the DataFusion Python project uses the existing definitions from
+`Apache Arrow CStream Interface `_
+to support importing **and** exporting tables. Any Python package that supports reading
+the Arrow C Stream interface can work with DataFusion Python out of the box! You can read
+more about working with Arrow sources in the :ref:`Data Sources `
+page.
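+
+As a short illustration of that last point, the sketch below feeds an in-memory
+``pyarrow`` table to DataFusion through the Arrow C Stream interface. This is a
+minimal sketch, assuming a recent ``pyarrow`` release and a ``datafusion`` release
+where ``SessionContext.from_arrow`` accepts any object exporting the Arrow C Stream
+interface; check the API reference for the exact entry point in your version.
+
+.. code-block:: python
+
+    import pyarrow as pa
+
+    from datafusion import SessionContext
+
+    ctx = SessionContext()
+
+    # pyarrow tables export __arrow_c_stream__, so DataFusion can import the
+    # record batches without copying the underlying Arrow buffers.
+    table = pa.table({"a": [1, 2, 3], "b": ["x", "y", "z"]})
+
+    df = ctx.from_arrow(table)
+    df.show()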
+
+To learn more about the Foreign Function Interface in Rust, the
+`Rustonomicon `_ is a good resource.
+
+Inspiration from Arrow
+----------------------
+
+DataFusion is built upon `Apache Arrow `_. The canonical Python
+Arrow implementation, `pyarrow `_, provides
+an excellent way to share Arrow data between Python projects without performing any copy
+operations on the data. They do this by using a well-defined set of interfaces. You can
+find the details about their stream interface
+`here `_. The
+`Rust Arrow Implementation `_ also supports these
+``C`` style definitions via the Foreign Function Interface.
+
+In addition to using these interfaces to transfer Arrow data between libraries, ``pyarrow``
+goes one step further to make sharing the interfaces easier in Python. They do this
+by exposing PyCapsules that contain the expected functionality.
+
+You can learn more about PyCapsules from the official
+`Python online documentation `_. PyCapsules
+have excellent support in PyO3 already. The
+`PyO3 online documentation `_ is a good source
+for more details on using PyCapsules in Rust.
+
+Two lessons we leverage from the Arrow project in DataFusion Python are:
+
+- We reuse the existing Arrow FFI functionality wherever possible.
+- We expose PyCapsules that contain an FFI-stable struct.
+
+Implementation Details
+----------------------
+
+The bulk of the code necessary to perform our FFI operations is in the upstream
+`DataFusion `_ core repository. You can review the code and
+documentation in the `datafusion-ffi`_ crate.
+
+Our FFI implementation is narrowly focused on sharing data and functions with Rust-backed
+libraries. This allows us to use the `abi_stable crate `_.
+This is an excellent crate that allows for easy conversion between Rust native types
+and FFI-safe alternatives. For example, if you needed to pass a ``Vec`` via FFI,
+you can simply convert it to a ``RVec`` in an intuitive manner. It also supports
+features like ``RResult`` and ``ROption`` that do not have an obvious translation to a
+C equivalent.
+
+The `datafusion-ffi`_ crate has been designed to make it easy to convert from DataFusion
+traits into their FFI counterparts. For example, if you have defined a custom
+`TableProvider `_
+and you want to create a sharable FFI counterpart, you could write:
+
+.. code-block:: rust
+
+    let my_provider = MyTableProvider::default();
+    let ffi_provider = FFI_TableProvider::new(Arc::new(my_provider), false, None);
+
+.. _ffi_pyclass_mutability:
+
+PyO3 class mutability guidelines
+--------------------------------
+
+PyO3 bindings should present immutable wrappers whenever a struct stores shared or
+interior-mutable state. In practice this means that any ``#[pyclass]`` containing an
+``Arc<RwLock<T>>`` or similar synchronized primitive must opt into ``#[pyclass(frozen)]``
+unless there is a compelling reason not to.
+
+The :mod:`datafusion` configuration helpers illustrate the preferred pattern. The
+``PyConfig`` class in :file:`src/config.rs` stores an ``Arc<RwLock<ConfigOptions>>`` and is
+explicitly frozen so callers interact with configuration state through provided methods
+instead of mutating the container directly:
+
+.. code-block:: rust
+
+    #[pyclass(from_py_object, name = "Config", module = "datafusion", subclass, frozen)]
+    #[derive(Clone)]
+    pub(crate) struct PyConfig {
+        config: Arc<RwLock<ConfigOptions>>,
+    }
+
+The same approach applies to execution contexts. ``PySessionContext`` in
+:file:`src/context.rs` stays frozen even though it shares mutable state internally via
+``SessionContext``. This ensures PyO3 tracks borrows correctly while Python-facing APIs
+clone the inner ``SessionContext`` or return new wrappers instead of mutating the
+existing instance in place:
+
+.. code-block:: rust
+
+    #[pyclass(from_py_object, frozen, name = "SessionContext", module = "datafusion", subclass)]
+    #[derive(Clone)]
+    pub struct PySessionContext {
+        pub ctx: SessionContext,
+    }
+
+Occasionally a type must remain mutable, for example when PyO3 attribute setters need to
+update fields directly. In these rare cases, add an inline justification so reviewers and
+future contributors understand why ``frozen`` is unsafe to enable. ``DataTypeMap`` in
+:file:`src/common/data_type.rs` includes such a comment because PyO3 still needs to track
+field updates:
+
+.. code-block:: rust
+
+    // TODO: This looks like this needs pyo3 tracking so leaving unfrozen for now
+    #[derive(Debug, Clone)]
+    #[pyclass(from_py_object, name = "DataTypeMap", module = "datafusion.common", subclass)]
+    pub struct DataTypeMap {
+        #[pyo3(get, set)]
+        pub arrow_type: PyDataType,
+        #[pyo3(get, set)]
+        pub python_type: PythonType,
+        #[pyo3(get, set)]
+        pub sql_type: SqlType,
+    }
+
+When reviewers encounter a mutable ``#[pyclass]`` without a comment, they should request
+an explanation or ask that ``frozen`` be added. Keeping these wrappers frozen by default
+helps avoid subtle bugs stemming from PyO3's interior mutability tracking.
+
+If you were interfacing with a library that provided the above ``FFI_TableProvider`` and
+you needed to turn it back into a ``TableProvider``, you can convert it into a
+``ForeignTableProvider``, which implements the ``TableProvider`` trait.
+
+.. code-block:: rust
+
+    let foreign_provider: ForeignTableProvider = ffi_provider.into();
+
+If you review the code in `datafusion-ffi`_ you will find that each of the traits we share
+across the boundary has two portions, one with a ``FFI_`` prefix and one with a ``Foreign``
+prefix. This is used to distinguish which side of the FFI boundary that struct is
+designed to be used on. The structures with the ``FFI_`` prefix are to be used by the
+**provider** of the structure. In the example we're showing, this means the code that has
+written the underlying ``TableProvider`` implementation to access your custom data source.
+The structures with the ``Foreign`` prefix are to be used by the receiver. In this case,
+it is the ``datafusion-python`` library.
+
+In order to share these FFI structures, we need to wrap them in some kind of Python object
+that can be used to interface from one package to another. As described in the above
+section on our inspiration from Arrow, we use ``PyCapsule``. We can create a ``PyCapsule``
+for our provider as follows:
+
+.. code-block:: rust
+
+    let name = CString::new("datafusion_table_provider")?;
+    let my_capsule = PyCapsule::new_bound(py, provider, Some(name))?;
+
+On the receiving side, we turn this PyCapsule object back into an ``FFI_TableProvider``,
+which can then be converted into a ``ForeignTableProvider``:
+
+.. code-block:: rust
+
+    let capsule = capsule.cast::<PyCapsule>()?;
+    let data: NonNull<FFI_TableProvider> = capsule
+        .pointer_checked(Some(name))?
+        .cast();
+    let provider = unsafe { data.as_ref() };
+
+By convention the ``datafusion-python`` library expects a Python object that has a
+``TableProvider`` PyCapsule to have this capsule accessible by calling a function named
+``__datafusion_table_provider__``.
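+
+From the Python side, the convention looks roughly like the sketch below. This is
+illustrative only: ``MyTableProvider`` stands in for a hypothetical class from your
+PyO3-based extension package whose ``__datafusion_table_provider__`` method returns
+the PyCapsule described above, and it assumes a ``datafusion`` release where
+``SessionContext.register_table_provider`` accepts such objects.
+
+.. code-block:: python
+
+    from datafusion import SessionContext
+
+    # Hypothetical extension module that exposes a table provider built
+    # against datafusion-ffi rather than the datafusion-python crate.
+    from my_extension import MyTableProvider
+
+    ctx = SessionContext()
+
+    # DataFusion looks up __datafusion_table_provider__ on this object and
+    # unwraps the FFI_TableProvider from the PyCapsule it returns.
+    ctx.register_table_provider("my_table", MyTableProvider())
+
+    ctx.sql("SELECT * FROM my_table").show()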
You can see a complete working example of how to
share a ``TableProvider`` from one Python library to DataFusion Python in the
`repository examples folder `_.

This section has been written using ``TableProvider`` as an example. It is the first
extension that has been written using this approach and the most thoroughly implemented.
As we continue to expose more of the DataFusion features, we intend to follow this same
design pattern.

Alternative Approach
--------------------

Suppose you needed to expose some other feature of DataFusion and you could not wait
for the upstream repository to implement the FFI approach we describe. In this case
you might decide to take a dependency on the ``datafusion-python`` crate instead.

As we discussed, this is not guaranteed to work across different compiler versions and
optimization levels. If you wish to go down this route, there are two approaches we
have identified you can use.

#. Re-export all of ``datafusion-python`` yourself with your extensions built in.
#. Carefully synchronize your software releases with the ``datafusion-python`` CI build
   system so that your libraries use the exact same compiler, features, and
   optimization level.

We currently do not recommend either of these approaches as they are difficult to
maintain over a long period. Additionally, they require a tight version coupling
between libraries.

Status of Work
--------------

At the time of this writing, the FFI features are under active development. To see
the latest status, we recommend reviewing the code in the `datafusion-ffi`_ crate.

.. _datafusion-ffi: https://crates.io/crates/datafusion-ffi
diff --git a/docs/source/contributor-guide/introduction.rst b/docs/source/contributor-guide/introduction.rst
index 6de2b87bc..33c2b274c 100644
--- a/docs/source/contributor-guide/introduction.rst
+++ b/docs/source/contributor-guide/introduction.rst
@@ -26,25 +26,31 @@ We welcome and encourage contributions of all kinds, such as:

 In addition to submitting new PRs, we have a healthy tradition of community members reviewing each other’s PRs. Doing so is a great way to help the community as well as get more familiar with Rust and the relevant codebases.

+Before opening a pull request that touches PyO3 bindings, please review the
+:ref:`PyO3 class mutability guidelines <ffi_pyclass_mutability>` so you can flag missing
+``#[pyclass(frozen)]`` annotations during development and review.
+
 How to develop
 --------------

-This assumes that you have rust and cargo installed. We use the workflow recommended by `pyo3 `_ and `maturin `_.
+This assumes that you have Rust and Cargo installed. We use the workflow recommended by
+`pyo3 `_ and `maturin `_. We recommend using
+`uv `_ for Python package management.
+
+By default ``uv`` will attempt to build the datafusion Python package, but for our
+development we prefer to build manually. This means that when creating your virtual
+environment using ``uv sync`` you need to pass the additional parameter
+``--no-install-package datafusion``, and for ``uv run`` commands the additional
+parameter ``--no-project``.
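For example, once the environment is bootstrapped as described below, running the test
suite under these constraints might look like the following sketch (the ``pytest``
invocation mirrors the one shown later in this guide):

.. code-block:: shell

    # run the tests without letting uv try to rebuild the datafusion package
    uv run --no-project python -m pytest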
Bootstrap:

.. code-block:: shell

     # fetch this repo
-    git clone git@github.com:apache/arrow-datafusion-python.git
-    # prepare development environment (used to build wheel / install in development)
-    python3 -m venv venv
-    # activate the venv
-    source venv/bin/activate
-    # update pip itself if necessary
-    python -m pip install -U pip
-    # install dependencies (for Python 3.8+)
-    python -m pip install -r requirements-310.txt
+    git clone git@github.com:apache/datafusion-python.git
+    # create the virtual environment
+    uv sync --dev --no-install-package datafusion
+    # activate the environment
+    source .venv/bin/activate

 The tests rely on test data in git submodules.

@@ -58,8 +64,8 @@ Whenever rust code changes (your changes or via `git pull`):

 .. code-block:: shell

-    # make sure you activate the venv using "source venv/bin/activate" first
-    maturin develop
+    # make sure you activate the venv using "source .venv/bin/activate" first
+    maturin develop --uv
     python -m pytest

 Running & Installing pre-commit hooks

@@ -71,24 +77,78 @@ Our pre-commit hooks can be installed by running :code:`pre-commit install`, whi

 The pre-commit hooks can also be run adhoc without installing them by simply running :code:`pre-commit run --all-files`

Guidelines for Separating Python and Rust Code
----------------------------------------------

Version 40 of ``datafusion-python`` introduced ``python`` wrappers around the ``pyo3`` generated code to vastly improve the user experience. (See the `blog post `_ and `pull request `_ for more details.)

Mostly, the ``python`` code is limited to pure wrappers with type hints and good docstrings, but there are a few cases where the code does more:

1. Trivial aliases like :py:func:`~datafusion.functions.array_append` and :py:func:`~datafusion.functions.list_append`.
2. Simple type conversion, like from a ``path`` to a ``string`` of the path or from ``number`` to ``lit(number)``.
3. The additional code makes an API **much** more pythonic, like we do for :py:func:`~datafusion.functions.named_struct` (see `source code `_).

 Update Dependencies
 -------------------

-To change test dependencies, change the `requirements.in` and run
+To change test dependencies, update ``pyproject.toml`` and then run the following to sync
+your environment:
+
+.. code-block:: shell
+
+    uv sync --dev --no-install-package datafusion

Improving Build Speed
---------------------

The `pyo3 `_ dependency of this project contains a ``build.rs`` file which
can cause it to rebuild frequently. You can prevent this from happening by defining a ``PYO3_CONFIG_FILE``
environment variable that points to a file with your build configuration. Whenever your build configuration
changes, such as during some major version updates, you will need to regenerate this file. This variable
should point to a fully resolved path on your build machine.

To generate this file, use the following command:

 .. code-block:: shell

-    # install pip-tools (this can be done only once), also consider running in venv
-    python -m pip install pip-tools
-    python -m piptools compile --generate-hashes -o requirements-310.txt
+    PYO3_PRINT_CONFIG=1 cargo build

This will generate some output that looks like the following. You will want to copy these contents into
a file. If you place this file in your project directory with the filename ``.pyo3_build_config`` it will
be ignored by ``git``.
.. code-block::

-To update dependencies, run with `-U`
+    implementation=CPython
+    version=3.9
+    shared=true
+    abi3=true
+    lib_name=python3.12
+    lib_dir=/opt/homebrew/opt/python@3.12/Frameworks/Python.framework/Versions/3.12/lib
+    executable=/Users/myusername/src/datafusion-python/.venv/bin/python
+    pointer_width=64
+    build_flags=
+    suppress_build_script_link_lines=false
+
+Add the environment variable to your system.

 .. code-block:: shell

-    python -m piptools compile -U --generate-hashes -o requirements-310.txt
+    export PYO3_CONFIG_FILE="/Users/myusername/src/datafusion-python/.pyo3_build_config"
+
+If you are on a Mac and you use VS Code for your IDE, you will want to add these variables
+to your settings. You can find the appropriate Rust flags by looking in the
+``.cargo/config.toml`` file.

+.. code-block:: json

-More details about pip-tools `here `_
+    "rust-analyzer.cargo.extraEnv": {
+        "RUSTFLAGS": "-C link-arg=-undefined -C link-arg=dynamic_lookup",
+        "PYO3_CONFIG_FILE": "/Users/myusername/src/datafusion-python/.pyo3_build_config"
+    },
+    "rust-analyzer.runnables.extraEnv": {
+        "RUSTFLAGS": "-C link-arg=-undefined -C link-arg=dynamic_lookup",
+        "PYO3_CONFIG_FILE": "/Users/myusername/src/datafusion-python/.pyo3_build_config"
+    }
diff --git a/docs/source/images/jupyter_lab_df_view.png b/docs/source/images/jupyter_lab_df_view.png
new file mode 100644
index 000000000..9dafb4f61
Binary files /dev/null and b/docs/source/images/jupyter_lab_df_view.png differ
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 16c88e033..134d41cb6 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -43,27 +43,13 @@ Example

 .. ipython:: python

-    import datafusion
-    from datafusion import col
-    import pyarrow
+    from datafusion import SessionContext

-    # create a context
-    ctx = datafusion.SessionContext()
+    ctx = SessionContext()

-    # create a RecordBatch and a new DataFrame from it
-    batch = pyarrow.RecordBatch.from_arrays(
-        [pyarrow.array([1, 2, 3]), pyarrow.array([4, 5, 6])],
-        names=["a", "b"],
-    )
-    df = ctx.create_dataframe([[batch]], name="batch_array")
+    df = ctx.read_csv("pokemon.csv")

-    # create a new statement
-    df = df.select(
-        col("a") + col("b"),
-        col("a") - col("b"),
-    )
-
-    df
+    df.show()

 .. _toc.links:
@@ -85,10 +71,13 @@ Example

     user-guide/introduction
     user-guide/basics
-    user-guide/configuration
+    user-guide/data-sources
+    user-guide/dataframe/index
     user-guide/common-operations/index
     user-guide/io/index
+    user-guide/configuration
     user-guide/sql
+    user-guide/upgrade-guides

 .. _toc.contributor_guide:
@@ -98,11 +87,10 @@ Example
     :caption: CONTRIBUTOR GUIDE

     contributor-guide/introduction
+    contributor-guide/ffi

 .. _toc.api:

 .. toctree::
    :hidden:
    :maxdepth: 1
    :caption: API
-
-   api
diff --git a/docs/source/user-guide/basics.rst b/docs/source/user-guide/basics.rst
index 438b23199..7c6820461 100644
--- a/docs/source/user-guide/basics.rst
+++ b/docs/source/user-guide/basics.rst
@@ -15,75 +15,84 @@
 .. specific language governing permissions and limitations
 .. under the License.

+.. _user_guide_concepts:
+
 Concepts
 ========

-In this section, we will cover a basic example to introduce a few key concepts.
+In this section, we will cover a basic example to introduce a few key concepts. We will use the
+2021 Yellow Taxi Trip Records (`download `_),
+from the `TLC Trip Record Data `_.

-.. code-block:: python
+.. ipython:: python

-    import datafusion
-    from datafusion import col
-    import pyarrow
+    from datafusion import SessionContext, col, lit, functions as f

-    # create a context
-    ctx = datafusion.SessionContext()
+    ctx = SessionContext()

-    # create a RecordBatch and a new DataFrame from it
-    batch = pyarrow.RecordBatch.from_arrays(
-        [pyarrow.array([1, 2, 3]), pyarrow.array([4, 5, 6])],
-        names=["a", "b"],
-    )
-    df = ctx.create_dataframe([[batch]])
+    df = ctx.read_parquet("yellow_tripdata_2021-01.parquet")

-    # create a new statement
     df = df.select(
-        col("a") + col("b"),
-        col("a") - col("b"),
+        "trip_distance",
+        col("total_amount").alias("total"),
+        (f.round(lit(100.0) * col("tip_amount") / col("total_amount"), lit(1))).alias("tip_percent"),
     )

-    # execute and collect the first (and only) batch
-    result = df.collect()[0]
+    df.show()

-The first statement group:
+Session Context
+---------------
+
+The first statement group creates a :py:class:`~datafusion.context.SessionContext`.

 .. code-block:: python

     # create a context
     ctx = datafusion.SessionContext()

-creates a :code:`SessionContext`, that is, the main interface for executing queries with DataFusion. It maintains the state
-of the connection between a user and an instance of the DataFusion engine. Additionally it provides the following functionality:
+A Session Context is the main interface for executing queries with DataFusion. It maintains the state
+of the connection between a user and an instance of the DataFusion engine. Additionally, it provides
+the following functionality:

-- Create a DataFrame from a CSV or Parquet data source.
-- Register a CSV or Parquet data source as a table that can be referenced from a SQL query.
-- Register a custom data source that can be referenced from a SQL query.
+- Create a DataFrame from a data source.
+- Register a data source as a table that can be referenced from a SQL query.
 - Execute a SQL query

+DataFrame
+---------
+
 The second statement group creates a :code:`DataFrame`,

 .. code-block:: python

-    # create a RecordBatch and a new DataFrame from it
-    batch = pyarrow.RecordBatch.from_arrays(
-        [pyarrow.array([1, 2, 3]), pyarrow.array([4, 5, 6])],
-        names=["a", "b"],
-    )
-    df = ctx.create_dataframe([[batch]])
+    # Create a DataFrame from a file
+    df = ctx.read_parquet("yellow_tripdata_2021-01.parquet")

 A DataFrame refers to a (logical) set of rows that share the same column names, similar to a `Pandas DataFrame `_.
-DataFrames are typically created by calling a method on :code:`SessionContext`, such as :code:`read_csv`, and can then be modified by
-calling the transformation methods, such as :meth:`.DataFrame.filter`, :meth:`.DataFrame.select`, :meth:`.DataFrame.aggregate`,
-and :meth:`.DataFrame.limit` to build up a query definition.
+DataFrames are typically created by calling a method on :py:class:`~datafusion.context.SessionContext`, such as :code:`read_csv`, and can then be modified by
+calling the transformation methods, such as :py:func:`~datafusion.dataframe.DataFrame.filter`, :py:func:`~datafusion.dataframe.DataFrame.select`, :py:func:`~datafusion.dataframe.DataFrame.aggregate`,
+and :py:func:`~datafusion.dataframe.DataFrame.limit` to build up a query definition.
+
+For more details on working with DataFrames, including visualization options and conversion to other formats, see :doc:`dataframe/index`.
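To make the lazy, chainable nature of these transformation methods concrete, here is a
small sketch that reuses the taxi data file from above; the filter threshold is arbitrary:

.. code-block:: python

    from datafusion import SessionContext, col

    ctx = SessionContext()

    # each method returns a new DataFrame; nothing executes until show()
    df = (
        ctx.read_parquet("yellow_tripdata_2021-01.parquet")
        .filter(col("total_amount") > 0.0)
        .select("trip_distance", "total_amount")
        .limit(5)
    )

    df.show()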
Expressions
-----------

-The third statement uses :code:`Expressions` to build up a query definition.
+The third statement uses :code:`Expressions` to build up a query definition. You can find
+explanations for what the functions below do in the user documentation for
+:py:func:`~datafusion.col`, :py:func:`~datafusion.lit`, :py:func:`~datafusion.functions.round`,
+and :py:func:`~datafusion.expr.Expr.alias`.

 .. code-block:: python

     df = df.select(
-        col("a") + col("b"),
-        col("a") - col("b"),
+        "trip_distance",
+        col("total_amount").alias("total"),
+        (f.round(lit(100.0) * col("tip_amount") / col("total_amount"), lit(1))).alias("tip_percent"),
     )

-Finally the :code:`collect` method converts the logical plan represented by the DataFrame into a physical plan and execute it,
-collecting all results into a list of `RecordBatch `_.
\ No newline at end of file
+Finally the :py:func:`~datafusion.dataframe.DataFrame.show` method converts the logical plan
+represented by the DataFrame into a physical plan and executes it, collecting all results and
+displaying them to the user. It is important to note that DataFusion performs lazy evaluation
+of the DataFrame. Until you call a method such as :py:func:`~datafusion.dataframe.DataFrame.show`
+or :py:func:`~datafusion.dataframe.DataFrame.collect`, DataFusion will not execute the query.
diff --git a/docs/source/user-guide/common-operations/aggregations.rst b/docs/source/user-guide/common-operations/aggregations.rst
index 235d644e6..e458e5fcb 100644
--- a/docs/source/user-guide/common-operations/aggregations.rst
+++ b/docs/source/user-guide/common-operations/aggregations.rst
@@ -15,46 +15,202 @@
 .. specific language governing permissions and limitations
 .. under the License.

+.. _aggregation:
+
 Aggregation
 ============

-An aggregate or aggregation is a function where the values of multiple rows are processed together to form a single summary value.
-For performing an aggregation, DataFusion provides the :meth:`.DataFrame.aggregate`
+An aggregate or aggregation is a function where the values of multiple rows are processed together
+to form a single summary value. For performing an aggregation, DataFusion provides the
+:py:func:`~datafusion.dataframe.DataFrame.aggregate` method.

 .. ipython:: python

-    from datafusion import SessionContext
-    from datafusion import column, lit
-    from datafusion import functions as f
-    import random
+    from datafusion import SessionContext, col, lit, functions as f

     ctx = SessionContext()
-    df = ctx.from_pydict(
-        {
-            "a": ["foo", "bar", "foo", "bar", "foo", "bar", "foo", "foo"],
-            "b": ["one", "one", "two", "three", "two", "two", "one", "three"],
-            "c": [random.randint(0, 100) for _ in range(8)],
-            "d": [random.random() for _ in range(8)],
-        },
-        name="foo_bar"
-    )
+    df = ctx.read_csv("pokemon.csv")

-    col_a = column("a")
-    col_b = column("b")
-    col_c = column("c")
-    col_d = column("d")
+    col_type_1 = col('"Type 1"')
+    col_type_2 = col('"Type 2"')
+    col_speed = col('"Speed"')
+    col_attack = col('"Attack"')

-    df.aggregate([], [f.approx_distinct(col_c), f.approx_median(col_d), f.approx_percentile_cont(col_d, lit(0.5))])
+    df.aggregate([col_type_1], [
+        f.approx_distinct(col_speed).alias("Count"),
+        f.approx_median(col_speed).alias("Median Speed"),
+        f.approx_percentile_cont(col_speed, 0.9).alias("90% Speed")])

-When the :code:`group_by` list is empty the aggregation is done over the whole :class:`.DataFrame`. For grouping
-the :code:`group_by` list must contain at least one column
+When the :code:`group_by` list is empty, the aggregation is done over the whole :class:`.DataFrame`.
+For grouping, the :code:`group_by` list must contain at least one column.
 .. ipython:: python

-    df.aggregate([col_a], [f.sum(col_c), f.max(col_d), f.min(col_d)])
+    df.aggregate([col_type_1], [
+        f.max(col_speed).alias("Max Speed"),
+        f.avg(col_speed).alias("Avg Speed"),
+        f.min(col_speed).alias("Min Speed")])

 More than one column can be used for grouping

 .. ipython:: python

-    df.aggregate([col_a, col_b], [f.sum(col_c), f.max(col_d), f.min(col_d)])
+    df.aggregate([col_type_1, col_type_2], [
+        f.max(col_speed).alias("Max Speed"),
+        f.avg(col_speed).alias("Avg Speed"),
+        f.min(col_speed).alias("Min Speed")])

Setting Parameters
------------------

Each of the built-in aggregate functions provides arguments for the parameters that affect its
operation. Any of the following parameters can also be set using the builder approach. When you
use the builder, you must call ``build()`` to finish. For example, these two expressions are
equivalent.

.. ipython:: python

    first_1 = f.first_value(col("a"), order_by=[col("a")])
    first_2 = f.first_value(col("a")).order_by(col("a")).build()

Ordering
^^^^^^^^

You can control the order in which rows are processed by aggregate functions by providing
a list of sort expressions for the ``order_by`` parameter. In the following example, we
sort the Pokemon by their attack in increasing order and take the first value, which gives us the
Pokemon with the smallest attack value in each ``Type 1``.

.. ipython:: python

    df.aggregate(
        [col('"Type 1"')],
        [f.first_value(
            col('"Name"'),
            order_by=[col('"Attack"').sort(ascending=True)]
        ).alias("Smallest Attack")
        ])

Distinct
^^^^^^^^

When you set the parameter ``distinct`` to ``True``, each unique value is only evaluated once.
Suppose we want to create an array of all of the ``Type 2`` values for each ``Type 1`` in our
Pokemon set. Since there will be many repeated entries of ``Type 2``, we only want each distinct
value once.

.. ipython:: python

    df.aggregate([col_type_1], [f.array_agg(col_type_2, distinct=True).alias("Type 2 List")])

In the output of the above we can see that there are some ``Type 1`` for which the ``Type 2`` entry
is ``null``. In reality, we probably want to filter those out. We can do this in two ways. First,
we can filter out DataFrame rows that have no ``Type 2``. If we do this, we might have some
``Type 1`` entries entirely removed. The second is to use the ``filter`` argument described below.

.. ipython:: python

    df.filter(col_type_2.is_not_null()).aggregate([col_type_1], [f.array_agg(col_type_2, distinct=True).alias("Type 2 List")])

    df.aggregate([col_type_1], [f.array_agg(col_type_2, distinct=True, filter=col_type_2.is_not_null()).alias("Type 2 List")])

Which approach you take should depend on your use case.

Null Treatment
^^^^^^^^^^^^^^

This option allows you to either respect or ignore null values.

One common usage for handling nulls is the case where you want to find the first value within a
partition. By setting the null treatment to ignore nulls, we can find the first non-null value
in our partition.
.. ipython:: python

    from datafusion.common import NullTreatment

    df.aggregate([col_type_1], [
        f.first_value(
            col_type_2,
            order_by=[col_attack],
            null_treatment=NullTreatment.RESPECT_NULLS
        ).alias("Lowest Attack Type 2")])

    df.aggregate([col_type_1], [
        f.first_value(
            col_type_2,
            order_by=[col_attack],
            null_treatment=NullTreatment.IGNORE_NULLS
        ).alias("Lowest Attack Type 2")])

Filter
^^^^^^

The ``filter`` option controls which rows are evaluated by the aggregate function. As the
example above shows, this lets you filter the rows seen by the aggregate function without
filtering rows from the entire DataFrame.

``filter`` takes a single expression.

Suppose we want to find the speed values for only Pokemon that have low Attack values.

.. ipython:: python

    df.aggregate([col_type_1], [
        f.avg(col_speed).alias("Avg Speed All"),
        f.avg(col_speed, filter=col_attack < lit(50)).alias("Avg Speed Low Attack")])

Aggregate Functions
-------------------

The available aggregate functions are:

1. Comparison Functions
    - :py:func:`datafusion.functions.min`
    - :py:func:`datafusion.functions.max`
2. Math Functions
    - :py:func:`datafusion.functions.sum`
    - :py:func:`datafusion.functions.avg`
    - :py:func:`datafusion.functions.median`
3. Array Functions
    - :py:func:`datafusion.functions.array_agg`
4. Logical Functions
    - :py:func:`datafusion.functions.bit_and`
    - :py:func:`datafusion.functions.bit_or`
    - :py:func:`datafusion.functions.bit_xor`
    - :py:func:`datafusion.functions.bool_and`
    - :py:func:`datafusion.functions.bool_or`
5. Statistical Functions
    - :py:func:`datafusion.functions.count`
    - :py:func:`datafusion.functions.corr`
    - :py:func:`datafusion.functions.covar_samp`
    - :py:func:`datafusion.functions.covar_pop`
    - :py:func:`datafusion.functions.stddev`
    - :py:func:`datafusion.functions.stddev_pop`
    - :py:func:`datafusion.functions.var_samp`
    - :py:func:`datafusion.functions.var_pop`
6. Linear Regression Functions
    - :py:func:`datafusion.functions.regr_count`
    - :py:func:`datafusion.functions.regr_slope`
    - :py:func:`datafusion.functions.regr_intercept`
    - :py:func:`datafusion.functions.regr_r2`
    - :py:func:`datafusion.functions.regr_avgx`
    - :py:func:`datafusion.functions.regr_avgy`
    - :py:func:`datafusion.functions.regr_sxx`
    - :py:func:`datafusion.functions.regr_syy`
7. Positional Functions
    - :py:func:`datafusion.functions.first_value`
    - :py:func:`datafusion.functions.last_value`
    - :py:func:`datafusion.functions.nth_value`
8. String Functions
    - :py:func:`datafusion.functions.string_agg`
9. Approximation Functions
    - :py:func:`datafusion.functions.approx_distinct`
    - :py:func:`datafusion.functions.approx_median`
    - :py:func:`datafusion.functions.approx_percentile_cont`
    - :py:func:`datafusion.functions.approx_percentile_cont_with_weight`
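As a short illustration of combining several of these in one call, here is a sketch that
reuses the pokemon DataFrame and the ``col_type_1``/``col_speed`` expressions defined
earlier in this section:

.. code-block:: python

    df.aggregate(
        [col_type_1],
        [
            f.count(col_speed).alias("Count"),
            f.median(col_speed).alias("Median Speed"),
            f.stddev(col_speed).alias("Speed Std Dev"),
        ],
    )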
diff --git a/docs/source/user-guide/common-operations/basic-info.rst b/docs/source/user-guide/common-operations/basic-info.rst
index 424e1cc92..d48b49d5c 100644
--- a/docs/source/user-guide/common-operations/basic-info.rst
+++ b/docs/source/user-guide/common-operations/basic-info.rst
@@ -34,26 +34,26 @@ In this section, you will learn how to display essential details of DataFrames u

     })
     df

-Use :meth:`.DataFrame.limit` to view the top rows of the frame:
+Use :py:func:`~datafusion.dataframe.DataFrame.limit` to view the top rows of the frame:

 .. ipython:: python

     df.limit(2)

-Display the columns of the DataFrame using :meth:`.DataFrame.schema`:
+Display the columns of the DataFrame using :py:func:`~datafusion.dataframe.DataFrame.schema`:

 .. ipython:: python

     df.schema()

-The method :meth:`.DataFrame.to_pandas` uses pyarrow to convert to pandas DataFrame, by collecting the batches,
+The method :py:func:`~datafusion.dataframe.DataFrame.to_pandas` uses pyarrow to convert to a pandas DataFrame by collecting the batches,
 passing them to an Arrow table, and then converting them to a pandas DataFrame.

 .. ipython:: python

     df.to_pandas()

-:meth:`.DataFrame.describe` shows a quick statistic summary of your data:
+:py:func:`~datafusion.dataframe.DataFrame.describe` shows a quick statistic summary of your data:

 .. ipython:: python
diff --git a/docs/source/user-guide/common-operations/expressions.rst b/docs/source/user-guide/common-operations/expressions.rst
index ebb514f14..7848b4ee7 100644
--- a/docs/source/user-guide/common-operations/expressions.rst
+++ b/docs/source/user-guide/common-operations/expressions.rst
@@ -15,27 +15,29 @@
 .. specific language governing permissions and limitations
 .. under the License.

+.. _expressions:
+
 Expressions
 ===========

 In DataFusion an expression is an abstraction that represents a computation.
-Expressions are used as the primary inputs and ouputs for most functions within
+Expressions are used as the primary inputs and outputs for most functions within
 DataFusion. As such, expressions can be combined to create expression trees, a concept
 shared across most compilers and databases.

 Column
 ------

-The first expression most new users will interact with is the Column, which is created by calling :func:`col`.
-This expression represents a column within a DataFrame. The function :func:`col` takes as in input a string
+The first expression most new users will interact with is the Column, which is created by calling :py:func:`~datafusion.col`.
+This expression represents a column within a DataFrame. The function :py:func:`~datafusion.col` takes as input a string
 and returns an expression as its output.

 Literal
 -------

 Literal expressions represent a single value. These are helpful in a wide range of operations where
-a specific, known value is of interest. You can create a literal expression using the function :func:`lit`.
-The type of the object passed to the :func:`lit` function will be used to convert it to a known data type.
+a specific, known value is of interest. You can create a literal expression using the function :py:func:`~datafusion.lit`.
+The type of the object passed to the :py:func:`~datafusion.lit` function will be used to convert it to a known data type.

 In the following example we create expressions for the column named `color` and the literal scalar string `red`.
 The resultant variable `red_units` is itself also an expression.

@@ -58,11 +60,111 @@ examples for the and, or, and not operations.

     heavy_red_units = (col("color") == lit("red")) & (col("weight") > lit(42))
     not_red_units = ~(col("color") == lit("red"))

Arrays
------

For columns that contain arrays of values, you can access individual elements of the array by index
using bracket indexing. This is similar to calling the function
:py:func:`datafusion.functions.array_element`, except that array indexing using brackets is
0-based, similar to Python lists, whereas ``array_element`` uses 1-based indexing for
compatibility with other SQL approaches.
.. ipython:: python

    from datafusion import SessionContext, col

    ctx = SessionContext()
    df = ctx.from_pydict({"a": [[1, 2, 3], [4, 5, 6]]})
    df.select(col("a")[0].alias("a0"))

.. warning::

    Indexing an element of an array via ``[]`` starts at index 0 whereas
    :py:func:`~datafusion.functions.array_element` starts at index 1.

Starting in DataFusion 49.0.0, you can also create slices of array elements using
slice syntax from Python.

.. ipython:: python

    df.select(col("a")[1:3].alias("second_two_elements"))

To check if an array is empty, you can use the function :py:func:`datafusion.functions.array_empty`
or :py:func:`datafusion.functions.empty`.
This function returns a boolean indicating whether the array is empty.

.. ipython:: python

    from datafusion import SessionContext, col
    from datafusion.functions import array_empty

    ctx = SessionContext()
    df = ctx.from_pydict({"a": [[], [1, 2, 3]]})
    df.select(array_empty(col("a")).alias("is_empty"))

In this example, the ``is_empty`` column will contain ``True`` for the first row and ``False`` for the second row.

To get the total number of elements in an array, you can use the function :py:func:`datafusion.functions.cardinality`.
This function returns an integer indicating the total number of elements in the array.

.. ipython:: python

    from datafusion import SessionContext, col
    from datafusion.functions import cardinality

    ctx = SessionContext()
    df = ctx.from_pydict({"a": [[1, 2, 3], [4, 5, 6]]})
    df.select(cardinality(col("a")).alias("num_elements"))

In this example, the ``num_elements`` column will contain ``3`` for both rows.

To concatenate two arrays, you can use the function :py:func:`datafusion.functions.array_cat` or :py:func:`datafusion.functions.array_concat`.
These functions return a new array that is the concatenation of the input arrays.

.. ipython:: python

    from datafusion import SessionContext, col
    from datafusion.functions import array_cat, array_concat

    ctx = SessionContext()
    df = ctx.from_pydict({"a": [[1, 2, 3]], "b": [[4, 5, 6]]})
    df.select(array_cat(col("a"), col("b")).alias("concatenated_array"))

In this example, the ``concatenated_array`` column will contain ``[1, 2, 3, 4, 5, 6]``.

To repeat the elements of an array a specified number of times, you can use the function :py:func:`datafusion.functions.array_repeat`.
This function returns a new array with the elements repeated.

.. ipython:: python

    from datafusion import SessionContext, col, literal
    from datafusion.functions import array_repeat

    ctx = SessionContext()
    df = ctx.from_pydict({"a": [[1, 2, 3]]})
    df.select(array_repeat(col("a"), literal(2)).alias("repeated_array"))

In this example, the ``repeated_array`` column will contain ``[[1, 2, 3], [1, 2, 3]]``.

Structs
-------

Columns that contain struct elements can be accessed using bracket notation as if they were
Python dictionary style objects. This expects a string key as the parameter.

.. ipython:: python

    ctx = SessionContext()
    data = {"a": [{"size": 15, "color": "green"}, {"size": 10, "color": "blue"}]}
    df = ctx.from_pydict(data)
    df.select(col("a")["size"].alias("a_size"))

Functions
---------

As mentioned before, most functions in DataFusion return an expression as their output. This allows us to create
-a wide variety of expressions built up from other expressions. For example, :func:`.alias` is a function that takes
+a wide variety of expressions built up from other expressions. For example, :py:func:`~datafusion.expr.Expr.alias` is a function that takes
+as its input a single expression and returns an expression in which the name of the expression has changed.

 The following example shows a series of expressions that are built up from functions operating on expressions.
diff --git a/docs/source/user-guide/common-operations/functions.rst b/docs/source/user-guide/common-operations/functions.rst
index d793314f7..ccb47a4e7 100644
--- a/docs/source/user-guide/common-operations/functions.rst
+++ b/docs/source/user-guide/common-operations/functions.rst
@@ -19,20 +19,14 @@ Functions
 =========

 DataFusion provides a large number of built-in functions for performing complex queries without requiring user-defined functions.
-In here we will cover some of the more popular use cases. If you want to view all the functions go to the :ref:`Functions` API Reference.
+Here we will cover some of the more popular use cases. If you want to view all the functions, go to the :py:mod:`Functions <datafusion.functions>` API Reference.

 We'll use the pokemon dataset in the following examples.

 .. ipython:: python

-    import urllib.request
     from datafusion import SessionContext

-    urllib.request.urlretrieve(
-        "https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv",
-        "pokemon.csv",
-    )
-
     ctx = SessionContext()
     ctx.register_csv("pokemon", "pokemon.csv")
     df = ctx.table("pokemon")

@@ -40,11 +34,11 @@ We'll use the pokemon dataset in the following examples.

 Mathematical
 ------------

-DataFusion offers mathematical functions such as :func:`.pow` or :func:`.log`
+DataFusion offers mathematical functions such as :py:func:`~datafusion.functions.pow` or :py:func:`~datafusion.functions.log`

 .. ipython:: python

-    from datafusion import col, literal
+    from datafusion import col, literal, string_literal, str_lit
     from datafusion import functions as f

     df.select(

@@ -55,7 +49,7 @@ DataFusion offers mathematical functions such as :func:`.pow` or :func:`.log`

 Conditional
 -----------

-There 3 conditional functions in DataFusion :func:`.coalesce`, :func:`.nullif` and :func:`.case` (not available in Python)
+There are 3 conditional functions in DataFusion: :py:func:`~datafusion.functions.coalesce`, :py:func:`~datafusion.functions.nullif` and :py:func:`~datafusion.functions.case`.

 .. ipython:: python

@@ -66,18 +60,27 @@ There 3 conditional functions in DataFusion :func:`.coalesce`, :func:`.nullif` a

 Temporal
 --------

-For selecting the current time use :func:`.now`
+For selecting the current time use :py:func:`~datafusion.functions.now`

 .. ipython:: python

     df.select(f.now())

-Convert to timestamps using :func:`.to_timestamp`
+Convert to timestamps using :py:func:`~datafusion.functions.to_timestamp`

 .. ipython:: python

     df.select(f.to_timestamp(col('"Total"')).alias("timestamp"))

+Extract parts of a date using :py:func:`~datafusion.functions.date_part` (alias :py:func:`~datafusion.functions.extract`)
+
+.. ipython:: python
+
+    df.select(
+        f.date_part(literal("month"), f.to_timestamp(col('"Total"'))).alias("month"),
+        f.extract(literal("day"), f.to_timestamp(col('"Total"'))).alias("day")
+    )
+
 String
 ------

@@ -92,7 +95,7 @@ DataFusion offers a range of helpful options.
     f.left(col('"Name"'), literal(4)).alias("code")
     )

-This also includes the functions for regular expressions like :func:`.regexp_replace` and :func:`.regexp_match`
+This also includes the functions for regular expressions like :py:func:`~datafusion.functions.regexp_replace` and :py:func:`~datafusion.functions.regexp_match`

 .. ipython:: python

@@ -101,11 +104,22 @@ This also includes the functions for regular expressions like :func:`.regexp_rep

         f.regexp_replace(col('"Name"'), literal("saur"), literal("fleur")).alias("flowers")
     )

+Casting
+-------
+
+Cast expressions to different data types using :py:func:`~datafusion.functions.arrow_cast`
+
+.. ipython:: python
+
+    df.select(
+        f.arrow_cast(col('"Total"'), string_literal("Float64")).alias("total_as_float"),
+        f.arrow_cast(col('"Total"'), str_lit("Int32")).alias("total_as_int")
+    )
+
 Other
 -----

-The function :func:`.in_list` allows to check a column for the presence of multiple values:
+The function :py:func:`~datafusion.functions.in_list` allows you to check a column for the presence of multiple values:

 .. ipython:: python

@@ -115,3 +129,24 @@ The function :func:`.in_list` allows to check a column for the presence of multi

     .limit(20)
     .to_pandas()
     )
+
+
+Handling Missing Values
+=======================
+
+DataFusion provides methods to handle missing values in DataFrames:
+
+fill_null
+---------
+
+The ``fill_null()`` method replaces NULL values in specified columns with a provided value:
+
+.. code-block:: python
+
+    # Fill all NULL values with 0 where possible
+    df = df.fill_null(0)
+
+    # Fill NULL values only in specific string columns
+    df = df.fill_null("missing", subset=["name", "category"])
+
+The fill value will be cast to match each column's type. If casting fails for a column, that column remains unchanged.
diff --git a/docs/source/user-guide/common-operations/index.rst b/docs/source/user-guide/common-operations/index.rst
index b15b04c62..7abd1f138 100644
--- a/docs/source/user-guide/common-operations/index.rst
+++ b/docs/source/user-guide/common-operations/index.rst
@@ -18,9 +18,12 @@
 Common Operations
 =================

+The contents of this section are designed to guide a new user through how to use DataFusion.
+
 .. toctree::
    :maxdepth: 2

+   views
    basic-info
    select-and-filter
    expressions
diff --git a/docs/source/user-guide/common-operations/joins.rst b/docs/source/user-guide/common-operations/joins.rst
index 128203116..1d9d70385 100644
--- a/docs/source/user-guide/common-operations/joins.rst
+++ b/docs/source/user-guide/common-operations/joins.rst
@@ -18,7 +18,7 @@
 Joins
 =====

-DataFusion supports the following join variants via the method :meth:`.DataFrame.join`
+DataFusion supports the following join variants via the method :py:func:`~datafusion.dataframe.DataFrame.join`

 - Inner Join
 - Left Join

@@ -56,9 +56,9 @@ will be included in the resulting DataFrame.
 .. ipython:: python

-    left.join(right, join_keys=(["customer_id"], ["id"]), how="inner")
+    left.join(right, left_on="customer_id", right_on="id", how="inner")

-The parameter :code:`join_keys` specifies the columns from the left DataFrame and right DataFrame that contains the values
+The parameters ``left_on`` and ``right_on`` specify the columns from the left DataFrame and right DataFrame that contain the values
 that should match.

 Left Join
 ---------

 values for the corresponding columns.

 .. ipython:: python

-    left.join(right, join_keys=(["customer_id"], ["id"]), how="left")
+    left.join(right, left_on="customer_id", right_on="id", how="left")

 Full Join
 ---------

 is no match. Unmatched rows will have null values.

 .. ipython:: python

-    left.join(right, join_keys=(["customer_id"], ["id"]), how="full")
+    left.join(right, left_on="customer_id", right_on="id", how="full")

 Left Semi Join
 --------------

 omitting duplicates with multiple matches in the right table.

 .. ipython:: python

-    left.join(right, join_keys=(["customer_id"], ["id"]), how="semi")
+    left.join(right, left_on="customer_id", right_on="id", how="semi")

 Left Anti Join
 --------------

 the right table.

 .. ipython:: python

-    left.join(right, join_keys=(["customer_id"], ["id"]), how="anti")
\ No newline at end of file
+    left.join(right, left_on="customer_id", right_on="id", how="anti")
+
+Duplicate Keys
+--------------
+
+It is common to join two DataFrames on a common column name. Starting in
+version 51.0.0, ``datafusion-python`` will coalesce columns with identical names by
+default. This reduces problems with ambiguous column selection after joins.
+You can disable this feature by setting the parameter ``coalesce_duplicate_keys``
+to ``False``.
+
+.. ipython:: python
+
+    left = ctx.from_pydict(
+        {
+            "id": [1, 2, 3],
+            "customer": ["Alice", "Bob", "Charlie"],
+        }
+    )
+
+    right = ctx.from_pylist([
+        {"id": 1, "name": "CityCabs"},
+        {"id": 2, "name": "MetroRide"},
+        {"id": 5, "name": "UrbanGo"},
+    ])
+
+    left.join(right, "id", how="inner")
+
+In contrast to the above example, if we wish to keep both columns:
+
+.. ipython:: python
+
+    left.join(right, "id", how="inner", coalesce_duplicate_keys=False)
diff --git a/docs/source/user-guide/common-operations/select-and-filter.rst b/docs/source/user-guide/common-operations/select-and-filter.rst
index 8ede230e6..083bcbbd2 100644
--- a/docs/source/user-guide/common-operations/select-and-filter.rst
+++ b/docs/source/user-guide/common-operations/select-and-filter.rst
@@ -18,25 +18,22 @@
 Column Selections
 =================

-Use :meth:`.DataFrame.select_columns` for basic column selection.
+Use :py:func:`~datafusion.dataframe.DataFrame.select` for basic column selection.

 DataFusion can work with several file types, to start simple we can use a subset of the
-`TLC Trip Record Data `_
+`TLC Trip Record Data `_,
+which you can download `here `_.

 .. ipython:: python

-    import urllib.request
-    from datafusion import SessionContext
-
-    urllib.request.urlretrieve("https://d37ci6vzurychx.cloudfront.net/trip-data/yellow_tripdata_2021-01.parquet",
-        "yellow_trip_data.parquet")
+    from datafusion import SessionContext

     ctx = SessionContext()
-    df = ctx.read_parquet("yellow_trip_data.parquet")
-    df.select_columns("trip_distance", "passenger_count")
+    df = ctx.read_parquet("yellow_tripdata_2021-01.parquet")
+    df.select("trip_distance", "passenger_count")

-For mathematical or logical operations use :func:`.col` to select columns, and give meaningful names to the resulting
-operations using :func:`.alias`
+For mathematical or logical operations use :py:func:`~datafusion.col` to select columns, and give meaningful names to the resulting
+operations using :py:func:`~datafusion.expr.Expr.alias`
 .. ipython:: python

@@ -48,7 +45,7 @@ operations using :func:`.alias`

 Please be aware that all identifiers are effectively made lower-case in SQL, so if your file has capital
 letters (ex: Name) you must put your column name in double quotes or the selection won’t work. As an
-alternative for simple column selection use :meth:`.DataFrame.select_columns` without double quotes
+alternative for simple column selection use :py:func:`~datafusion.dataframe.DataFrame.select` without double quotes

 For selecting columns with capital letters use ``'"VendorID"'``

 .. ipython:: python

     df.select(col('"VendorID"'))

-To combine it with literal values use the :func:`.lit`
+To combine it with literal values use :py:func:`~datafusion.lit`

 .. ipython:: python
diff --git a/docs/source/user-guide/common-operations/udf-and-udfa.rst b/docs/source/user-guide/common-operations/udf-and-udfa.rst
index 62d249c7e..f669721a3 100644
--- a/docs/source/user-guide/common-operations/udf-and-udfa.rst
+++ b/docs/source/user-guide/common-operations/udf-and-udfa.rst
@@ -15,11 +15,24 @@
 .. specific language governing permissions and limitations
 .. under the License.

-User Defined Functions
+User-Defined Functions
 ======================

-DataFusion provides powerful expressions and functions, reducing the need for custom Python functions.
-However you can still incorporate your own functions, i.e. User-Defined Functions (UDFs), with the :func:`.udf` function.
+DataFusion provides powerful expressions and functions, reducing the need for custom Python
+functions. However, you can still incorporate your own functions, i.e. User-Defined Functions (UDFs).
+
+Scalar Functions
+----------------
+
+User-defined functions that operate on a row-by-row basis are called Scalar Functions. You can
+define your own scalar function by calling :py:func:`~datafusion.user_defined.ScalarUDF.udf`.
+
+The basic definition of a scalar UDF is a Python function that takes one or more
+`pyarrow `_ arrays and returns a single array as
+output. DataFusion scalar UDFs operate on an entire batch of records at a time, though the
+evaluation of those records should be on a row-by-row basis. In the following example, we
+compute, for each row, whether the input value is null.

 .. ipython:: python

@@ -35,51 +48,259 @@ However you can still incorporate your own functions, i.e. User-Defined Function

     ctx = datafusion.SessionContext()

     batch = pyarrow.RecordBatch.from_arrays(
-        [pyarrow.array([1, 2, 3]), pyarrow.array([4, 5, 6])],
+        [pyarrow.array([1, None, 3]), pyarrow.array([4, 5, 6])],
         names=["a", "b"],
     )
     df = ctx.create_dataframe([[batch]], name="batch_array")

-    df.select(is_null_arr(col("a"))).to_pandas()
+    df.select(col("a"), is_null_arr(col("a")).alias("is_null")).show()

-Additionally the :func:`.udaf` function allows you to define User-Defined Aggregate Functions (UDAFs)
+In the previous example, we used the fact that pyarrow provides a variety of built-in array
+functions such as ``is_null()``. There are additional pyarrow
+`compute functions `_ available. When possible,
+it is highly recommended to use these functions because they can perform computations without doing
+any copy operations from the original arrays. This leads to greatly improved performance.

-.. code-block:: python
+If you need to perform an operation in Python that is not available with the pyarrow compute
+functions, you will need to convert the record batch into Python values, perform your operation,
+and construct an array.
This operation of converting the built-in data type of the array into a
Python object can be one of the slowest operations in DataFusion, so it should be done sparingly.

The following example performs the same operation as before with ``is_null`` but demonstrates
converting to Python objects to do the evaluation.

.. ipython:: python

    import pyarrow
    import datafusion
    from datafusion import udf, col

    def is_null(array: pyarrow.Array) -> pyarrow.Array:
        return pyarrow.array([value.as_py() is None for value in array])

    is_null_arr = udf(is_null, [pyarrow.int64()], pyarrow.bool_(), 'stable')

    ctx = datafusion.SessionContext()

    batch = pyarrow.RecordBatch.from_arrays(
        [pyarrow.array([1, None, 3]), pyarrow.array([4, 5, 6])],
        names=["a", "b"],
    )
    df = ctx.create_dataframe([[batch]], name="batch_array")

    df.select(col("a"), is_null_arr(col("a")).alias("is_null")).show()

In this example we passed the PyArrow ``DataType`` when we defined the function
by calling ``udf()``. If you need additional control, such as specifying
metadata or nullability of the input or output, you can instead specify a
PyArrow ``Field``.

If you need to write a custom function but do not want to incur the performance
cost of converting to Python objects and back, a more advanced approach is to
write Rust-based UDFs and expose them to Python. There is an example in the
`DataFusion blog `_
describing how to do this.

Aggregate Functions
-------------------

The :py:func:`~datafusion.user_defined.AggregateUDF.udaf` function allows you to define User-Defined
Aggregate Functions (UDAFs). To use this you must implement an
:py:class:`~datafusion.user_defined.Accumulator` that determines how the aggregation is performed.

When defining a UDAF there are four methods you need to implement. The ``update`` function takes the
array(s) of input and updates the internal state of the accumulator. You should define this function
to have as many input arguments as you will pass when calling the UDAF. Since aggregation may be
split into multiple batches, we must have a way to combine multiple batches. For this, we have
two functions, ``state`` and ``merge``. ``state`` will return an array of scalar values that contain
the current state of a single batch accumulation. Then we must ``merge`` the results of these
different states. Finally ``evaluate`` is the call that will return the final result after the
``merge`` is complete.

In the following example we want to define a custom aggregate function that will return the
difference between the sum of two columns. The state can be represented by a single value and we can
also see how the inputs to ``update`` and ``merge`` differ.

 .. code-block:: python

+    import pyarrow as pa
     import pyarrow.compute
     import datafusion
     from datafusion import col, udaf, Accumulator
+    from typing import List

     class MyAccumulator(Accumulator):
         """
         Interface of a user-defined accumulation.
         """
         def __init__(self):
-            self._sum = pyarrow.scalar(0.0)
+            self._sum = 0.0

-        def update(self, values: pyarrow.Array) -> None:
-            # not nice since pyarrow scalars can't be summed yet. This breaks on `None`
-            self._sum = pyarrow.scalar(self._sum.as_py() + pyarrow.compute.sum(values).as_py())
+        def update(self, values_a: pa.Array, values_b: pa.Array) -> None:
+            self._sum = self._sum + pyarrow.compute.sum(values_a).as_py() - pyarrow.compute.sum(values_b).as_py()

-        def merge(self, states: pyarrow.Array) -> None:
-            # not nice since pyarrow scalars can't be summed yet. This breaks on `None`
-            self._sum = pyarrow.scalar(self._sum.as_py() + pyarrow.compute.sum(states).as_py())
+        def merge(self, states: list[pa.Array]) -> None:
+            self._sum = self._sum + pyarrow.compute.sum(states[0]).as_py()

-        def state(self) -> pyarrow.Array:
-            return pyarrow.array([self._sum.as_py()])
+        def state(self) -> list[pa.Scalar]:
+            return [pyarrow.scalar(self._sum)]

-        def evaluate(self) -> pyarrow.Scalar:
-            return self._sum
+        def evaluate(self) -> pa.Scalar:
+            return pyarrow.scalar(self._sum)

     ctx = datafusion.SessionContext()
     df = ctx.from_pydict(
         {
-            "a": [1, 2, 3],
-            "b": [4, 5, 6],
+            "a": [4, 5, 6],
+            "b": [1, 2, 3],
         }
     )

-    my_udaf = udaf(MyAccumulator, pyarrow.float64(), pyarrow.float64(), [pyarrow.float64()], 'stable')
+    my_udaf = udaf(MyAccumulator, [pa.float64(), pa.float64()], pa.float64(), [pa.float64()], 'stable')

-    df.aggregate([],[my_udaf(col("a"))])
+    df.aggregate([], [my_udaf(col("a"), col("b")).alias("col_diff")])

FAQ
^^^

**How do I return a list from a UDAF?**

Both the ``evaluate`` and the ``state`` functions expect to return scalar values.
If you wish to return a list array as a scalar value, the best practice is to
wrap the values in a ``pyarrow.Scalar`` object. For example, you can return a
timestamp list with ``pa.scalar([...], type=pa.list_(pa.timestamp("ms")))`` and
register the appropriate return or state types as
``return_type=pa.list_(pa.timestamp("ms"))`` and
``state_type=[pa.list_(pa.timestamp("ms"))]``, respectively.

As of DataFusion 52.0.0, you can return any Python object, including a
PyArrow array, as the return value(s) for these functions and DataFusion will
attempt to create a scalar type from the value. DataFusion has been tested to
convert PyArrow, nanoarrow, and arro3 objects as well as primitive data types
like integers, strings, and so on.

Window Functions
----------------

To implement a User-Defined Window Function (UDWF) you must call the
:py:func:`~datafusion.user_defined.WindowUDF.udwf` function using a class that implements the abstract
class :py:class:`~datafusion.user_defined.WindowEvaluator`.

There are three methods of evaluation for UDWFs.

- ``evaluate`` computes the value for a single row of an input array. This is the simplest
  method to implement, but also the least performant.
- ``evaluate_all`` computes the values for all rows of an input array at a single time.
- ``evaluate_all_with_rank`` computes the values for all rows, but you only have the rank
  information for the rows.

Which methods you implement depend upon which of the following options are set.

.. list-table::
   :header-rows: 1

   * - ``uses_window_frame``
     - ``supports_bounded_execution``
     - ``include_rank``
     - function_to_implement
   * - False (default)
     - False (default)
     - False (default)
     - ``evaluate_all``
   * - False
     - True
     - False
     - ``evaluate``
   * - False
     - True/False
     - True
     - ``evaluate_all_with_rank``
   * - True
     - True/False
     - True/False
     - ``evaluate``

UDWF options
^^^^^^^^^^^^

When you define your UDWF you can override the functions that return these values. They will
determine which evaluate functions are called.

- ``uses_window_frame`` is set for functions that compute based on the specified window frame. If
  your function depends upon the specified frame, set this to ``True``.
- ``supports_bounded_execution`` specifies if your function can be incrementally computed.
- ``include_rank`` is set to ``True`` for window functions that can be computed only using the rank
  information.


.. code-block:: python

    import pyarrow as pa
    from datafusion import udwf, col, SessionContext
    from datafusion.user_defined import WindowEvaluator

    class ExponentialSmooth(WindowEvaluator):
        def __init__(self, alpha: float) -> None:
            self.alpha = alpha

        def evaluate_all(self, values: list[pa.Array], num_rows: int) -> pa.Array:
            results = []
            curr_value = 0.0
            values = values[0]
            for idx in range(num_rows):
                if idx == 0:
                    curr_value = values[idx].as_py()
                else:
                    curr_value = values[idx].as_py() * self.alpha + curr_value * (
                        1.0 - self.alpha
                    )
                results.append(curr_value)

            return pa.array(results)

    exp_smooth = udwf(
        ExponentialSmooth(0.9),
        pa.float64(),
        pa.float64(),
        volatility="immutable",
    )

    ctx = SessionContext()

    df = ctx.from_pydict({
        "a": [1.0, 2.1, 2.9, 4.0, 5.1, 6.0, 6.9, 8.0]
    })

    df.select("a", exp_smooth(col("a")).alias("smooth_a")).show()

Table Functions
---------------

User-Defined Table Functions are slightly different from the other functions
described here. These functions take any number of ``Expr`` arguments, but only
literal expressions are supported. Table functions must return a Table
Provider as described on the :ref:`io_custom_table_provider` page.

Once you have a table function, you can register it with the session context
by using :py:func:`datafusion.context.SessionContext.register_udtf`.

There are examples of both Rust-backed and Python-based table functions in the
examples folder of the repository. If you have a Rust-backed table function
that you wish to expose via PyO3, you need to expose it as a ``PyCapsule``.

.. code-block:: rust

    #[pymethods]
    impl MyTableFunction {
        fn __datafusion_table_function__<'py>(
            &self,
            py: Python<'py>,
        ) -> PyResult<Bound<'py, PyCapsule>> {
            let name = cr"datafusion_table_function".into();

            let func = self.clone();
            let provider = FFI_TableFunction::new(Arc::new(func), None);

            PyCapsule::new(py, provider, Some(name))
        }
    }
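On the Python side, registration and use from SQL might look like the following sketch.
This is illustrative only: ``my_udtf`` stands in for a table function object you have
already created (for example, one backed by the Rust ``PyCapsule`` above), the SQL
function name is hypothetical, and the exact signature of ``register_udtf`` should be
confirmed against the API reference.

.. code-block:: python

    from datafusion import SessionContext

    ctx = SessionContext()

    # `my_udtf` is assumed to be a table function object created elsewhere,
    # e.g. exposed by a Rust library via __datafusion_table_function__.
    ctx.register_udtf(my_udtf)

    # Table functions accept only literal arguments.
    ctx.sql("SELECT * FROM my_table_function(2)").show()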
diff --git a/docs/source/user-guide/common-operations/views.rst b/docs/source/user-guide/common-operations/views.rst
new file mode 100644
index 000000000..df11e3abe
--- /dev/null
+++ b/docs/source/user-guide/common-operations/views.rst
@@ -0,0 +1,58 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements.  See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership.  The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License.  You may obtain a copy of the License at
+
+..   http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied.  See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+======================
+Registering Views
+======================
+
+You can use the context's ``register_view`` method to register a DataFrame as a view:
+
+.. code-block:: python
+
+    from datafusion import SessionContext, col, literal
+
+    # Create a DataFusion context
+    ctx = SessionContext()
+
+    # Create sample data
+    data = {"a": [1, 2, 3, 4, 5], "b": [10, 20, 30, 40, 50]}
+
+    # Create a DataFrame from the dictionary
+    df = ctx.from_pydict(data, "my_table")
+
+    # Filter the DataFrame (for example, keep rows where a > 2)
+    df_filtered = df.filter(col("a") > literal(2))
+
+    # Register the dataframe as a view with the context
+    ctx.register_view("view1", df_filtered)
+
+    # Now run a SQL query against the registered view
+    df_view = ctx.sql("SELECT * FROM view1")
+
+    # Collect the results
+    results = df_view.collect()
+
+    # Convert results to a list of dictionaries for display
+    result_dicts = [batch.to_pydict() for batch in results]
+
+    print(result_dicts)
+
+This will output:
+
+.. code-block:: python
+
+    [{'a': [3, 4, 5], 'b': [30, 40, 50]}]
diff --git a/docs/source/user-guide/common-operations/windows.rst b/docs/source/user-guide/common-operations/windows.rst
index f884c7e0d..c8fdea8f4 100644
--- a/docs/source/user-guide/common-operations/windows.rst
+++ b/docs/source/user-guide/common-operations/windows.rst
@@ -15,79 +15,202 @@
 .. specific language governing permissions and limitations
 .. under the License.

+.. _window_functions:
+
 Window Functions
 ================

-In this section you will learn about window functions. A window function utilizes values from one or multiple rows to
-produce a result for each individual row, unlike an aggregate function that provides a single value for multiple rows.
+In this section you will learn about window functions. A window function utilizes values from one or
+multiple rows to produce a result for each individual row, unlike an aggregate function that
+provides a single value for multiple rows.

-The functionality of window functions in DataFusion is supported by the dedicated :func:`.window` function.
+The window functions are available in the :py:mod:`~datafusion.functions` module.

 We'll use the pokemon dataset (from Ritchie Vink) in the following examples.

 .. ipython:: python

-    import urllib.request
     from datafusion import SessionContext
-    from datafusion import col
+    from datafusion import col, lit
     from datafusion import functions as f

-    urllib.request.urlretrieve(
-        "https://gist.githubusercontent.com/ritchie46/cac6b337ea52281aa23c049250a4ff03/raw/89a957ff3919d90e6ef2d34235e6bf22304f3366/pokemon.csv",
-        "pokemon.csv",
-    )
-
     ctx = SessionContext()
     df = ctx.read_csv("pokemon.csv")

-Here is an example that shows how to compare each pokemons’s attack power with the average attack power in its :code:`"Type 1"`
+Here is an example that shows how you can compare each pokemon's speed to the speed of the
+previous row in the DataFrame.

.. ipython:: python

    df.select(
        col('"Name"'),
        col('"Speed"'),
        f.lag(col('"Speed"')).alias("Previous Speed")
    )

Setting Parameters
------------------


Ordering
^^^^^^^^

You can control the order in which rows are processed by window functions by providing
a list of sort expressions for the ``order_by`` parameter.
 
     df.select(
         col('"Name"'),
         col('"Attack"'),
-        f.alias(
-            f.window("avg", [col('"Attack"')], partition_by=[col('"Type 1"')]),
-            "Average Attack",
+        col('"Type 1"'),
+        f.rank(
+            partition_by=[col('"Type 1"')],
+            order_by=[col('"Attack"').sort(ascending=True)],
+        ).alias("rank"),
+    ).sort(col('"Type 1"'), col('"Attack"'))
+
+Partitions
+^^^^^^^^^^
+
+A window function can take a list of ``partition_by`` columns similar to an
+:ref:`Aggregation Function`. This will cause the window values to be evaluated
+independently for each of the partitions. In the example above, we found the rank of each
+Pokemon per ``Type 1`` partition. We can see the first couple of rows in each partition if we do
+the following:
+
+.. ipython:: python
+
+    df.select(
+        col('"Name"'),
+        col('"Attack"'),
+        col('"Type 1"'),
+        f.rank(
+            partition_by=[col('"Type 1"')],
+            order_by=[col('"Attack"').sort(ascending=True)],
+        ).alias("rank"),
+    ).filter(col("rank") < lit(3)).sort(col('"Type 1"'), col("rank"))
+
+Window Frame
+^^^^^^^^^^^^
+
+When using aggregate functions, the Window Frame defines the rows over which it operates.
+If you do not specify a Window Frame, the frame will be set depending on the following
+criteria.
+
+* If an ``order_by`` clause is set, the default window frame is defined as the rows between
+  unbounded preceding and the current row.
+* If an ``order_by`` is not set, the default frame is defined as the rows between unbounded
+  preceding and unbounded following (the entire partition).
+
+Window Frames are defined by three parameters: unit type, starting bound, and ending bound.
+
+The unit types available are:
+
+* Rows: The starting and ending boundaries are defined by the number of rows relative to the
+  current row.
+* Range: When using Range, the ``order_by`` clause must have exactly one term. The boundaries
+  are defined by how close the rows are to the value of the expression in the ``order_by``
+  parameter.
+* Groups: A "group" is the set of all rows that have equivalent values for all terms in the
+  ``order_by`` clause.
+
+In this example we perform a "rolling average" of the speed of the current Pokemon and the
+two preceding rows.
+
+.. ipython:: python
+
+    from datafusion.expr import Window, WindowFrame
+
+    df.select(
+        col('"Name"'),
+        col('"Speed"'),
+        f.avg(col('"Speed"'))
+        .over(Window(window_frame=WindowFrame("rows", 2, 0), order_by=[col('"Speed"')]))
+        .alias("Previous Speed"),
+    )
+
+Null Treatment
+^^^^^^^^^^^^^^
+
+When using aggregate functions as window functions, it is often useful to specify how null values
+should be treated. In order to do this you need to use the builder function. In future releases
+we expect this to be simplified in the interface.
+
+One common usage for handling nulls is the case where you want to find the last value up to the
+current row. In the following example we demonstrate how setting the null treatment to ignore
+nulls will fill in with the value of the most recent non-null row. To do this, we also will set
+the window frame so that we only process up to the current row.
+
+In this example, we filter down to one specific type of Pokemon that does have some entries in
+its ``Type 2`` column that are null.
+
+.. ipython:: python
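+
+    # ``Window`` and ``WindowFrame`` are still in scope here; they were
+    # imported in the rolling-average example above.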
+    from datafusion.common import NullTreatment
+
+    df.filter(col('"Type 1"') == lit("Bug")).select(
+        '"Name"',
+        '"Type 2"',
+        f.last_value(col('"Type 2"'))
+        .over(
+            Window(
+                window_frame=WindowFrame("rows", None, 0),
+                order_by=[col('"Speed"')],
+                null_treatment=NullTreatment.IGNORE_NULLS,
+            )
+        )
+        .alias("last_wo_null"),
+        f.last_value(col('"Type 2"'))
+        .over(
+            Window(
+                window_frame=WindowFrame("rows", None, 0),
+                order_by=[col('"Speed"')],
+                null_treatment=NullTreatment.RESPECT_NULLS,
+            )
         )
+        .alias("last_with_null"),
     )
 
-You can also control the order in which rows are processed by window functions by providing
-a list of :func:`.order_by` functions for the :code:`order_by` parameter.
+Aggregate Functions
+-------------------
+
+You can use any :ref:`Aggregation Function` as a window function. Currently
+aggregate functions must use the deprecated
+:py:func:`datafusion.functions.window` API but this should be resolved in
+DataFusion 42.0 (`Issue Link `_). Here
+is an example that shows how to compare each pokemon's attack power with the average attack
+power in its ``"Type 1"`` using the :py:func:`datafusion.functions.avg` function.
 
 .. ipython:: python
+    :okwarning:
 
     df.select(
         col('"Name"'),
         col('"Attack"'),
-        f.alias(
-            f.window(
-                "rank",
-                [],
-                partition_by=[col('"Type 1"')],
-                order_by=[f.order_by(col('"Attack"'))],
-            ),
-            "rank",
-        ),
+        col('"Type 1"'),
+        f.window("avg", [col('"Attack"')])
+        .partition_by(col('"Type 1"'))
+        .build()
+        .alias("Average Attack"),
     )
 
+Available Functions
+-------------------
+
 The possible window functions are:
 
 1. Rank Functions
-   - rank
-   - dense_rank
-   - row_number
-   - ntile
+   - :py:func:`datafusion.functions.rank`
+   - :py:func:`datafusion.functions.dense_rank`
+   - :py:func:`datafusion.functions.ntile`
+   - :py:func:`datafusion.functions.row_number`
 
 2. Analytical Functions
-   - cume_dist
-   - percent_rank
-   - lag
-   - lead
-   - first_value
-   - last_value
-   - nth_value
+   - :py:func:`datafusion.functions.cume_dist`
+   - :py:func:`datafusion.functions.percent_rank`
+   - :py:func:`datafusion.functions.lag`
+   - :py:func:`datafusion.functions.lead`
 
 3. Aggregate Functions
-   - All aggregate functions can be used as window functions.
+   - All :ref:`Aggregation Functions` can be used as window functions.
diff --git a/docs/source/user-guide/configuration.rst b/docs/source/user-guide/configuration.rst
index 0c1a4818a..f8e613cd4 100644
--- a/docs/source/user-guide/configuration.rst
+++ b/docs/source/user-guide/configuration.rst
@@ -15,22 +15,24 @@
 .. specific language governing permissions and limitations
 .. under the License.
 
+.. _configuration:
+
 Configuration
 =============
 
-Let's look at how we can configure DataFusion. When creating a :code:`SessionContext`, you can pass in
-a :code:`SessionConfig` and :code:`RuntimeConfig` object. These two cover a wide range of options.
+Let's look at how we can configure DataFusion. When creating a :py:class:`~datafusion.context.SessionContext`, you can pass in
+a :py:class:`~datafusion.context.SessionConfig` and :py:class:`~datafusion.context.RuntimeEnvBuilder` object. These two cover a wide range of options.
 
 .. code-block:: python
 
-    from datafusion import RuntimeConfig, SessionConfig, SessionContext
+    from datafusion import RuntimeEnvBuilder, SessionConfig, SessionContext
 
     # create a session context with default settings
     ctx = SessionContext()
     print(ctx)
 
     # create a session context with explicit runtime and config settings
-    runtime = RuntimeConfig().with_disk_manager_os().with_fair_spill_pool(10000000)
+    runtime = RuntimeEnvBuilder().with_disk_manager_os().with_fair_spill_pool(10000000)
     config = (
         SessionConfig()
         .with_create_default_catalog_and_schema(True)
@@ -46,6 +48,141 @@ a :code:`SessionConfig` and :code:`RuntimeConfig` object. These two cover a wide
     ctx = SessionContext(config, runtime)
     print(ctx)
 
+Maximizing CPU Usage
+--------------------
+
+DataFusion uses partitions to parallelize work. For small queries the
+default configuration (number of CPU cores) is often sufficient, but to
+fully utilize available hardware you can tune how many partitions are
+created and when DataFusion will repartition data automatically.
+
+Configure a ``SessionContext`` with a higher partition count:
+
+.. code-block:: python
+
+    from datafusion import SessionConfig, SessionContext
+
+    # allow up to 16 concurrent partitions
+    config = SessionConfig().with_target_partitions(16)
+    ctx = SessionContext(config)
+
+Automatic repartitioning for joins, aggregations, window functions and
+other operations can be enabled to increase parallelism:
+
+.. code-block:: python
+
+    config = (
+        SessionConfig()
+        .with_target_partitions(16)
+        .with_repartition_joins(True)
+        .with_repartition_aggregations(True)
+        .with_repartition_windows(True)
+    )
+
+Manual repartitioning is available on DataFrames when you need precise
+control:
+
+.. code-block:: python
+
+    from datafusion import col
+
+    df = ctx.read_parquet("data.parquet")
+
+    # Evenly divide into 16 partitions
+    df = df.repartition(16)
+
+    # Or partition by the hash of a column
+    df = df.repartition_by_hash(col("a"), num=16)
+
+    result = df.collect()
+
+
+Benchmark Example
+^^^^^^^^^^^^^^^^^
+
+The repository includes a benchmark script that demonstrates how to maximize CPU usage
+with DataFusion. The :code:`benchmarks/max_cpu_usage.py` script shows a practical example
+of configuring DataFusion for optimal parallelism.
+
+You can run the benchmark script to see the impact of different configuration settings:
+
+.. code-block:: bash
+
+    # Run with default settings (uses all CPU cores)
+    python benchmarks/max_cpu_usage.py
+
+    # Run with specific number of rows and partitions
+    python benchmarks/max_cpu_usage.py --rows 5000000 --partitions 16
+
+    # See all available options
+    python benchmarks/max_cpu_usage.py --help
+
+Here's an example showing the performance difference between single and multiple partitions:
+
+.. code-block:: bash
+
+    # Single partition - slower processing
+    $ python benchmarks/max_cpu_usage.py --rows=10000000 --partitions 1
+    Processed 10000000 rows using 1 partitions in 0.107s
+
+    # Multiple partitions - faster processing
+    $ python benchmarks/max_cpu_usage.py --rows=10000000 --partitions 10
+    Processed 10000000 rows using 10 partitions in 0.038s
+
+This example demonstrates a nearly 3x performance improvement (0.107s vs 0.038s) when using
+10 partitions instead of 1, showcasing how proper partitioning can significantly improve
+CPU utilization and query performance.
+
+The script demonstrates several key optimization techniques:
+
+1. **Higher target partition count**: Uses :code:`with_target_partitions()` to set the number of concurrent partitions
+2. **Automatic repartitioning**: Enables repartitioning for joins, aggregations, and window functions
+3. **Manual repartitioning**: Uses :code:`repartition()` to ensure all partitions are utilized
+4. **CPU-intensive operations**: Performs aggregations that can benefit from parallelization
+
+The benchmark creates synthetic data and measures the time taken to perform a sum aggregation
+across the specified number of partitions. This helps you understand how partition configuration
+affects performance on your specific hardware.
+
+Important Considerations
+""""""""""""""""""""""""
+
+The provided benchmark script demonstrates partitioning concepts using synthetic in-memory data
+and simple aggregation operations. While useful for understanding basic configuration principles,
+actual performance in production environments may vary significantly based on numerous factors:
+
+**Data Sources and I/O Characteristics:**
+
+- **Table providers**: Performance differs greatly between Parquet files, CSV files, databases, and cloud storage
+- **Storage type**: Local SSD, network-attached storage, and cloud storage have vastly different characteristics
+- **Network latency**: Remote data sources introduce additional latency considerations
+- **File sizes and distribution**: Large files may benefit differently from partitioning than many small files
+
+**Query and Workload Characteristics:**
+
+- **Operation complexity**: Simple aggregations versus complex joins, window functions, or nested queries
+- **Data distribution**: Skewed data may not partition evenly, affecting parallel efficiency
+- **Memory usage**: Large datasets may require different memory management strategies
+- **Concurrent workloads**: Multiple queries running simultaneously affect resource allocation
+
+**Hardware and Environment Factors:**
+
+- **CPU architecture**: Different processors have varying parallel processing capabilities
+- **Available memory**: Limited RAM may require different optimization strategies
+- **System load**: Other applications competing for resources affect DataFusion performance
+
+**Recommendations for Production Use:**
+
+To optimize DataFusion for your specific use case, it is strongly recommended to:
+
+1. **Create custom benchmarks** using your actual data sources, formats, and query patterns
+2. **Test with representative data volumes** that match your production workloads
+3. **Measure end-to-end performance** including data loading, processing, and result handling
+4. **Evaluate different configuration combinations** for your specific hardware and workload
+5. **Monitor resource utilization** (CPU, memory, I/O) to identify bottlenecks in your environment
+
+This approach will provide more accurate insights into how DataFusion configuration options
+will impact your particular applications and infrastructure.
 
-You can read more about available :code:`SessionConfig` options `here `_,
-and about :code:`RuntimeConfig` options `here `_.
+For more information about available :py:class:`~datafusion.context.SessionConfig` options, see the `Rust DataFusion Configuration guide `_,
+and for :code:`RuntimeEnvBuilder` options, see the Rust `online API documentation `_.
diff --git a/docs/source/user-guide/data-sources.rst b/docs/source/user-guide/data-sources.rst
new file mode 100644
index 000000000..26f1303c4
--- /dev/null
+++ b/docs/source/user-guide/data-sources.rst
@@ -0,0 +1,274 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements. See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership. The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied. See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+.. _user_guide_data_sources:
+
+Data Sources
+============
+
+DataFusion provides a wide variety of ways to get data into a DataFrame to perform operations.
+
+Local file
+----------
+
+DataFusion has the ability to read from a variety of popular file formats, such as :ref:`Parquet <io_parquet>`,
+:ref:`CSV <io_csv>`, :ref:`JSON <io_json>`, and :ref:`AVRO <io_avro>`.
+
+.. ipython:: python
+
+    from datafusion import SessionContext
+    ctx = SessionContext()
+    df = ctx.read_csv("pokemon.csv")
+    df.show()
+
+Create in-memory
+----------------
+
+Sometimes it can be convenient to create a small DataFrame from a Python list or dictionary object.
+To do this in DataFusion, you can use one of the three functions
+:py:func:`~datafusion.context.SessionContext.from_pydict`,
+:py:func:`~datafusion.context.SessionContext.from_pylist`, or
+:py:func:`~datafusion.context.SessionContext.create_dataframe`.
+
+As their names suggest, ``from_pydict`` and ``from_pylist`` will create DataFrames from Python
+dictionary and list objects, respectively. ``create_dataframe`` assumes you will pass in a list
+of lists of `PyArrow Record Batches `_.
+
+The following three examples will all create identical DataFrames:
+
+.. ipython:: python
+
+    import pyarrow as pa
+
+    ctx.from_pylist([
+        { "a": 1, "b": 10.0, "c": "alpha" },
+        { "a": 2, "b": 20.0, "c": "beta" },
+        { "a": 3, "b": 30.0, "c": "gamma" },
+    ]).show()
+
+    ctx.from_pydict({
+        "a": [1, 2, 3],
+        "b": [10.0, 20.0, 30.0],
+        "c": ["alpha", "beta", "gamma"],
+    }).show()
+
+    batch = pa.RecordBatch.from_arrays(
+        [
+            pa.array([1, 2, 3]),
+            pa.array([10.0, 20.0, 30.0]),
+            pa.array(["alpha", "beta", "gamma"]),
+        ],
+        names=["a", "b", "c"],
+    )
+
+    ctx.create_dataframe([[batch]]).show()
+
+
+Object Store
+------------
+
+DataFusion has support for multiple storage options in addition to local files.
+The example below requires an appropriate S3 account with access credentials.
+
+Supported Object Stores are:
+
+- :py:class:`~datafusion.object_store.AmazonS3`
+- :py:class:`~datafusion.object_store.GoogleCloud`
+- :py:class:`~datafusion.object_store.Http`
+- :py:class:`~datafusion.object_store.LocalFileSystem`
+- :py:class:`~datafusion.object_store.MicrosoftAzure`
+
+.. code-block:: python
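+
+    # This sketch assumes AWS credentials are exported as environment
+    # variables; the bucket name and region are illustrative values.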
+    import os
+
+    from datafusion.object_store import AmazonS3
+
+    region = "us-east-1"
+    bucket_name = "yellow-trips"
+
+    s3 = AmazonS3(
+        bucket_name=bucket_name,
+        region=region,
+        access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
+        secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"),
+    )
+
+    path = f"s3://{bucket_name}/"
+    ctx.register_object_store("s3://", s3, None)
+
+    ctx.register_parquet("trips", path)
+
+    ctx.table("trips").show()
+
+Other DataFrame Libraries
+-------------------------
+
+DataFusion can import DataFrames directly from other libraries, such as
+`Polars `_ and `Pandas `_.
+Since DataFusion version 42.0.0, any DataFrame library that supports the Arrow FFI PyCapsule
+interface can be imported to DataFusion using the
+:py:func:`~datafusion.context.SessionContext.from_arrow` function. Older versions of Polars may
+not support the Arrow interface. In those cases, you can still import via the
+:py:func:`~datafusion.context.SessionContext.from_polars` function.
+
+.. code-block:: python
+
+    import pandas as pd
+
+    data = { "a": [1, 2, 3], "b": [10.0, 20.0, 30.0], "c": ["alpha", "beta", "gamma"] }
+    pandas_df = pd.DataFrame(data)
+
+    datafusion_df = ctx.from_arrow(pandas_df)
+    datafusion_df.show()
+
+.. code-block:: python
+
+    import polars as pl
+
+    polars_df = pl.DataFrame(data)
+
+    datafusion_df = ctx.from_arrow(polars_df)
+    datafusion_df.show()
+
+Delta Lake
+----------
+
+DataFusion 43.0.0 and later support the ability to register table providers from sources such
+as Delta Lake. This will require a recent version of
+`deltalake `_ to provide the required interfaces.
+
+.. code-block:: python
+
+    from deltalake import DeltaTable
+
+    delta_table = DeltaTable("path_to_table")
+    ctx.register_table("my_delta_table", delta_table)
+    df = ctx.table("my_delta_table")
+    df.show()
+
+On older versions of ``deltalake`` (prior to 0.22) you can use the
+`Arrow DataSet `_
+interface to import to DataFusion, but this does not support features such as filter pushdown,
+which can lead to a significant performance difference.
+
+.. code-block:: python
+
+    from deltalake import DeltaTable
+
+    delta_table = DeltaTable("path_to_table")
+    ctx.register_dataset("my_delta_table", delta_table.to_pyarrow_dataset())
+    df = ctx.table("my_delta_table")
+    df.show()
+
+Apache Iceberg
+--------------
+
+DataFusion 45.0.0 and later support the ability to register Apache Iceberg tables as table providers through the Custom Table Provider interface.
+
+This requires either the `pyiceberg `__ library (>=0.10.0) or the `pyiceberg-core `__ library (>=0.5.0).
+
+* The ``pyiceberg-core`` library exposes Iceberg Rust's implementation of the Custom Table Provider interface as Python bindings.
+* The ``pyiceberg`` library utilizes the ``pyiceberg-core`` Python bindings under the hood and provides a native way for Python users to interact with DataFusion.
+
+.. code-block:: python
+
+    from datafusion import SessionContext
+    from pyiceberg.catalog import load_catalog
+    import pyarrow as pa
+
+    # Load catalog and create/load a table
+    catalog = load_catalog("catalog", type="in-memory")
+    catalog.create_namespace_if_not_exists("default")
+
+    # Create some sample data
+    data = pa.table({"x": [1, 2, 3], "y": [4, 5, 6]})
+    iceberg_table = catalog.create_table("default.test", schema=data.schema)
+    iceberg_table.append(data)
+
+    # Register the table with DataFusion
+    ctx = SessionContext()
+    ctx.register_table_provider("test", iceberg_table)
+
+    # Query the table using DataFusion
+    ctx.table("test").show()
+
+
+Note that the DataFusion integration relies on features from the `Iceberg Rust `_ implementation instead of the `PyIceberg `_ implementation.
+Features that are available in PyIceberg but not yet in Iceberg Rust will not be available when using DataFusion.
+
+Custom Table Provider
+---------------------
+
+You can implement a custom Data Provider in Rust and expose it to DataFusion through
+the interface as described in the :ref:`Custom Table Provider <io_custom_table_provider>`
+section. This is an advanced topic, but a
+`user example `_
+is provided in the DataFusion repository.
+
+Catalog
+=======
+
+A common technique for organizing tables is using a three-level hierarchical approach. DataFusion
+supports this form of organization using the :py:class:`~datafusion.catalog.Catalog`,
+:py:class:`~datafusion.catalog.Schema`, and :py:class:`~datafusion.catalog.Table`. By default,
+a :py:class:`~datafusion.context.SessionContext` comes with a single Catalog and a single Schema
+with the names ``datafusion`` and ``default``, respectively.
+
+The default implementation uses an in-memory approach to the catalog and schema. We have support
+for adding additional in-memory catalogs and schemas. This can be done as in the following
+example:
+
+.. code-block:: python
+
+    from datafusion.catalog import Catalog, Schema
+
+    my_catalog = Catalog.memory_catalog()
+    my_schema = Schema.memory_schema()
+
+    my_catalog.register_schema("my_schema_name", my_schema)
+
+    ctx.register_catalog("my_catalog_name", my_catalog)
+
+You could then register tables in ``my_schema`` and access them either through the DataFrame
+API or via SQL commands such as ``"SELECT * from my_catalog_name.my_schema_name.my_table"``.
+
+User Defined Catalog and Schema
+-------------------------------
+
+If the in-memory catalogs are insufficient for your needs, there are two approaches you can take
+to implementing a custom catalog and/or schema. In the below discussion, we describe how to
+implement these for a Catalog, but the approach to implementing for a Schema is nearly
+identical.
+
+DataFusion supports Catalogs written in either Rust or Python. If you write a Catalog in Rust,
+you will need to export it as a Python library via PyO3. There is a complete example of a
+catalog implemented this way in the
+`examples folder `_
+of our repository. Writing catalog providers in Rust typically can lead to significant
+performance improvements over the Python-based approach.
+
+To implement a Catalog in Python, you will need to inherit from the abstract base class
+:py:class:`~datafusion.catalog.CatalogProvider`. There are examples in the
+`unit tests `_ of
+implementing a basic Catalog in Python where we simply keep a dictionary of the
+registered Schemas.
+
+One important note for developers is that when we have a Catalog defined in Python, we have
+two different ways of accessing this Catalog. First, we register the catalog with a Rust
+wrapper. This allows for any Rust-based code to call the Python functions as necessary.
+Second, if the user accesses the Catalog via the Python API, we identify this and return
+the original Python object that implements the Catalog. This is an important distinction
+for developers because we do *not* return a Python wrapper around the Rust wrapper of the
+original Python object.
diff --git a/docs/source/user-guide/dataframe/index.rst b/docs/source/user-guide/dataframe/index.rst
new file mode 100644
index 000000000..510bcbc68
--- /dev/null
+++ b/docs/source/user-guide/dataframe/index.rst
@@ -0,0 +1,371 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements. See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership. The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied. See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+DataFrames
+==========
+
+Overview
+--------
+
+The ``DataFrame`` class is the core abstraction in DataFusion that represents tabular data and operations
+on that data. DataFrames provide a flexible API for transforming data through various operations such as
+filtering, projection, aggregation, joining, and more.
+
+A DataFrame represents a logical plan that is lazily evaluated. The actual execution occurs only when
+terminal operations like ``collect()``, ``show()``, or ``to_pandas()`` are called.
+
+Creating DataFrames
+-------------------
+
+DataFrames can be created in several ways:
+
+* From SQL queries via a ``SessionContext``:
+
+  .. code-block:: python
+
+      from datafusion import SessionContext
+
+      ctx = SessionContext()
+      df = ctx.sql("SELECT * FROM your_table")
+
+* From registered tables:
+
+  .. code-block:: python
+
+      df = ctx.table("your_table")
+
+* From various data sources:
+
+  .. code-block:: python
+
+      # From CSV files (see :ref:`io_csv` for detailed options)
+      df = ctx.read_csv("path/to/data.csv")
+
+      # From Parquet files (see :ref:`io_parquet` for detailed options)
+      df = ctx.read_parquet("path/to/data.parquet")
+
+      # From JSON files (see :ref:`io_json` for detailed options)
+      df = ctx.read_json("path/to/data.json")
+
+      # From Avro files (see :ref:`io_avro` for detailed options)
+      df = ctx.read_avro("path/to/data.avro")
+
+      # From Pandas DataFrame
+      import pandas as pd
+      pandas_df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
+      df = ctx.from_pandas(pandas_df)
+
+      # From Arrow data
+      import pyarrow as pa
+      batch = pa.RecordBatch.from_arrays(
+          [pa.array([1, 2, 3]), pa.array([4, 5, 6])],
+          names=["a", "b"]
+      )
+      df = ctx.from_arrow(batch)
+
+For detailed information about reading from different data sources, see the :doc:`I/O Guide <../io/index>`.
+For custom data sources, see :ref:`io_custom_table_provider`.
+
+Common DataFrame Operations
+---------------------------
+
+DataFusion's DataFrame API offers a wide range of operations:
+
+.. code-block:: python
+
+    from datafusion import column, literal
+
+    # Select specific columns
+    df = df.select("col1", "col2")
+
+    # Select with expressions
+    df = df.select(column("a") + column("b"), column("a") - column("b"))
+
+    # Filter rows (expressions or SQL strings)
+    df = df.filter(column("age") > literal(25))
+    df = df.filter("age > 25")
+
+    # Add computed columns
+    df = df.with_column("full_name", column("first_name") + literal(" ") + column("last_name"))
+
+    # Multiple column additions
+    df = df.with_columns(
+        (column("a") + column("b")).alias("sum"),
+        (column("a") * column("b")).alias("product")
+    )
+
+    # Sort data
+    df = df.sort(column("age").sort(ascending=False))
+
+    # Join DataFrames
+    df = df1.join(df2, on="user_id", how="inner")
+
+    # Aggregate data
+    from datafusion import functions as f
+    df = df.aggregate(
+        [],  # Group by columns (empty for global aggregation)
+        [f.sum(column("amount")).alias("total_amount")]
+    )
+
+    # Limit rows
+    df = df.limit(100)
+
+    # Drop columns
+    df = df.drop("temporary_column")
+
+Column Names as Function Arguments
+----------------------------------
+
+Some ``DataFrame`` methods accept column names when an argument refers to an
+existing column. These include:
+
+* :py:meth:`~datafusion.DataFrame.select`
+* :py:meth:`~datafusion.DataFrame.sort`
+* :py:meth:`~datafusion.DataFrame.drop`
+* :py:meth:`~datafusion.DataFrame.join` (``on`` argument)
+* :py:meth:`~datafusion.DataFrame.aggregate` (grouping columns)
+
+See the full function documentation for details on any specific function.
+
+Note that :py:meth:`~datafusion.DataFrame.join_on` expects ``col()``/``column()`` expressions rather than plain strings.
+
+For such methods, you can pass column names directly:
+
+.. code-block:: python
+
+    from datafusion import col, functions as f
+
+    df.sort('id')
+    df.aggregate('id', [f.count(col('value'))])
+
+The same operation can also be written with explicit column expressions, using either ``col()`` or ``column()``:
+
+.. code-block:: python
+
+    from datafusion import col, column, functions as f
+
+    df.sort(col('id'))
+    df.aggregate(column('id'), [f.count(col('value'))])
+
+Note that ``column()`` is an alias of ``col()``, so you can use either name; the example above shows both in action.
+
+Whenever an argument represents an expression—such as in
+:py:meth:`~datafusion.DataFrame.filter` or
+:py:meth:`~datafusion.DataFrame.with_column`—use ``col()`` to reference
+columns. The comparison and arithmetic operators on ``Expr`` will automatically
+convert any non-``Expr`` value into a literal expression, so writing
+
+.. code-block:: python
+
+    from datafusion import col
+
+    df.filter(col("age") > 21)
+
+is equivalent to using ``lit(21)`` explicitly. Use ``lit()`` (also available
+as ``literal()``) when you need to construct a literal expression directly.
+
+Terminal Operations
+-------------------
+
+To materialize the results of your DataFrame operations:
+
+.. code-block:: python
+
+    # Collect all data as PyArrow RecordBatches
+    result_batches = df.collect()
+
+    # Convert to various formats
+    pandas_df = df.to_pandas()         # Pandas DataFrame
+    polars_df = df.to_polars()         # Polars DataFrame
+    arrow_table = df.to_arrow_table()  # PyArrow Table
+    py_dict = df.to_pydict()           # Python dictionary
+    py_list = df.to_pylist()           # Python list of dictionaries
+
+    # Display results
+    df.show()  # Print tabular format to console
+
+    # Count rows
+    count = df.count()
+
+    # Collect a single column of data as a PyArrow Array
+    arr = df.collect_column("age")
+
+Zero-copy streaming to Arrow-based Python libraries
+---------------------------------------------------
+
+DataFusion DataFrames implement the ``__arrow_c_stream__`` protocol, enabling
+zero-copy, lazy streaming into Arrow-based Python libraries. With the streaming
+protocol, batches are produced on demand.
+
+.. note::
+
+    The protocol is implementation-agnostic and works with any Python library
+    that understands the Arrow C streaming interface (for example, PyArrow
+    or other Arrow-compatible implementations). The sections below provide a
+    short PyArrow-specific example and general guidance for other
+    implementations.
+
+PyArrow
+-------
+
+.. code-block:: python
+
+    import pyarrow as pa
+
+    # Create a PyArrow RecordBatchReader without materializing all batches
+    reader = pa.RecordBatchReader.from_stream(df)
+    for batch in reader:
+        ...  # process each batch as it is produced
+
+DataFrames are also iterable, yielding :class:`datafusion.RecordBatch`
+objects lazily so you can loop over results directly without importing
+PyArrow:
+
+.. code-block:: python
+
+    for batch in df:
+        ...  # each batch is a ``datafusion.RecordBatch``
+
+Each batch exposes ``to_pyarrow()``, allowing conversion to a PyArrow
+table. ``pa.table(df)`` collects the entire DataFrame eagerly into a
+PyArrow table:
+
+.. code-block:: python
+
+    import pyarrow as pa
+    table = pa.table(df)
+
+Asynchronous iteration is supported as well, allowing integration with
+``asyncio`` event loops:
+
+.. code-block:: python
+
+    async for batch in df:
+        ...  # process each batch as it is produced
+
+Execute as Stream
+^^^^^^^^^^^^^^^^^
+
+For finer control over streaming execution, use
+:py:meth:`~datafusion.DataFrame.execute_stream` to obtain a
+:py:class:`datafusion.RecordBatchStream`:
+
+.. code-block:: python
+
+    stream = df.execute_stream()
+    for batch in stream:
+        ...  # process each batch as it is produced
+
+.. tip::
+
+    To get a PyArrow reader instead, call ``pa.RecordBatchReader.from_stream(df)``.
+
+When partition boundaries are important,
+:py:meth:`~datafusion.DataFrame.execute_stream_partitioned`
+returns an iterable of :py:class:`datafusion.RecordBatchStream` objects, one per
+partition:
+
+.. code-block:: python
+
+    for stream in df.execute_stream_partitioned():
+        for batch in stream:
+            ...  # each stream yields RecordBatches
+
+To process partitions concurrently, first collect the streams into a list
+and then poll each one in a separate ``asyncio`` task:
+
+.. code-block:: python
+
+    import asyncio
+
+    async def consume(stream):
+        async for batch in stream:
+            ...
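+
+    # Materializing the partition streams in a list up front lets
+    # asyncio.gather poll every partition concurrently, one task per stream.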
+    streams = list(df.execute_stream_partitioned())
+    await asyncio.gather(*(consume(s) for s in streams))
+
+See :doc:`../io/arrow` for additional details on the Arrow interface.
+
+HTML Rendering
+--------------
+
+When working in Jupyter notebooks or other environments that support HTML rendering, DataFrames will
+automatically display as formatted HTML tables. For detailed information about customizing HTML
+rendering, formatting options, and advanced styling, see :doc:`rendering`.
+
+Core Classes
+------------
+
+**DataFrame**
+    The main DataFrame class for building and executing queries.
+
+    See: :py:class:`datafusion.DataFrame`
+
+**SessionContext**
+    The primary entry point for creating DataFrames from various data sources.
+
+    Key methods for DataFrame creation:
+
+    * :py:meth:`~datafusion.SessionContext.read_csv` - Read CSV files
+    * :py:meth:`~datafusion.SessionContext.read_parquet` - Read Parquet files
+    * :py:meth:`~datafusion.SessionContext.read_json` - Read JSON files
+    * :py:meth:`~datafusion.SessionContext.read_avro` - Read Avro files
+    * :py:meth:`~datafusion.SessionContext.table` - Access registered tables
+    * :py:meth:`~datafusion.SessionContext.sql` - Execute SQL queries
+    * :py:meth:`~datafusion.SessionContext.from_pandas` - Create from Pandas DataFrame
+    * :py:meth:`~datafusion.SessionContext.from_arrow` - Create from Arrow data
+
+    See: :py:class:`datafusion.SessionContext`
+
+Expression Classes
+------------------
+
+**Expr**
+    Represents expressions that can be used in DataFrame operations.
+
+    See: :py:class:`datafusion.Expr`
+
+**Functions for creating expressions:**
+
+* :py:func:`datafusion.column` - Reference a column by name
+* :py:func:`datafusion.literal` - Create a literal value expression
+
+Built-in Functions
+------------------
+
+DataFusion provides many built-in functions for data manipulation:
+
+* :py:mod:`datafusion.functions` - Mathematical, string, date/time, and aggregation functions
+
+For a complete list of available functions, see the :py:mod:`datafusion.functions` module documentation.
+
+
+.. toctree::
+   :maxdepth: 1
+
+   rendering
diff --git a/docs/source/user-guide/dataframe/rendering.rst b/docs/source/user-guide/dataframe/rendering.rst
new file mode 100644
index 000000000..9dea948bb
--- /dev/null
+++ b/docs/source/user-guide/dataframe/rendering.rst
@@ -0,0 +1,223 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements. See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership. The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied. See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+HTML Rendering in Jupyter
+=========================
+
+When working in Jupyter notebooks or other environments that support rich HTML display,
+DataFusion DataFrames automatically render as nicely formatted HTML tables. This functionality
+is provided by the ``_repr_html_`` method, which is automatically called by Jupyter to provide
+a richer visualization than plain text output.
+
+Basic HTML Rendering
+--------------------
+
+In a Jupyter environment, simply displaying a DataFrame object will trigger HTML rendering:
+
+.. code-block:: python
+
+    # Will display as HTML table in Jupyter
+    df
+
+    # Explicit display also uses HTML rendering
+    display(df)
+
+Customizing HTML Rendering
+---------------------------
+
+DataFusion provides extensive customization options for HTML table rendering through the
+``datafusion.html_formatter`` module.
+
+Configuring the HTML Formatter
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+You can customize how DataFrames are rendered by configuring the formatter:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import configure_formatter
+
+    # Change the default styling
+    configure_formatter(
+        max_cell_length=25,        # Maximum characters in a cell before truncation
+        max_width=1000,            # Maximum width in pixels
+        max_height=300,            # Maximum height in pixels
+        max_memory_bytes=2097152,  # Maximum memory for rendering (2MB)
+        min_rows=10,               # Minimum number of rows to display
+        max_rows=10,               # Maximum rows to display in __repr__
+        enable_cell_expansion=True,    # Allow expanding truncated cells
+        custom_css=None,           # Additional custom CSS
+        show_truncation_message=True,  # Show message when data is truncated
+        style_provider=None,       # Custom styling provider
+        use_shared_styles=True     # Share styles across tables
+    )
+
+The formatter settings affect all DataFrames displayed after configuration.
+
+Custom Style Providers
+-----------------------
+
+For advanced styling needs, you can create a custom style provider:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import StyleProvider, configure_formatter
+
+    class MyStyleProvider(StyleProvider):
+        def get_table_styles(self):
+            return {
+                "table": "border-collapse: collapse; width: 100%;",
+                "th": "background-color: #007bff; color: white; padding: 8px; text-align: left;",
+                "td": "border: 1px solid #ddd; padding: 8px;",
+                "tr:nth-child(even)": "background-color: #f2f2f2;",
+            }
+
+        def get_value_styles(self, dtype, value):
+            """Return custom styles for specific values"""
+            if dtype == "float" and value < 0:
+                return "color: red;"
+            return None
+
+    # Apply the custom style provider
+    configure_formatter(style_provider=MyStyleProvider())
+
+Performance Optimization with Shared Styles
+--------------------------------------------
+
+The ``use_shared_styles`` parameter (enabled by default) optimizes performance when displaying
+multiple DataFrames in notebook environments:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import StyleProvider, configure_formatter
+
+    # Default: Use shared styles (recommended for notebooks)
+    configure_formatter(use_shared_styles=True)
+
+    # Disable shared styles (each DataFrame includes its own styles)
+    configure_formatter(use_shared_styles=False)
+
+When ``use_shared_styles=True``:
+
+- CSS styles and JavaScript are included only once per notebook session
+- This reduces HTML output size and prevents style duplication
+- Improves rendering performance with many DataFrames
+- Applies consistent styling across all DataFrames
+
+Creating a Custom Formatter
+----------------------------
+
+For complete control over rendering, you can implement a custom formatter:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import Formatter, get_formatter
+
+    class MyFormatter(Formatter):
+        def format_html(self, batches, schema, has_more=False, table_uuid=None):
+            # Create your custom HTML here
+            html = "<table>"
+            # ... formatting logic ...
+            html += "</table>"
+            return html
+
+    # Set as the global formatter
+    configure_formatter(formatter_class=MyFormatter)
+
+    # Or use the formatter just for specific operations
+    formatter = get_formatter()
+    custom_html = formatter.format_html(batches, schema)
+
+Managing Formatters
+-------------------
+
+Reset to default formatting:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import reset_formatter
+
+    # Reset to default settings
+    reset_formatter()
+
+Get the current formatter settings:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import get_formatter
+
+    formatter = get_formatter()
+    print(formatter.max_rows)
+    print(formatter.theme)
+
+Contextual Formatting
+----------------------
+
+You can also use a context manager to temporarily change formatting settings:
+
+.. code-block:: python
+
+    from datafusion.html_formatter import formatting_context
+
+    # Default formatting
+    df.show()
+
+    # Temporarily use different formatting
+    with formatting_context(max_rows=100, theme="dark"):
+        df.show()  # Will use the temporary settings
+
+    # Back to default formatting
+    df.show()
+
+Memory and Display Controls
+---------------------------
+
+You can control how much data is displayed and how much memory is used for rendering:
+
+.. code-block:: python
+
+    configure_formatter(
+        max_memory_bytes=4 * 1024 * 1024,  # 4MB maximum memory for display
+        min_rows=20,  # Always show at least 20 rows
+        max_rows=50   # Show up to 50 rows in output
+    )
+
+These parameters help balance comprehensive data display against performance considerations.
+
+Best Practices
+--------------
+
+1. **Global Configuration**: Use ``configure_formatter()`` at the beginning of your notebook to set up consistent formatting for all DataFrames.
+
+2. **Memory Management**: Set appropriate ``max_memory_bytes`` limits to prevent performance issues with large datasets.
+
+3. **Shared Styles**: Keep ``use_shared_styles=True`` (default) for better performance in notebooks with multiple DataFrames.
+
+4. **Reset When Needed**: Call ``reset_formatter()`` when you want to start fresh with default settings.
+
+5. **Cell Expansion**: Use ``enable_cell_expansion=True`` when cells might contain longer content that users may want to see in full.
+
+Additional Resources
+--------------------
+
+* :doc:`../dataframe/index` - Complete guide to using DataFrames
+* :doc:`../io/index` - I/O Guide for reading data from various sources
+* :doc:`../data-sources` - Comprehensive data sources guide
+* :ref:`io_csv` - CSV file reading
+* :ref:`io_parquet` - Parquet file reading
+* :ref:`io_json` - JSON file reading
+* :ref:`io_avro` - Avro file reading
+* :ref:`io_custom_table_provider` - Custom table providers
+* `API Reference `_ - Full API reference
diff --git a/docs/source/user-guide/introduction.rst b/docs/source/user-guide/introduction.rst
index 8abb9113e..7b30ef2b2 100644
--- a/docs/source/user-guide/introduction.rst
+++ b/docs/source/user-guide/introduction.rst
@@ -39,5 +39,39 @@ You can verify the installation by running:
     import datafusion
     datafusion.__version__
 
+In this documentation we will also show some examples of how DataFusion integrates
+with Jupyter notebooks. To install and start a JupyterLab session, use
+
+.. code-block:: shell
+
+    pip install jupyterlab
+    jupyter lab
+
+To demonstrate working with DataFusion, we need a data source. Later in the tutorial we will show
+options for data sources. For our first example, we demonstrate using a Pokemon dataset that you
+can download
+`here `_.
+
+With that file in place you can use the following Python example to view the DataFrame in
+DataFusion.
+
+.. ipython:: python
+
+    from datafusion import SessionContext
+
+    ctx = SessionContext()
+
+    df = ctx.read_csv("pokemon.csv")
+
+    df.show()
+
+If you are working in a Jupyter notebook, you can also use the following to give you a table
+display that may be easier to read.
+
+.. code-block:: python
+
+    display(df)
+
+.. image:: ../images/jupyter_lab_df_view.png
+   :width: 800
+   :alt: Rendered table showing Pokemon DataFrame
diff --git a/docs/source/user-guide/io/arrow.rst b/docs/source/user-guide/io/arrow.rst
new file mode 100644
index 000000000..9196fcea7
--- /dev/null
+++ b/docs/source/user-guide/io/arrow.rst
@@ -0,0 +1,75 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements. See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership. The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied. See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+Arrow
+=====
+
+DataFusion implements the
+`Apache Arrow PyCapsule interface `_
+for importing and exporting DataFrames with zero copy. With this feature, any Python
+project that implements this interface can share data back and forth with DataFusion
+with zero copy.
+
+We can demonstrate using `pyarrow `_.
+
+Importing to DataFusion
+-----------------------
+
+Here we will create an Arrow table and import it to DataFusion.
+
+To import an Arrow table, use :py:func:`datafusion.context.SessionContext.from_arrow`.
+This will accept any Python object that implements
+`__arrow_c_stream__ `_
+or `__arrow_c_array__ `_
+and returns a ``StructArray``. Common pyarrow sources you can use are:
+
+- `Array `_ (but it must return a Struct Array)
+- `Record Batch `_
+- `Record Batch Reader `_
+- `Table `_
+
+.. ipython:: python
+
+    from datafusion import SessionContext
+    import pyarrow as pa
+
+    data = {"a": [1, 2, 3], "b": [4, 5, 6]}
+    table = pa.Table.from_pydict(data)
+
+    ctx = SessionContext()
+    df = ctx.from_arrow(table)
+    df
+
+Exporting from DataFusion
+-------------------------
+
+DataFusion DataFrames implement the ``__arrow_c_stream__`` PyCapsule interface, so any
+Python library that accepts these can import a DataFusion DataFrame directly.
+
+Invoking ``__arrow_c_stream__`` triggers execution of the underlying query, but
+batches are yielded incrementally rather than materialized all at once in memory.
+Consumers can process the stream as it arrives. The stream executes lazily,
+letting downstream readers pull batches on demand.
+
+
+.. ipython:: python
+
+    from datafusion import col, lit
+
+    df = df.select((col("a") * lit(1.5)).alias("c"), lit("df").alias("d"))
+    pa.table(df)
+
diff --git a/docs/source/user-guide/io/avro.rst b/docs/source/user-guide/io/avro.rst
index 85d546e2a..66398ac7f 100644
--- a/docs/source/user-guide/io/avro.rst
+++ b/docs/source/user-guide/io/avro.rst
@@ -15,11 +15,13 @@
 .. specific language governing permissions and limitations
 .. under the License.
 
+.. _io_avro:
+
 Avro
 ====
 
 `Avro `_ is a serialization format for record data. Reading an avro file is very straightforward
-with :meth:`.SessionContext.read_avro`
+with :py:func:`~datafusion.context.SessionContext.read_avro`
 
 .. code-block:: python
diff --git a/docs/source/user-guide/io/csv.rst b/docs/source/user-guide/io/csv.rst
index 3f95c54a5..9c23c291b 100644
--- a/docs/source/user-guide/io/csv.rst
+++ b/docs/source/user-guide/io/csv.rst
@@ -15,10 +15,12 @@
 .. specific language governing permissions and limitations
 .. under the License.
 
+.. _io_csv:
+
 CSV
 ===
 
-Reading a csv is very straightforward with :meth:`.SessionContext.read_csv`
+Reading a CSV file is very straightforward with :py:func:`~datafusion.context.SessionContext.read_csv`
 
 .. code-block:: python
 
@@ -28,9 +30,31 @@ Reading a csv is very straightforward with :meth:`.SessionContext.read_csv`
     ctx = SessionContext()
     df = ctx.read_csv("file.csv")
 
-An alternative is to use :meth:`.SessionContext.register_csv`
+An alternative is to use :py:func:`~datafusion.context.SessionContext.register_csv`
 
 .. code-block:: python
 
     ctx.register_csv("file", "file.csv")
-    df = ctx.table("file")
\ No newline at end of file
+    df = ctx.table("file")
+
+If you require additional control over how to read the CSV file, you can use
+:py:class:`~datafusion.options.CsvReadOptions` to set a variety of options.
+
+.. code-block:: python
+
+    from datafusion import CsvReadOptions
+
+    options = (
+        CsvReadOptions()
+        .with_has_header(True)                  # File contains a header row
+        .with_delimiter(";")                    # Use ; as the delimiter instead of ,
+        .with_comment("#")                      # Skip lines starting with #
+        .with_escape("\\")                      # Escape character
+        .with_null_regex(r"^(null|NULL|N/A)$")  # Treat these as NULL
+        .with_truncated_rows(True)              # Allow rows to have incomplete columns
+        .with_file_compression_type("gzip")     # Read gzipped CSV
+        .with_file_extension(".gz")             # File extension other than .csv
+    )
+    df = ctx.read_csv("data.csv.gz", options=options)
+
+Details for all CSV reading options can be found on the
+`DataFusion documentation site `_.
diff --git a/docs/source/user-guide/io/index.rst b/docs/source/user-guide/io/index.rst
index af08240ff..b885cfeda 100644
--- a/docs/source/user-guide/io/index.rst
+++ b/docs/source/user-guide/io/index.rst
@@ -21,8 +21,9 @@ IO
 .. toctree::
    :maxdepth: 2
 
+   arrow
+   avro
    csv
-   parquet
    json
-   avro
-
+   parquet
+   table_provider
diff --git a/docs/source/user-guide/io/json.rst b/docs/source/user-guide/io/json.rst
index 1ee065c44..39030db7f 100644
--- a/docs/source/user-guide/io/json.rst
+++ b/docs/source/user-guide/io/json.rst
@@ -15,10 +15,12 @@
 .. specific language governing permissions and limitations
 .. under the License.
 
+.. _io_json:
+
 JSON
 ====
 `JSON `_ (JavaScript Object Notation) is a lightweight data-interchange format.
-When it comes to reading a JSON file, using :meth:`.SessionContext.read_json` is a simple and easy
+When it comes to reading a JSON file, using :py:func:`~datafusion.context.SessionContext.read_json` is simple and easy
 
 .. code-block:: python
diff --git a/docs/source/user-guide/io/parquet.rst b/docs/source/user-guide/io/parquet.rst
index 78bba30c5..c5b9ca3d4 100644
--- a/docs/source/user-guide/io/parquet.rst
+++ b/docs/source/user-guide/io/parquet.rst
@@ -15,22 +15,23 @@
 .. specific language governing permissions and limitations
 .. under the License.
 
+.. _io_parquet:
+
 Parquet
 =======
 
-It is quite simple to read a parquet file using the :meth:`.SessionContext.read_parquet` function.
+It is quite simple to read a parquet file using the :py:func:`~datafusion.context.SessionContext.read_parquet` function.
 
 .. code-block:: python
 
-
     from datafusion import SessionContext
 
     ctx = SessionContext()
     df = ctx.read_parquet("file.parquet")
 
-An alternative is to use :meth:`.SessionContext.register_parquet`
+An alternative is to use :py:func:`~datafusion.context.SessionContext.register_parquet`
 
 .. code-block:: python
 
     ctx.register_parquet("file", "file.parquet")
-    df = ctx.table("file")
\ No newline at end of file
+    df = ctx.table("file")
diff --git a/docs/source/user-guide/io/table_provider.rst b/docs/source/user-guide/io/table_provider.rst
new file mode 100644
index 000000000..29e5d9880
--- /dev/null
+++ b/docs/source/user-guide/io/table_provider.rst
@@ -0,0 +1,62 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements. See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership. The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied. See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+.. _io_custom_table_provider:
+
+Custom Table Provider
+=====================
+
+If you have a custom data source that you want to integrate with DataFusion, you can do so by
+implementing the `TableProvider `_
+interface in Rust and then exposing it in Python. To do so,
+you must use DataFusion 43.0.0 or later and expose a `FFI_TableProvider `_
+via `PyCapsule `_.
+
+A complete example can be found in the `examples folder `_.
+
+.. code-block:: rust
+
+    #[pymethods]
+    impl MyTableProvider {
+
+        fn __datafusion_table_provider__<'py>(
+            &self,
+            py: Python<'py>,
+        ) -> PyResult<Bound<'py, PyCapsule>> {
+            let name = cr"datafusion_table_provider".into();
+
+            let provider = Arc::new(self.clone());
+            let provider = FFI_TableProvider::new(provider, false, None);
+
+            PyCapsule::new_bound(py, provider, Some(name.clone()))
+        }
+    }
+
+Once you have this library available, you can construct a
+:py:class:`~datafusion.Table` in Python and register it with the
+``SessionContext``.
+
+.. code-block:: python
+
+    from datafusion import SessionContext, Table
+
+    ctx = SessionContext()
+    provider = MyTableProvider()
+
+    ctx.register_table("capsule_table", provider)
+
+    ctx.table("capsule_table").show()
diff --git a/docs/source/user-guide/sql.rst b/docs/source/user-guide/sql.rst
index 6fa7f0c6a..b4bfb9611 100644
--- a/docs/source/user-guide/sql.rst
+++ b/docs/source/user-guide/sql.rst
@@ -23,17 +23,100 @@ DataFusion also offers a SQL API, read the full reference `here `_,
+
+Parameterized queries are similar to
+`prepared statements `_,
+but allow passing named parameters into a SQL query. Consider this simple
+example.
+
+.. ipython:: python
+
+    def show_attacks(ctx: SessionContext, threshold: int) -> None:
+        ctx.sql(
+            'SELECT "Name", "Attack" FROM pokemon WHERE "Attack" > $val', val=threshold
+        ).show(num=5)
+
+    show_attacks(ctx, 75)
+
+When passing parameters like the example above, we convert the Python objects
+into their string representation. We also have special case handling
+for :py:class:`~datafusion.dataframe.DataFrame` objects, since they cannot simply
+be turned into string representations for an SQL query. In these cases we
+will register a temporary view in the :py:class:`~datafusion.context.SessionContext`
+using a generated table name.
+
+The format for passing string replacement objects is to precede the
+variable name with a single ``$``. This works for all dialects in
+the SQL parser except ``hive`` and ``mysql``. Since these dialects do not
+support named placeholders, we are unable to do this type of replacement.
+We recommend either switching to another dialect or using Python
+f-string style replacement.
+
+.. warning::
+
+    To support DataFrame parameterized queries, your session must support
+    registration of temporary views. The default
+    :py:class:`~datafusion.catalog.CatalogProvider` and
+    :py:class:`~datafusion.catalog.SchemaProvider` do have this capability.
+    If you have implemented custom providers, it is important that temporary
+    views do not persist across :py:class:`~datafusion.context.SessionContext`
+    instances, or you may get unintended consequences.
+
+The following example shows passing in both a :py:class:`~datafusion.dataframe.DataFrame`
+object as well as a Python object to be used in parameterized replacement.
+
+.. ipython:: python
+
+    def show_column(
+        ctx: SessionContext, column: str, df: DataFrame, threshold: int
+    ) -> None:
+        ctx.sql(
+            'SELECT "Name", $col FROM $df WHERE $col > $val',
+            col=column,
+            df=df,
+            val=threshold,
+        ).show(num=5)
+
+    df = ctx.table("pokemon")
+    show_column(ctx, '"Defense"', df, 75)
+
+The approach implemented for conversion of variables into a SQL query
+relies on string conversion. This has the potential for data loss,
+specifically for cases like floating point numbers. If you need to pass
+variables into a parameterized query and it is important to maintain the
+original value without conversion to a string, then you can use the
+optional parameter ``param_values`` to specify these. This parameter
+expects a dictionary mapping from the parameter name to a Python
+object. Those objects will be cast into a
+`PyArrow Scalar Value `_.
+
+Using ``param_values`` will rely on the SQL dialect you have configured
+for your session. This can be set using the :ref:`configuration options <configuration>`
+of your :py:class:`~datafusion.context.SessionContext`. Similar to how
+`prepared statements `_
+work, these parameters are limited to places where you would pass in a
+scalar value, such as a comparison.
+
+.. ipython:: python
+
+    def param_attacks(ctx: SessionContext, threshold: int) -> None:
+        ctx.sql(
+            'SELECT "Name", "Attack" FROM pokemon WHERE "Attack" > $val',
+            param_values={"val": threshold},
+        ).show(num=5)
+
+    param_attacks(ctx, 75)
diff --git a/docs/source/user-guide/upgrade-guides.rst b/docs/source/user-guide/upgrade-guides.rst
new file mode 100644
index 000000000..e3d7c2d87
--- /dev/null
+++ b/docs/source/user-guide/upgrade-guides.rst
@@ -0,0 +1,117 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements. See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership. The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+
+.. warning::
+
+    To support DataFrame parameterized queries, your session must support
+    registration of temporary views. The default
+    :py:class:`~datafusion.catalog.CatalogProvider` and
+    :py:class:`~datafusion.catalog.SchemaProvider` do have this capability.
+    If you have implemented custom providers, it is important that temporary
+    views do not persist across :py:class:`~datafusion.context.SessionContext`
+    instances, or you may get unintended consequences.
+
+The following example shows passing in both a :py:class:`~datafusion.dataframe.DataFrame`
+object as well as a Python object to be used in parameterized replacement.
+
+.. ipython:: python
+
+    def show_column(
+        ctx: SessionContext, column: str, df: DataFrame, threshold: int
+    ) -> None:
+        ctx.sql(
+            'SELECT "Name", $col FROM $df WHERE $col > $val',
+            col=column,
+            df=df,
+            val=threshold,
+        ).show(num=5)
+
+    df = ctx.table("pokemon")
+    show_column(ctx, '"Defense"', df, 75)
+
+The approach implemented for conversion of variables into a SQL query
+relies on string conversion. This has the potential for data loss,
+specifically for cases like floating point numbers. If you need to pass
+variables into a parameterized query and it is important to maintain the
+original value without conversion to a string, then you can use the
+optional parameter ``param_values`` to specify these. This parameter
+expects a dictionary mapping from the parameter name to a Python
+object. Those objects will be cast into a
+`PyArrow scalar value <https://arrow.apache.org/docs/python/generated/pyarrow.Scalar.html>`_.
+
+Using ``param_values`` will rely on the SQL dialect you have configured
+for your session. This can be set using the :ref:`configuration options `
+of your :py:class:`~datafusion.context.SessionContext`. Similar to how
+`prepared statements <https://datafusion.apache.org/user-guide/sql/prepared_statements.html>`_
+work, these parameters are limited to places where you would pass in a
+scalar value, such as a comparison.
+
+.. ipython:: python
+
+    def param_attacks(ctx: SessionContext, threshold: int) -> None:
+        ctx.sql(
+            'SELECT "Name", "Attack" FROM pokemon WHERE "Attack" > $val',
+            param_values={"val": threshold},
+        ).show(num=5)
+
+    param_attacks(ctx, 75)
diff --git a/docs/source/user-guide/upgrade-guides.rst b/docs/source/user-guide/upgrade-guides.rst
new file mode 100644
index 000000000..e3d7c2d87
--- /dev/null
+++ b/docs/source/user-guide/upgrade-guides.rst
@@ -0,0 +1,117 @@
+.. Licensed to the Apache Software Foundation (ASF) under one
+.. or more contributor license agreements. See the NOTICE file
+.. distributed with this work for additional information
+.. regarding copyright ownership. The ASF licenses this file
+.. to you under the Apache License, Version 2.0 (the
+.. "License"); you may not use this file except in compliance
+.. with the License. You may obtain a copy of the License at
+
+.. http://www.apache.org/licenses/LICENSE-2.0
+
+.. Unless required by applicable law or agreed to in writing,
+.. software distributed under the License is distributed on an
+.. "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+.. KIND, either express or implied. See the License for the
+.. specific language governing permissions and limitations
+.. under the License.
+
+Upgrade Guides
+==============
+
+DataFusion 53.0.0
+-----------------
+
+This version includes an upgraded version of ``pyo3``, which changes how an
+FFI object is extracted from a ``PyCapsule``. For example:
+
+Before:
+
+.. code-block:: rust
+
+    let codec = unsafe { capsule.reference::<FFI_LogicalExtensionCodec>() };
+
+Now:
+
+.. code-block:: rust
+
+    let data: NonNull<FFI_LogicalExtensionCodec> = capsule
+        .pointer_checked(Some(c_str!("datafusion_logical_extension_codec")))?
+        .cast();
+    let codec = unsafe { data.as_ref() };
+
+DataFusion 52.0.0
+-----------------
+
+This version includes a major update to the :ref:`ffi` due to upgrades
+to the `Foreign Function Interface <https://docs.rs/datafusion-ffi/latest/datafusion_ffi/>`_.
+Users who contribute their own ``CatalogProvider``, ``SchemaProvider``,
+``TableProvider`` or ``TableFunction`` via FFI must now provide access to a
+``LogicalExtensionCodec`` and a ``TaskContextProvider``. The function signatures
+for the methods that return these ``PyCapsule`` objects now require an
+additional parameter: a Python object from which the necessary
+``FFI_LogicalExtensionCodec`` can be extracted.
+
+A complete example can be found in the `FFI example <https://github.com/apache/datafusion-python/tree/main/examples/datafusion-ffi-example>`_.
+Your methods need to be updated to take an additional parameter, as in this
+example.
+
+.. code-block:: rust
+
+    #[pymethods]
+    impl MyCatalogProvider {
+        pub fn __datafusion_catalog_provider__<'py>(
+            &self,
+            py: Python<'py>,
+            session: Bound<'py, PyAny>,
+        ) -> PyResult<Bound<'py, PyCapsule>> {
+            let name = cr"datafusion_catalog_provider".into();
+
+            let provider = Arc::clone(&self.inner) as Arc<dyn CatalogProvider>;
+
+            let codec = ffi_logical_codec_from_pycapsule(session)?;
+            let provider = FFI_CatalogProvider::new_with_ffi_codec(provider, None, codec);
+
+            PyCapsule::new(py, provider, Some(name))
+        }
+    }
+
+To extract the logical extension codec FFI object from the provided object you
+can implement a helper method such as:
+
+.. code-block:: rust
+
+    pub(crate) fn ffi_logical_codec_from_pycapsule(
+        obj: Bound<'_, PyAny>,
+    ) -> PyResult<FFI_LogicalExtensionCodec> {
+        let attr_name = "__datafusion_logical_extension_codec__";
+        let capsule = if obj.hasattr(attr_name)? {
+            obj.getattr(attr_name)?.call0()?
+        } else {
+            obj
+        };
+
+        let capsule = capsule.downcast::<PyCapsule>()?;
+        validate_pycapsule(capsule, "datafusion_logical_extension_codec")?;
+
+        let codec = unsafe { capsule.reference::<FFI_LogicalExtensionCodec>() };
+
+        Ok(codec.clone())
+    }
+
+With these updates, the DataFusion FFI interface no longer depends directly on
+the ``datafusion`` core crate. You can improve your build times and potentially
+reduce your library binary size by removing this dependency and instead
+using the specific DataFusion project crates.
+
+For example, instead of:
+
+.. code-block:: rust
+
+    use datafusion::catalog::MemTable;
+
+you can now write:
+
+.. code-block:: rust
+
+    use datafusion_catalog::MemTable;
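+
+For reference, the corresponding ``Cargo.toml`` change might look like the
+sketch below; the crate versions are illustrative, and you should pull in
+only the subcrates your code actually uses:
+
+.. code-block:: toml
+
+    [dependencies]
+    # before: depending on the whole core crate
+    # datafusion = "53"
+
+    # after: only the pieces that are needed
+    datafusion-catalog = "53"
+    datafusion-ffi = "53"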
diff --git a/examples/create-context.py b/examples/create-context.py
index 3184d4085..0026d6162 100644
--- a/examples/create-context.py
+++ b/examples/create-context.py
@@ -15,25 +15,28 @@
 # specific language governing permissions and limitations
 # under the License.

-from datafusion import RuntimeConfig, SessionConfig, SessionContext
+from datafusion import RuntimeEnvBuilder, SessionConfig, SessionContext

 # create a session context with default settings
 ctx = SessionContext()
 print(ctx)

 # create a session context with explicit runtime and config settings
-runtime = RuntimeConfig().with_disk_manager_os().with_fair_spill_pool(10000000)
+runtime = RuntimeEnvBuilder().with_disk_manager_os().with_fair_spill_pool(10000000)
 config = (
     SessionConfig()
-    .with_create_default_catalog_and_schema(True)
+    .with_create_default_catalog_and_schema(enabled=True)
     .with_default_catalog_and_schema("foo", "bar")
     .with_target_partitions(8)
-    .with_information_schema(True)
-    .with_repartition_joins(False)
-    .with_repartition_aggregations(False)
-    .with_repartition_windows(False)
-    .with_parquet_pruning(False)
+    .with_information_schema(enabled=True)
+    .with_repartition_joins(enabled=False)
+    .with_repartition_aggregations(enabled=False)
+    .with_repartition_windows(enabled=False)
+    .with_parquet_pruning(enabled=False)
     .set("datafusion.execution.parquet.pushdown_filters", "true")
 )
 ctx = SessionContext(config, runtime)
 print(ctx)
+
+ctx = ctx.enable_url_table()
+print(ctx)
diff --git a/examples/csv-read-options.py b/examples/csv-read-options.py
new file mode 100644
index 000000000..a5952d950
--- /dev/null
+++ b/examples/csv-read-options.py
@@ -0,0 +1,96 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ +"""Example demonstrating CsvReadOptions usage.""" + +from datafusion import CsvReadOptions, SessionContext + +# Create a SessionContext +ctx = SessionContext() + +# Example 1: Using CsvReadOptions with default values +print("Example 1: Default CsvReadOptions") +options = CsvReadOptions() +df = ctx.read_csv("data.csv", options=options) + +# Example 2: Using CsvReadOptions with custom parameters +print("\nExample 2: Custom CsvReadOptions") +options = CsvReadOptions( + has_header=True, + delimiter=",", + quote='"', + schema_infer_max_records=1000, + file_extension=".csv", +) +df = ctx.read_csv("data.csv", options=options) + +# Example 3: Using the builder pattern (recommended for readability) +print("\nExample 3: Builder pattern") +options = ( + CsvReadOptions() + .with_has_header(True) # noqa: FBT003 + .with_delimiter("|") + .with_quote("'") + .with_schema_infer_max_records(500) + .with_truncated_rows(False) # noqa: FBT003 + .with_newlines_in_values(True) # noqa: FBT003 +) +df = ctx.read_csv("data.csv", options=options) + +# Example 4: Advanced options +print("\nExample 4: Advanced options") +options = ( + CsvReadOptions() + .with_has_header(True) # noqa: FBT003 + .with_delimiter(",") + .with_comment("#") # Skip lines starting with # + .with_escape("\\") # Escape character + .with_null_regex(r"^(null|NULL|N/A)$") # Treat these as NULL + .with_truncated_rows(True) # noqa: FBT003 + .with_file_compression_type("gzip") # Read gzipped CSV + .with_file_extension(".gz") +) +df = ctx.read_csv("data.csv.gz", options=options) + +# Example 5: Register CSV table with options +print("\nExample 5: Register CSV table") +options = CsvReadOptions().with_has_header(True).with_delimiter(",") # noqa: FBT003 +ctx.register_csv("my_table", "data.csv", options=options) +df = ctx.sql("SELECT * FROM my_table") + +# Example 6: Backward compatibility (without options) +print("\nExample 6: Backward compatibility") +# Still works the old way! +df = ctx.read_csv("data.csv", has_header=True, delimiter=",") + +print("\nAll examples completed!") +print("\nFor all available options, see the CsvReadOptions documentation:") +print(" - has_header: bool") +print(" - delimiter: str") +print(" - quote: str") +print(" - terminator: str | None") +print(" - escape: str | None") +print(" - comment: str | None") +print(" - newlines_in_values: bool") +print(" - schema: pa.Schema | None") +print(" - schema_infer_max_records: int") +print(" - file_extension: str") +print(" - table_partition_cols: list[tuple[str, pa.DataType]]") +print(" - file_compression_type: str") +print(" - file_sort_order: list[list[SortExpr]]") +print(" - null_regex: str | None") +print(" - truncated_rows: bool") diff --git a/examples/datafusion-ffi-example/.cargo/config.toml b/examples/datafusion-ffi-example/.cargo/config.toml new file mode 100644 index 000000000..af951327f --- /dev/null +++ b/examples/datafusion-ffi-example/.cargo/config.toml @@ -0,0 +1,5 @@ +[target.x86_64-apple-darwin] +rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"] + +[target.aarch64-apple-darwin] +rustflags = ["-C", "link-arg=-undefined", "-C", "link-arg=dynamic_lookup"] diff --git a/examples/datafusion-ffi-example/Cargo.lock b/examples/datafusion-ffi-example/Cargo.lock new file mode 100644 index 000000000..ede9b446b --- /dev/null +++ b/examples/datafusion-ffi-example/Cargo.lock @@ -0,0 +1,3127 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 4 + +[[package]] +name = "abi_stable" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69d6512d3eb05ffe5004c59c206de7f99c34951504056ce23fc953842f12c445" +dependencies = [ + "abi_stable_derive", + "abi_stable_shared", + "const_panic", + "core_extensions", + "crossbeam-channel", + "generational-arena", + "libloading", + "lock_api", + "parking_lot", + "paste", + "repr_offset", + "rustc_version", + "serde", + "serde_derive", + "serde_json", +] + +[[package]] +name = "abi_stable_derive" +version = "0.11.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7178468b407a4ee10e881bc7a328a65e739f0863615cca4429d43916b05e898" +dependencies = [ + "abi_stable_shared", + "as_derive_utils", + "core_extensions", + "proc-macro2", + "quote", + "rustc_version", + "syn 1.0.109", + "typed-arena", +] + +[[package]] +name = "abi_stable_shared" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2b5df7688c123e63f4d4d649cba63f2967ba7f7861b1664fca3f77d3dad2b63" +dependencies = [ + "core_extensions", +] + +[[package]] +name = "adler2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" + +[[package]] +name = "ahash" +version = "0.8.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" +dependencies = [ + "cfg-if", + "const-random", + "getrandom 0.3.4", + "once_cell", + "version_check", + "zerocopy", +] + +[[package]] +name = "aho-corasick" +version = "1.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" +dependencies = [ + "memchr", +] + +[[package]] +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" +dependencies = [ + "alloc-no-stdlib", +] + +[[package]] +name = "allocator-api2" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" + +[[package]] +name = "android_system_properties" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "819e7219dbd41043ac279b19830f2efc897156490d7fd6ea916720117ee66311" +dependencies = [ + "libc", +] + +[[package]] +name = "anyhow" +version = "1.0.101" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" + +[[package]] +name = "arc-swap" +version = "1.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9f3647c145568cec02c42054e07bdf9a5a698e15b466fb2341bfc393cd24aa5" +dependencies = [ + "rustversion", +] + +[[package]] +name = "arrow" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "602268ce9f569f282cedb9a9f6bac569b680af47b9b077d515900c03c5d190da" +dependencies = [ + "arrow-arith", + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-csv", + "arrow-data", + "arrow-ipc", + "arrow-json", 
+ "arrow-ord", + "arrow-row", + "arrow-schema", + "arrow-select", + "arrow-string", +] + +[[package]] +name = "arrow-arith" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cd53c6bf277dea91f136ae8e3a5d7041b44b5e489e244e637d00ae302051f56f" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "num-traits", +] + +[[package]] +name = "arrow-array" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e53796e07a6525edaf7dc28b540d477a934aff14af97967ad1d5550878969b9e" +dependencies = [ + "ahash", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "chrono", + "chrono-tz", + "half", + "hashbrown 0.16.1", + "num-complex", + "num-integer", + "num-traits", +] + +[[package]] +name = "arrow-buffer" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2c1a85bb2e94ee10b76531d8bc3ce9b7b4c0d508cabfb17d477f63f2617bd20" +dependencies = [ + "bytes", + "half", + "num-bigint", + "num-traits", +] + +[[package]] +name = "arrow-cast" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89fb245db6b0e234ed8e15b644edb8664673fefe630575e94e62cd9d489a8a26" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-ord", + "arrow-schema", + "arrow-select", + "atoi", + "base64", + "chrono", + "comfy-table", + "half", + "lexical-core", + "num-traits", + "ryu", +] + +[[package]] +name = "arrow-csv" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d374882fb465a194462527c0c15a93aa19a554cf690a6b77a26b2a02539937a7" +dependencies = [ + "arrow-array", + "arrow-cast", + "arrow-schema", + "chrono", + "csv", + "csv-core", + "regex", +] + +[[package]] +name = "arrow-data" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "189d210bc4244c715fa3ed9e6e22864673cccb73d5da28c2723fb2e527329b33" +dependencies = [ + "arrow-buffer", + "arrow-schema", + "half", + "num-integer", + "num-traits", +] + +[[package]] +name = "arrow-ipc" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7968c2e5210c41f4909b2ef76f6e05e172b99021c2def5edf3cc48fdd39d1d6c" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "flatbuffers", + "lz4_flex", +] + +[[package]] +name = "arrow-json" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92111dba5bf900f443488e01f00d8c4ddc2f47f5c50039d18120287b580baa22" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-cast", + "arrow-data", + "arrow-schema", + "chrono", + "half", + "indexmap", + "itoa", + "lexical-core", + "memchr", + "num-traits", + "ryu", + "serde_core", + "serde_json", + "simdutf8", +] + +[[package]] +name = "arrow-ord" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "211136cb253577ee1a6665f741a13136d4e563f64f5093ffd6fb837af90b9495" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", +] + +[[package]] +name = "arrow-row" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e0f20145f9f5ea3fe383e2ba7a7487bf19be36aa9dbf5dd6a1f92f657179663" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "half", +] + +[[package]] +name = 
"arrow-schema" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b47e0ca91cc438d2c7879fe95e0bca5329fff28649e30a88c6f760b1faeddcb" +dependencies = [ + "bitflags", +] + +[[package]] +name = "arrow-select" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "750a7d1dda177735f5e82a314485b6915c7cccdbb278262ac44090f4aba4a325" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "num-traits", +] + +[[package]] +name = "arrow-string" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e1eab1208bc4fe55d768cdc9b9f3d9df5a794cdb3ee2586bf89f9b30dc31ad8c" +dependencies = [ + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-schema", + "arrow-select", + "memchr", + "num-traits", + "regex", + "regex-syntax", +] + +[[package]] +name = "as_derive_utils" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff3c96645900a44cf11941c111bd08a6573b0e2f9f69bc9264b179d8fae753c4" +dependencies = [ + "core_extensions", + "proc-macro2", + "quote", + "syn 1.0.109", +] + +[[package]] +name = "async-ffi" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f4de21c0feef7e5a556e51af767c953f0501f7f300ba785cc99c47bdc8081a50" +dependencies = [ + "abi_stable", +] + +[[package]] +name = "async-trait" +version = "0.1.89" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "atoi" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f28d99ec8bfea296261ca1af174f24225171fea9664ba9003cbebee704810528" +dependencies = [ + "num-traits", +] + +[[package]] +name = "autocfg" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + +[[package]] +name = "base64" +version = "0.22.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + +[[package]] +name = "bitflags" +version = "2.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" + +[[package]] +name = "brotli" +version = "8.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "5.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + +[[package]] +name = "bumpalo" +version = "3.20.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c81d250916401487680ed13b8b675660281dcfc3ab0121fe44c94bcab9eae2fb" + +[[package]] +name = "byteorder" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + +[[package]] +name = "bytes" +version = "1.11.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + +[[package]] +name = "cc" +version = "1.2.56" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" +dependencies = [ + "find-msvc-tools", + "jobserver", + "libc", + "shlex", +] + +[[package]] +name = "cfg-if" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + +[[package]] +name = "chrono" +version = "0.4.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" +dependencies = [ + "iana-time-zone", + "num-traits", + "windows-link", +] + +[[package]] +name = "chrono-tz" +version = "0.10.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3" +dependencies = [ + "chrono", + "phf", +] + +[[package]] +name = "comfy-table" +version = "7.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" +dependencies = [ + "unicode-segmentation", + "unicode-width", +] + +[[package]] +name = "const-random" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "87e00182fe74b066627d63b85fd550ac2998d4b0bd86bfed477a0ae4c7c71359" +dependencies = [ + "const-random-macro", +] + +[[package]] +name = "const-random-macro" +version = "0.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" +dependencies = [ + "getrandom 0.2.17", + "once_cell", + "tiny-keccak", +] + +[[package]] +name = "const_panic" +version = "0.2.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e262cdaac42494e3ae34c43969f9cdeb7da178bdb4b66fa6a1ea2edb4c8ae652" +dependencies = [ + "typewit", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" + +[[package]] +name = "core_extensions" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42bb5e5d0269fd4f739ea6cedaf29c16d81c27a7ce7582008e90eb50dcd57003" +dependencies = [ + "core_extensions_proc_macros", +] + +[[package]] +name = "core_extensions_proc_macros" +version = "1.5.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "533d38ecd2709b7608fb8e18e4504deb99e9a72879e6aa66373a76d8dc4259ea" + +[[package]] +name = "crossbeam-channel" +version = "0.5.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" +dependencies = [ + "crossbeam-utils", +] + +[[package]] +name = "crossbeam-utils" +version = "0.8.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + +[[package]] +name = "crunchy" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" + +[[package]] +name = "csv" +version = "1.4.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938" +dependencies = [ + "csv-core", + "itoa", + "ryu", + "serde_core", +] + +[[package]] +name = "csv-core" +version = "0.1.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782" +dependencies = [ + "memchr", +] + +[[package]] +name = "dashmap" +version = "6.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5041cc499144891f3790297212f32a74fb938e5136a14943f338ef9e0ae276cf" +dependencies = [ + "cfg-if", + "crossbeam-utils", + "hashbrown 0.14.5", + "lock_api", + "once_cell", + "parking_lot_core", +] + +[[package]] +name = "datafusion-catalog" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "dashmap", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "itertools", + "log", + "object_store", + "parking_lot", + "tokio", +] + +[[package]] +name = "datafusion-catalog-listing" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "futures", + "itertools", + "log", + "object_store", +] + +[[package]] +name = "datafusion-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "arrow-ipc", + "chrono", + "half", + "hashbrown 0.16.1", + "indexmap", + "itertools", + "libc", + "log", + "object_store", + "parquet", + "paste", + "tokio", + "web-time", +] + +[[package]] +name = "datafusion-common-runtime" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "futures", + "log", + "tokio", +] + +[[package]] +name = "datafusion-datasource" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "chrono", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "glob", + "itertools", + "log", + "object_store", + "rand", + "tokio", + "url", +] + +[[package]] +name = "datafusion-datasource-arrow" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "arrow-ipc", + "async-trait", + "bytes", + "datafusion-common", + 
"datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "itertools", + "object_store", + "tokio", +] + +[[package]] +name = "datafusion-datasource-csv" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "object_store", + "regex", + "tokio", +] + +[[package]] +name = "datafusion-datasource-json" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-session", + "futures", + "object_store", + "serde_json", + "tokio", + "tokio-stream", +] + +[[package]] +name = "datafusion-datasource-parquet" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "bytes", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-physical-expr", + "datafusion-physical-expr-adapter", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-pruning", + "datafusion-session", + "futures", + "itertools", + "log", + "object_store", + "parking_lot", + "parquet", + "tokio", +] + +[[package]] +name = "datafusion-doc" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" + +[[package]] +name = "datafusion-execution" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "arrow-buffer", + "async-trait", + "chrono", + "dashmap", + "datafusion-common", + "datafusion-expr", + "datafusion-physical-expr-common", + "futures", + "log", + "object_store", + "parking_lot", + "rand", + "tempfile", + "url", +] + +[[package]] +name = "datafusion-expr" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "chrono", + "datafusion-common", + "datafusion-doc", + "datafusion-expr-common", + "datafusion-functions-aggregate-common", + "datafusion-functions-window-common", + "datafusion-physical-expr-common", + "indexmap", + "itertools", + "paste", + "serde_json", + "sqlparser", +] + +[[package]] +name = "datafusion-expr-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + 
"datafusion-common", + "indexmap", + "itertools", + "paste", +] + +[[package]] +name = "datafusion-ffi" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "abi_stable", + "arrow", + "arrow-schema", + "async-ffi", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-datasource", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-proto", + "datafusion-proto-common", + "datafusion-session", + "futures", + "log", + "prost", + "semver", + "tokio", +] + +[[package]] +name = "datafusion-ffi-example" +version = "0.2.0" +dependencies = [ + "arrow", + "arrow-array", + "arrow-schema", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-expr", + "datafusion-ffi", + "datafusion-functions-aggregate", + "datafusion-functions-window", + "pyo3", + "pyo3-build-config", + "pyo3-log", +] + +[[package]] +name = "datafusion-functions" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "arrow-buffer", + "base64", + "chrono", + "chrono-tz", + "datafusion-common", + "datafusion-doc", + "datafusion-execution", + "datafusion-expr", + "datafusion-expr-common", + "datafusion-macros", + "hex", + "itertools", + "log", + "memchr", + "num-traits", + "rand", + "regex", + "unicode-segmentation", + "uuid", +] + +[[package]] +name = "datafusion-functions-aggregate" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", + "datafusion-doc", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-aggregate-common", + "datafusion-macros", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "half", + "log", + "num-traits", + "paste", +] + +[[package]] +name = "datafusion-functions-aggregate-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", + "datafusion-expr-common", + "datafusion-physical-expr-common", +] + +[[package]] +name = "datafusion-functions-table" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "async-trait", + "datafusion-catalog", + "datafusion-common", + "datafusion-expr", + "datafusion-physical-plan", + "parking_lot", + "paste", +] + +[[package]] +name = "datafusion-functions-window" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-doc", + "datafusion-expr", + "datafusion-functions-window-common", + "datafusion-macros", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "log", + "paste", +] + +[[package]] +name = "datafusion-functions-window-common" +version = "53.0.0" +source = 
"git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "datafusion-common", + "datafusion-physical-expr-common", +] + +[[package]] +name = "datafusion-macros" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "datafusion-doc", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "datafusion-physical-expr" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "datafusion-common", + "datafusion-expr", + "datafusion-expr-common", + "datafusion-functions-aggregate-common", + "datafusion-physical-expr-common", + "half", + "hashbrown 0.16.1", + "indexmap", + "itertools", + "parking_lot", + "paste", + "petgraph", + "tokio", +] + +[[package]] +name = "datafusion-physical-expr-adapter" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-expr", + "datafusion-functions", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "itertools", +] + +[[package]] +name = "datafusion-physical-expr-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "chrono", + "datafusion-common", + "datafusion-expr-common", + "hashbrown 0.16.1", + "indexmap", + "itertools", + "parking_lot", +] + +[[package]] +name = "datafusion-physical-plan" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "ahash", + "arrow", + "arrow-ord", + "arrow-schema", + "async-trait", + "datafusion-common", + "datafusion-common-runtime", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions", + "datafusion-functions-aggregate-common", + "datafusion-functions-window-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "futures", + "half", + "hashbrown 0.16.1", + "indexmap", + "itertools", + "log", + "num-traits", + "parking_lot", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "datafusion-proto" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "chrono", + "datafusion-catalog", + "datafusion-catalog-listing", + "datafusion-common", + "datafusion-datasource", + "datafusion-datasource-arrow", + "datafusion-datasource-csv", + "datafusion-datasource-json", + "datafusion-datasource-parquet", + "datafusion-execution", + "datafusion-expr", + "datafusion-functions-table", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "datafusion-proto-common", + "object_store", + "prost", + "rand", +] + +[[package]] +name = "datafusion-proto-common" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "prost", +] + 
+[[package]] +name = "datafusion-pruning" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "arrow", + "datafusion-common", + "datafusion-datasource", + "datafusion-expr-common", + "datafusion-physical-expr", + "datafusion-physical-expr-common", + "datafusion-physical-plan", + "itertools", + "log", +] + +[[package]] +name = "datafusion-session" +version = "53.0.0" +source = "git+https://github.com/apache/datafusion.git?rev=35749607f585b3bf25b66b7d2289c56c18d03e4f#35749607f585b3bf25b66b7d2289c56c18d03e4f" +dependencies = [ + "async-trait", + "datafusion-common", + "datafusion-execution", + "datafusion-expr", + "datafusion-physical-plan", + "parking_lot", +] + +[[package]] +name = "displaydoc" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + +[[package]] +name = "equivalent" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + +[[package]] +name = "errno" +version = "0.3.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "fastrand" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + +[[package]] +name = "find-msvc-tools" +version = "0.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + +[[package]] +name = "fixedbitset" +version = "0.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" + +[[package]] +name = "flatbuffers" +version = "25.12.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3" +dependencies = [ + "bitflags", + "rustc_version", +] + +[[package]] +name = "flate2" +version = "1.1.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "843fba2746e448b37e26a819579957415c8cef339bf08564fe8b7ddbd959573c" +dependencies = [ + "miniz_oxide", + "zlib-rs", +] + +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + +[[package]] +name = "foldhash" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "77ce24cb58228fbb8aa041425bb1050850ac19177686ea6e0f41a70416f56fdb" + +[[package]] +name = "form_urlencoded" +version = "1.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" +dependencies = [ + "percent-encoding", +] + +[[package]] +name = "futures" +version = "0.3.32" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" +dependencies = [ + "futures-channel", + "futures-core", + "futures-executor", + "futures-io", + "futures-sink", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-channel" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" +dependencies = [ + "futures-core", + "futures-sink", +] + +[[package]] +name = "futures-core" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" + +[[package]] +name = "futures-executor" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" +dependencies = [ + "futures-core", + "futures-task", + "futures-util", +] + +[[package]] +name = "futures-io" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" + +[[package]] +name = "futures-macro" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "futures-sink" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" + +[[package]] +name = "futures-task" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" + +[[package]] +name = "futures-util" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" +dependencies = [ + "futures-channel", + "futures-core", + "futures-io", + "futures-macro", + "futures-sink", + "futures-task", + "memchr", + "pin-project-lite", + "slab", +] + +[[package]] +name = "generational-arena" +version = "0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "877e94aff08e743b651baaea359664321055749b398adff8740a7399af7796e7" +dependencies = [ + "cfg-if", +] + +[[package]] +name = "getrandom" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" +dependencies = [ + "cfg-if", + "libc", + "wasi", +] + +[[package]] +name = "getrandom" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", +] + +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "wasip2", + "wasip3", +] + +[[package]] +name = "glob" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" + +[[package]] +name = "half" 
+version = "2.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" +dependencies = [ + "cfg-if", + "crunchy", + "num-traits", + "zerocopy", +] + +[[package]] +name = "hashbrown" +version = "0.14.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1" + +[[package]] +name = "hashbrown" +version = "0.15.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash 0.1.5", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" +dependencies = [ + "allocator-api2", + "equivalent", + "foldhash 0.2.0", +] + +[[package]] +name = "heck" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + +[[package]] +name = "hex" +version = "0.4.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" + +[[package]] +name = "http" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" +dependencies = [ + "bytes", + "itoa", +] + +[[package]] +name = "humantime" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "iana-time-zone" +version = "0.1.65" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" +dependencies = [ + "android_system_properties", + "core-foundation-sys", + "iana-time-zone-haiku", + "js-sys", + "log", + "wasm-bindgen", + "windows-core", +] + +[[package]] +name = "iana-time-zone-haiku" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f31827a206f56af32e590ba56d5d2d085f558508192593743f16b2306495269f" +dependencies = [ + "cc", +] + +[[package]] +name = "icu_collections" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" +dependencies = [ + "displaydoc", + "potential_utf", + "yoke", + "zerofrom", + "zerovec", +] + +[[package]] +name = "icu_locale_core" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" +dependencies = [ + "displaydoc", + "litemap", + "tinystr", + "writeable", + "zerovec", +] + +[[package]] +name = "icu_normalizer" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" +dependencies = [ + "icu_collections", + "icu_normalizer_data", + "icu_properties", + "icu_provider", + "smallvec", + "zerovec", +] + +[[package]] +name = "icu_normalizer_data" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + +[[package]] +name = 
"icu_properties" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" +dependencies = [ + "icu_collections", + "icu_locale_core", + "icu_properties_data", + "icu_provider", + "zerotrie", + "zerovec", +] + +[[package]] +name = "icu_properties_data" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + +[[package]] +name = "icu_provider" +version = "2.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" +dependencies = [ + "displaydoc", + "icu_locale_core", + "writeable", + "yoke", + "zerofrom", + "zerotrie", + "zerovec", +] + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" + +[[package]] +name = "idna" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" +dependencies = [ + "idna_adapter", + "smallvec", + "utf8_iter", +] + +[[package]] +name = "idna_adapter" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" +dependencies = [ + "icu_normalizer", + "icu_properties", +] + +[[package]] +name = "indexmap" +version = "2.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" +dependencies = [ + "equivalent", + "hashbrown 0.16.1", + "serde", + "serde_core", +] + +[[package]] +name = "integer-encoding" +version = "3.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" + +[[package]] +name = "itertools" +version = "0.14.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" +dependencies = [ + "either", +] + +[[package]] +name = "itoa" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "jobserver" +version = "0.1.34" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" +dependencies = [ + "getrandom 0.3.4", + "libc", +] + +[[package]] +name = "js-sys" +version = "0.3.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +dependencies = [ + "once_cell", + "wasm-bindgen", +] + +[[package]] +name = "leb128fmt" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" + +[[package]] +name = "lexical-core" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d8d125a277f807e55a77304455eb7b1cb52f2b18c143b60e766c120bd64a594" +dependencies = [ + "lexical-parse-float", + "lexical-parse-integer", + "lexical-util", + "lexical-write-float", + "lexical-write-integer", +] + +[[package]] 
+name = "lexical-parse-float" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "52a9f232fbd6f550bc0137dcb5f99ab674071ac2d690ac69704593cb4abbea56" +dependencies = [ + "lexical-parse-integer", + "lexical-util", +] + +[[package]] +name = "lexical-parse-integer" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a7a039f8fb9c19c996cd7b2fcce303c1b2874fe1aca544edc85c4a5f8489b34" +dependencies = [ + "lexical-util", +] + +[[package]] +name = "lexical-util" +version = "1.0.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2604dd126bb14f13fb5d1bd6a66155079cb9fa655b37f875b3a742c705dbed17" + +[[package]] +name = "lexical-write-float" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50c438c87c013188d415fbabbb1dceb44249ab81664efbd31b14ae55dabb6361" +dependencies = [ + "lexical-util", + "lexical-write-integer", +] + +[[package]] +name = "lexical-write-integer" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "409851a618475d2d5796377cad353802345cba92c867d9fbcde9cf4eac4e14df" +dependencies = [ + "lexical-util", +] + +[[package]] +name = "libc" +version = "0.2.182" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" + +[[package]] +name = "libloading" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b67380fd3b2fbe7527a606e18729d21c6f3951633d0500574c4dc22d2d638b9f" +dependencies = [ + "cfg-if", + "winapi", +] + +[[package]] +name = "libm" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" + +[[package]] +name = "linux-raw-sys" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + +[[package]] +name = "litemap" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + +[[package]] +name = "lock_api" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" +dependencies = [ + "scopeguard", +] + +[[package]] +name = "log" +version = "0.4.29" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + +[[package]] +name = "lz4_flex" +version = "0.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ab6473172471198271ff72e9379150e9dfd70d8e533e0752a27e515b48dd375e" +dependencies = [ + "twox-hash", +] + +[[package]] +name = "memchr" +version = "2.8.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + +[[package]] +name = "miniz_oxide" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" +dependencies = [ + "adler2", + "simd-adler32", +] + +[[package]] +name = "num-bigint" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" +dependencies = [ + "num-integer", + "num-traits", +] + +[[package]] +name = "num-complex" +version = "0.4.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-integer" +version = "0.1.46" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" +dependencies = [ + "num-traits", +] + +[[package]] +name = "num-traits" +version = "0.2.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" +dependencies = [ + "autocfg", + "libm", +] + +[[package]] +name = "object_store" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2858065e55c148d294a9f3aae3b0fa9458edadb41a108397094566f4e3c0dfb" +dependencies = [ + "async-trait", + "bytes", + "chrono", + "futures", + "http", + "humantime", + "itertools", + "parking_lot", + "percent-encoding", + "thiserror", + "tokio", + "tracing", + "url", + "walkdir", + "wasm-bindgen-futures", + "web-time", +] + +[[package]] +name = "once_cell" +version = "1.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + +[[package]] +name = "ordered-float" +version = "2.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68f19d67e5a2795c94e73e0bb1cc1a7edeb2e28efd39e2e1c9b7a40c1108b11c" +dependencies = [ + "num-traits", +] + +[[package]] +name = "parking_lot" +version = "0.12.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" +dependencies = [ + "lock_api", + "parking_lot_core", +] + +[[package]] +name = "parking_lot_core" +version = "0.9.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" +dependencies = [ + "cfg-if", + "libc", + "redox_syscall", + "smallvec", + "windows-link", +] + +[[package]] +name = "parquet" +version = "58.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f491d0ef1b510194426ee67ddc18a9b747ef3c42050c19322a2cd2e1666c29b" +dependencies = [ + "ahash", + "arrow-array", + "arrow-buffer", + "arrow-data", + "arrow-ipc", + "arrow-schema", + "arrow-select", + "base64", + "brotli", + "bytes", + "chrono", + "flate2", + "futures", + "half", + "hashbrown 0.16.1", + "lz4_flex", + "num-bigint", + "num-integer", + "num-traits", + "object_store", + "paste", + "seq-macro", + "simdutf8", + "snap", + "thrift", + "tokio", + "twox-hash", + "zstd", +] + +[[package]] +name = "paste" +version = "1.0.15" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + +[[package]] +name = "percent-encoding" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + +[[package]] +name = "petgraph" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" +dependencies = [ + "fixedbitset", + 
"hashbrown 0.15.5", + "indexmap", + "serde", +] + +[[package]] +name = "phf" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7" +dependencies = [ + "phf_shared", +] + +[[package]] +name = "phf_shared" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981" +dependencies = [ + "siphasher", +] + +[[package]] +name = "pin-project-lite" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + +[[package]] +name = "pkg-config" +version = "0.3.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7edddbd0b52d732b21ad9a5fab5c704c14cd949e5e9a1ec5929a24fded1b904c" + +[[package]] +name = "portable-atomic" +version = "1.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c33a9471896f1c69cecef8d20cbe2f7accd12527ce60845ff44c153bb2a21b49" + +[[package]] +name = "potential_utf" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" +dependencies = [ + "zerovec", +] + +[[package]] +name = "ppv-lite86" +version = "0.2.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" +dependencies = [ + "zerocopy", +] + +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn 2.0.117", +] + +[[package]] +name = "proc-macro2" +version = "1.0.106" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "prost" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" +dependencies = [ + "bytes", + "prost-derive", +] + +[[package]] +name = "prost-derive" +version = "0.14.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" +dependencies = [ + "anyhow", + "itertools", + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "pyo3" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf85e27e86080aafd5a22eae58a162e133a589551542b3e5cee4beb27e54f8e1" +dependencies = [ + "libc", + "once_cell", + "portable-atomic", + "pyo3-build-config", + "pyo3-ffi", + "pyo3-macros", +] + +[[package]] +name = "pyo3-build-config" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8bf94ee265674bf76c09fa430b0e99c26e319c945d96ca0d5a8215f31bf81cf7" +dependencies = [ + "target-lexicon", +] + +[[package]] +name = "pyo3-ffi" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "491aa5fc66d8059dd44a75f4580a2962c1862a1c2945359db36f6c2818b748dc" +dependencies = [ + "libc", + "pyo3-build-config", +] + +[[package]] +name = "pyo3-log" +version = "0.13.3" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "26c2ec80932c5c3b2d4fbc578c9b56b2d4502098587edb8bef5b6bfcad43682e" +dependencies = [ + "arc-swap", + "log", + "pyo3", +] + +[[package]] +name = "pyo3-macros" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f5d671734e9d7a43449f8480f8b38115df67bef8d21f76837fa75ee7aaa5e52e" +dependencies = [ + "proc-macro2", + "pyo3-macros-backend", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "pyo3-macros-backend" +version = "0.28.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22faaa1ce6c430a1f71658760497291065e6450d7b5dc2bcf254d49f66ee700a" +dependencies = [ + "heck", + "proc-macro2", + "pyo3-build-config", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "quote" +version = "1.0.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + +[[package]] +name = "rand" +version = "0.9.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" +dependencies = [ + "rand_chacha", + "rand_core", +] + +[[package]] +name = "rand_chacha" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" +dependencies = [ + "ppv-lite86", + "rand_core", +] + +[[package]] +name = "rand_core" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" +dependencies = [ + "getrandom 0.3.4", +] + +[[package]] +name = "redox_syscall" +version = "0.5.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" +dependencies = [ + "bitflags", +] + +[[package]] +name = "regex" +version = "1.12.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" +dependencies = [ + "aho-corasick", + "memchr", + "regex-automata", + "regex-syntax", +] + +[[package]] +name = "regex-automata" +version = "0.4.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" +dependencies = [ + "aho-corasick", + "memchr", + "regex-syntax", +] + +[[package]] +name = "regex-syntax" +version = "0.8.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" + +[[package]] +name = "repr_offset" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb1070755bd29dffc19d0971cab794e607839ba2ef4b69a9e6fbc8733c1b72ea" +dependencies = [ + "tstr", +] + +[[package]] +name = "rustc_version" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92" +dependencies = [ + "semver", +] + +[[package]] +name = "rustix" +version = "1.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "rustversion" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + +[[package]] +name = "ryu" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + +[[package]] +name = "same-file" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" +dependencies = [ + "winapi-util", +] + +[[package]] +name = "scopeguard" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + +[[package]] +name = "semver" +version = "1.0.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" + +[[package]] +name = "seq-macro" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1bc711410fbe7399f390ca1c3b60ad0f53f80e95c5eb935e52268a0e2cd49acc" + +[[package]] +name = "serde" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", +] + +[[package]] +name = "serde_derive" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "serde_json" +version = "1.0.149" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" +dependencies = [ + "itoa", + "memchr", + "serde", + "serde_core", + "zmij", +] + +[[package]] +name = "shlex" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + +[[package]] +name = "simd-adler32" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e320a6c5ad31d271ad523dcf3ad13e2767ad8b1cb8f047f75a8aeaf8da139da2" + +[[package]] +name = "simdutf8" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e3a9fe34e3e7a50316060351f37187a3f546bce95496156754b601a5fa71b76e" + +[[package]] +name = "siphasher" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" + +[[package]] +name = "slab" +version = "0.4.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + +[[package]] +name = "smallvec" +version = "1.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + +[[package]] +name = "snap" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b6b67fb9a61334225b5b790716f609cd58395f895b3fe8b328786812a40bc3b" + +[[package]] +name = "sqlparser" +version = "0.61.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dbf5ea8d4d7c808e1af1cbabebca9a2abe603bcefc22294c5b95018d53200cb7" +dependencies = [ + "log", + "sqlparser_derive", +] + +[[package]] +name = "sqlparser_derive" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a6dd45d8fc1c79299bfbb7190e42ccbbdf6a5f52e4a6ad98d92357ea965bd289" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "stable_deref_trait" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + +[[package]] +name = "syn" +version = "1.0.109" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "syn" +version = "2.0.117" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "synstructure" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "target-lexicon" +version = "0.13.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "adb6935a6f5c20170eeceb1a3835a49e12e19d792f6dd344ccc76a985ca5a6ca" + +[[package]] +name = "tempfile" +version = "3.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +dependencies = [ + "fastrand", + "getrandom 0.4.1", + "once_cell", + "rustix", + "windows-sys", +] + +[[package]] +name = "thiserror" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "2.0.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "thrift" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7e54bc85fc7faa8bc175c4bab5b92ba8d9a3ce893d0e9f42cc455c8ab16a9e09" +dependencies = [ + "byteorder", + "integer-encoding", + "ordered-float", +] + +[[package]] +name = "tiny-keccak" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237" +dependencies = [ + "crunchy", +] + +[[package]] +name = "tinystr" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" +dependencies = [ + 
"displaydoc", + "zerovec", +] + +[[package]] +name = "tokio" +version = "1.49.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" +dependencies = [ + "bytes", + "pin-project-lite", + "tokio-macros", +] + +[[package]] +name = "tokio-macros" +version = "2.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tokio-stream" +version = "0.1.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "32da49809aab5c3bc678af03902d4ccddea2a87d028d86392a4b1560c6906c70" +dependencies = [ + "futures-core", + "pin-project-lite", + "tokio", + "tokio-util", +] + +[[package]] +name = "tokio-util" +version = "0.7.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" +dependencies = [ + "bytes", + "futures-core", + "futures-sink", + "pin-project-lite", + "tokio", +] + +[[package]] +name = "tracing" +version = "0.1.44" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" +dependencies = [ + "pin-project-lite", + "tracing-attributes", + "tracing-core", +] + +[[package]] +name = "tracing-attributes" +version = "0.1.31" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "tracing-core" +version = "0.1.36" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" +dependencies = [ + "once_cell", +] + +[[package]] +name = "tstr" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7f8e0294f14baae476d0dd0a2d780b2e24d66e349a9de876f5126777a37bdba7" +dependencies = [ + "tstr_proc_macros", +] + +[[package]] +name = "tstr_proc_macros" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e78122066b0cb818b8afd08f7ed22f7fdbc3e90815035726f0840d0d26c0747a" + +[[package]] +name = "twox-hash" +version = "2.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" + +[[package]] +name = "typed-arena" +version = "2.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6af6ae20167a9ece4bcb41af5b80f8a1f1df981f6391189ce00fd257af04126a" + +[[package]] +name = "typewit" +version = "1.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8c1ae7cc0fdb8b842d65d127cb981574b0d2b249b74d1c7a2986863dc134f71" + +[[package]] +name = "unicode-ident" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" + +[[package]] +name = "unicode-segmentation" +version = "1.12.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" + +[[package]] +name = "unicode-width" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"b4ac048d71ede7ee76d585517add45da530660ef4390e49b098733c6e897f254" + +[[package]] +name = "unicode-xid" +version = "0.2.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" + +[[package]] +name = "url" +version = "2.5.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" +dependencies = [ + "form_urlencoded", + "idna", + "percent-encoding", + "serde", +] + +[[package]] +name = "utf8_iter" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + +[[package]] +name = "uuid" +version = "1.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" +dependencies = [ + "getrandom 0.4.1", + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "version_check" +version = "0.9.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + +[[package]] +name = "walkdir" +version = "2.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" +dependencies = [ + "same-file", + "winapi-util", +] + +[[package]] +name = "wasi" +version = "0.11.1+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" + +[[package]] +name = "wasip2" +version = "1.0.2+wasi-0.2.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" +dependencies = [ + "wit-bindgen", +] + +[[package]] +name = "wasm-bindgen" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +dependencies = [ + "cfg-if", + "once_cell", + "rustversion", + "wasm-bindgen-macro", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-futures" +version = "0.4.58" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +dependencies = [ + "cfg-if", + "futures-util", + "js-sys", + "once_cell", + "wasm-bindgen", + "web-sys", +] + +[[package]] +name = "wasm-bindgen-macro" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +dependencies = [ + "quote", + "wasm-bindgen-macro-support", +] + +[[package]] +name = "wasm-bindgen-macro-support" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +dependencies = [ + "bumpalo", + "proc-macro2", + "quote", + "syn 2.0.117", + "wasm-bindgen-shared", +] + +[[package]] +name = "wasm-bindgen-shared" +version = "0.2.108" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "wasm-encoder" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" +dependencies = [ + "leb128fmt", + "wasmparser", +] + +[[package]] +name = "wasm-metadata" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" +dependencies = [ + "anyhow", + "indexmap", + "wasm-encoder", + "wasmparser", +] + +[[package]] +name = "wasmparser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" +dependencies = [ + "bitflags", + "hashbrown 0.15.5", + "indexmap", + "semver", +] + +[[package]] +name = "web-sys" +version = "0.3.85" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "web-time" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a6580f308b1fad9207618087a65c04e7a10bc77e02c8e84e9b00dd4b12fa0bb" +dependencies = [ + "js-sys", + "wasm-bindgen", +] + +[[package]] +name = "winapi" +version = "0.3.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5c839a674fcd7a98952e593242ea400abe93992746761e38641405d28b00f419" +dependencies = [ + "winapi-i686-pc-windows-gnu", + "winapi-x86_64-pc-windows-gnu", +] + +[[package]] +name = "winapi-i686-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6" + +[[package]] +name = "winapi-util" +version = "0.1.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "winapi-x86_64-pc-windows-gnu" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" + +[[package]] +name = "windows-core" +version = "0.62.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8e83a14d34d0623b51dce9581199302a221863196a1dde71a7663a4c2be9deb" +dependencies = [ + "windows-implement", + "windows-interface", + "windows-link", + "windows-result", + "windows-strings", +] + +[[package]] +name = "windows-implement" +version = "0.60.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-interface" +version = "0.59.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "windows-link" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" + +[[package]] +name = "windows-result" +version = "0.4.1" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7781fa89eaf60850ac3d2da7af8e5242a5ea78d1a11c49bf2910bb5a73853eb5" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-strings" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7837d08f69c77cf6b07689544538e017c1bfcf57e34b4c0ff58e6c2cd3b37091" +dependencies = [ + "windows-link", +] + +[[package]] +name = "windows-sys" +version = "0.61.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] + +[[package]] +name = "wit-bindgen" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] + +[[package]] +name = "wit-bindgen-core" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] + +[[package]] +name = "wit-bindgen-rust" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap", + "prettyplease", + "syn 2.0.117", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] + +[[package]] +name = "wit-bindgen-rust-macro" +version = "0.51.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn 2.0.117", + "wit-bindgen-core", + "wit-bindgen-rust", +] + +[[package]] +name = "wit-component" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser", + "wit-parser", +] + +[[package]] +name = "wit-parser" +version = "0.244.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser", +] + +[[package]] +name = "writeable" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + +[[package]] +name = "yoke" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" +dependencies = [ + "stable_deref_trait", + "yoke-derive", + "zerofrom", +] + +[[package]] +name = "yoke-derive" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure", +] + +[[package]] +name = "zerocopy" +version = "0.8.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" +dependencies = [ + "zerocopy-derive", +] + +[[package]] +name = "zerocopy-derive" +version = "0.8.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zerofrom" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" +dependencies = [ + "zerofrom-derive", +] + +[[package]] +name = "zerofrom-derive" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", + "synstructure", +] + +[[package]] +name = "zerotrie" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + +[[package]] +name = "zerovec" +version = "0.11.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" +dependencies = [ + "yoke", + "zerofrom", + "zerovec-derive", +] + +[[package]] +name = "zerovec-derive" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.117", +] + +[[package]] +name = "zlib-rs" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c745c48e1007337ed136dc99df34128b9faa6ed542d80a1c673cf55a6d7236c8" + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" + +[[package]] +name = "zstd" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e91ee311a569c327171651566e07972200e76fcfe2242a4fa446149a3881c08a" +dependencies = [ + "zstd-safe", +] + +[[package]] +name = "zstd-safe" +version = "7.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f49c4d5f0abb602a93fb8736af2a4f4dd9512e36f7f570d66e65ff867ed3b9d" +dependencies = [ + "zstd-sys", +] + +[[package]] +name = "zstd-sys" +version = "2.0.16+zstd.1.5.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" +dependencies = [ + "cc", + "pkg-config", +] diff --git a/examples/datafusion-ffi-example/Cargo.toml b/examples/datafusion-ffi-example/Cargo.toml new file mode 100644 index 000000000..be6096faf --- /dev/null +++ b/examples/datafusion-ffi-example/Cargo.toml @@ -0,0 +1,56 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +[package] +name = "datafusion-ffi-example" +version = "0.2.0" +edition = "2024" + +[dependencies] +datafusion-catalog = { version = "53", default-features = false } +datafusion-common = { version = "53", default-features = false } +datafusion-functions-aggregate = { version = "53" } +datafusion-functions-window = { version = "53" } +datafusion-expr = { version = "53" } +datafusion-ffi = { version = "53" } + +pyo3 = { version = "0.28", features = [ + "extension-module", + "abi3", + "abi3-py39", +] } +arrow = { version = "58" } +arrow-array = { version = "58" } +arrow-schema = { version = "58" } +async-trait = "0.1.89" +pyo3-log = "0.13.2" + +[build-dependencies] +pyo3-build-config = "0.28" + +[lib] +name = "datafusion_ffi_example" +crate-type = ["cdylib", "rlib"] + +# TODO: remove when datafusion-53 is released +[patch.crates-io] +datafusion-catalog = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-common = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-functions-aggregate = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-functions-window = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-expr = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } +datafusion-ffi = { git = "https://github.com/apache/datafusion.git", rev = "35749607f585b3bf25b66b7d2289c56c18d03e4f" } diff --git a/examples/datafusion-ffi-example/build.rs b/examples/datafusion-ffi-example/build.rs new file mode 100644 index 000000000..4878d8b0e --- /dev/null +++ b/examples/datafusion-ffi-example/build.rs @@ -0,0 +1,20 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +fn main() { + pyo3_build_config::add_extension_module_link_args(); +} diff --git a/examples/datafusion-ffi-example/pyproject.toml b/examples/datafusion-ffi-example/pyproject.toml new file mode 100644 index 000000000..7f85e9487 --- /dev/null +++ b/examples/datafusion-ffi-example/pyproject.toml @@ -0,0 +1,33 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. 
See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +[build-system] +requires = ["maturin>=1.6,<2.0"] +build-backend = "maturin" + +[project] +name = "datafusion_ffi_example" +requires-python = ">=3.9" +classifiers = [ + "Programming Language :: Rust", + "Programming Language :: Python :: Implementation :: CPython", + "Programming Language :: Python :: Implementation :: PyPy", +] +dynamic = ["version"] + +[tool.maturin] +features = ["pyo3/extension-module"] diff --git a/examples/datafusion-ffi-example/python/tests/_test_aggregate_udf.py b/examples/datafusion-ffi-example/python/tests/_test_aggregate_udf.py new file mode 100644 index 000000000..7ea6b295c --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_aggregate_udf.py @@ -0,0 +1,77 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
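+
+# These tests exercise `MySumUDF`, an aggregate function implemented in Rust
+# and exported through the DataFusion FFI layer. The class is wrapped with
+# `udaf()` and then either registered on the SessionContext for use from SQL
+# or called directly in a DataFrame aggregation.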
+ +from __future__ import annotations + +import pyarrow as pa +from datafusion import SessionContext, col, udaf +from datafusion_ffi_example import MySumUDF + + +def setup_context_with_table(): + ctx = SessionContext() + + # Pick numbers here so we get the same value in both groups + # since we cannot be certain of the output order of batches + batch = pa.RecordBatch.from_arrays( + [ + pa.array([1, 2, 3, None], type=pa.int64()), + pa.array([1, 1, 2, 2], type=pa.int64()), + ], + names=["a", "b"], + ) + ctx.register_record_batches("test_table", [[batch]]) + return ctx + + +def test_ffi_aggregate_register(): + ctx = setup_context_with_table() + my_udaf = udaf(MySumUDF()) + ctx.register_udaf(my_udaf) + + result = ctx.sql("select my_custom_sum(a) from test_table group by b").collect() + + assert len(result) == 2 + assert result[0].num_columns == 1 + + result = [r.column(0) for r in result] + expected = [ + pa.array([3], type=pa.int64()), + pa.array([3], type=pa.int64()), + ] + + assert result == expected + + +def test_ffi_aggregate_call_directly(): + ctx = setup_context_with_table() + my_udaf = udaf(MySumUDF()) + + result = ( + ctx.table("test_table").aggregate([col("b")], [my_udaf(col("a"))]).collect() + ) + + assert len(result) == 2 + assert result[0].num_columns == 2 + + result = [r.column(1) for r in result] + expected = [ + pa.array([3], type=pa.int64()), + pa.array([3], type=pa.int64()), + ] + + assert result == expected diff --git a/examples/datafusion-ffi-example/python/tests/_test_catalog_provider.py b/examples/datafusion-ffi-example/python/tests/_test_catalog_provider.py new file mode 100644 index 000000000..a862b23ba --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_catalog_provider.py @@ -0,0 +1,136 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
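+
+# Tests for Rust catalog and catalog-list providers shared over the DataFusion
+# FFI layer. The `inner_capsule` parametrization covers both registration
+# paths: passing the provider object itself, or the raw capsule returned by
+# `__datafusion_catalog_provider__` / `__datafusion_catalog_provider_list__`.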
+ +from __future__ import annotations + +import pyarrow as pa +import pyarrow.dataset as ds +import pytest +from datafusion import SessionContext, Table +from datafusion.catalog import Schema +from datafusion_ffi_example import MyCatalogProvider, MyCatalogProviderList + + +def create_test_dataset() -> Table: + """Create a simple test dataset.""" + batch = pa.RecordBatch.from_arrays( + [pa.array([100, 200, 300]), pa.array([1.1, 2.2, 3.3])], + names=["id", "value"], + ) + dataset = ds.dataset([batch]) + return Table(dataset) + + +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_ffi_catalog_provider_list(inner_capsule: bool) -> None: + """Test basic FFI CatalogProviderList functionality.""" + ctx = SessionContext() + + # Register FFI catalog + catalog_provider_list = MyCatalogProviderList() + if inner_capsule: + catalog_provider_list = ( + catalog_provider_list.__datafusion_catalog_provider_list__(ctx) + ) + + ctx.register_catalog_provider_list(catalog_provider_list) + + # Verify the catalog exists + catalog = ctx.catalog("auto_ffi_catalog") + schema_names = catalog.names() + assert "my_schema" in schema_names + + ctx.register_catalog_provider("second", MyCatalogProvider()) + + assert ctx.catalog_names() == {"auto_ffi_catalog", "second"} + + +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_ffi_catalog_provider_basic(inner_capsule: bool) -> None: + """Test basic FFI CatalogProvider functionality.""" + ctx = SessionContext() + + # Register FFI catalog + catalog_provider = MyCatalogProvider() + if inner_capsule: + catalog_provider = catalog_provider.__datafusion_catalog_provider__(ctx) + + ctx.register_catalog_provider("ffi_catalog", catalog_provider) + + # Verify the catalog exists + catalog = ctx.catalog("ffi_catalog") + schema_names = catalog.names() + assert "my_schema" in schema_names + + # Query the pre-populated table + result = ctx.sql("SELECT * FROM ffi_catalog.my_schema.my_table").collect() + assert len(result) == 2 + assert result[0].num_columns == 2 + + +def test_ffi_catalog_provider_register_schema(): + """Test registering additional schemas to FFI CatalogProvider.""" + ctx = SessionContext() + + catalog_provider = MyCatalogProvider() + ctx.register_catalog_provider("ffi_catalog", catalog_provider) + + catalog = ctx.catalog("ffi_catalog") + + # Register a new memory schema + new_schema = Schema.memory_schema() + catalog.register_schema("additional_schema", new_schema) + + # Verify the schema was registered + assert "additional_schema" in catalog.names() + + # Add a table to the new schema + new_schema.register_table("new_table", create_test_dataset()) + + # Query the new table + result = ctx.sql("SELECT * FROM ffi_catalog.additional_schema.new_table").collect() + assert len(result) == 1 + assert result[0].column(0) == pa.array([100, 200, 300]) + + +def test_ffi_catalog_provider_deregister_schema(): + """Test deregistering schemas from FFI CatalogProvider.""" + ctx = SessionContext() + + catalog_provider = MyCatalogProvider() + ctx.register_catalog_provider("ffi_catalog", catalog_provider) + + catalog = ctx.catalog("ffi_catalog") + + # Register two schemas + schema1 = Schema.memory_schema() + schema2 = Schema.memory_schema() + catalog.register_schema("temp_schema1", schema1) + catalog.register_schema("temp_schema2", schema2) + + # Verify both exist + names = catalog.names() + assert "temp_schema1" in names + assert "temp_schema2" in names + + # Deregister one schema + catalog.deregister_schema("temp_schema1") + + # Verify it's gone + names = 
catalog.names() + assert "temp_schema1" not in names + assert "temp_schema2" in names diff --git a/examples/datafusion-ffi-example/python/tests/_test_scalar_udf.py b/examples/datafusion-ffi-example/python/tests/_test_scalar_udf.py new file mode 100644 index 000000000..0c949c34a --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_scalar_udf.py @@ -0,0 +1,70 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +import pyarrow as pa +from datafusion import SessionContext, col, udf +from datafusion_ffi_example import IsNullUDF + + +def setup_context_with_table(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3, None])], + names=["a"], + ) + ctx.register_record_batches("test_table", [[batch]]) + return ctx + + +def test_ffi_scalar_register(): + ctx = setup_context_with_table() + my_udf = udf(IsNullUDF()) + ctx.register_udf(my_udf) + + result = ctx.sql("select my_custom_is_null(a) from test_table").collect() + + assert len(result) == 1 + assert result[0].num_columns == 1 + print(result) + + result = [r.column(0) for r in result] + expected = [ + pa.array([False, False, False, True], type=pa.bool_()), + ] + + assert result == expected + + +def test_ffi_scalar_call_directly(): + ctx = setup_context_with_table() + my_udf = udf(IsNullUDF()) + + result = ctx.table("test_table").select(my_udf(col("a"))).collect() + + assert len(result) == 1 + assert result[0].num_columns == 1 + print(result) + + result = [r.column(0) for r in result] + expected = [ + pa.array([False, False, False, True], type=pa.bool_()), + ] + + assert result == expected diff --git a/examples/datafusion-ffi-example/python/tests/_test_schema_provider.py b/examples/datafusion-ffi-example/python/tests/_test_schema_provider.py new file mode 100644 index 000000000..93449c660 --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_schema_provider.py @@ -0,0 +1,232 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
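+
+# Tests for `FixedSchemaProvider`, a Rust schema provider shared over the
+# DataFusion FFI layer. Beyond basic registration, these cover registering and
+# deregistering tables, mixing FFI and in-memory Python schemas in one
+# catalog, and the `table_exist` check.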
+ +from __future__ import annotations + +import pyarrow as pa +import pyarrow.dataset as ds +import pytest +from datafusion import SessionContext, Table +from datafusion.catalog import Schema +from datafusion_ffi_example import FixedSchemaProvider, MyCatalogProvider + + +def create_test_dataset() -> Table: + """Create a simple test dataset.""" + batch = pa.RecordBatch.from_arrays( + [pa.array([100, 200, 300]), pa.array([1.1, 2.2, 3.3])], + names=["id", "value"], + ) + dataset = ds.dataset([batch]) + return Table(dataset) + + +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_schema_provider_extract_values(inner_capsule: bool) -> None: + ctx = SessionContext() + + my_schema_name = "my_schema" + + schema_provider = FixedSchemaProvider() + if inner_capsule: + schema_provider = schema_provider.__datafusion_schema_provider__(ctx) + + ctx.catalog().register_schema(my_schema_name, schema_provider) + + expected_schema_name = "my_schema" + expected_table_name = "my_table" + expected_table_columns = ["units", "price"] + + default_catalog = ctx.catalog() + + catalog_schemas = default_catalog.names() + assert expected_schema_name in catalog_schemas + my_schema = default_catalog.schema(expected_schema_name) + assert expected_table_name in my_schema.names() + my_table = my_schema.table(expected_table_name) + assert expected_table_columns == my_table.schema.names + + result = ctx.table(f"{expected_schema_name}.{expected_table_name}").collect() + assert len(result) == 2 + + col0_result = [r.column(0) for r in result] + col1_result = [r.column(1) for r in result] + expected_col0 = [ + pa.array([10, 20, 30], type=pa.int32()), + pa.array([5, 7], type=pa.int32()), + ] + expected_col1 = [ + pa.array([1, 2, 5], type=pa.float64()), + pa.array([1.5, 2.5], type=pa.float64()), + ] + assert col0_result == expected_col0 + assert col1_result == expected_col1 + + +def test_ffi_schema_provider_basic(): + """Test basic FFI SchemaProvider functionality.""" + ctx = SessionContext() + + # Register FFI schema + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + # Verify the schema exists + schema = ctx.catalog().schema("ffi_schema") + table_names = schema.names() + assert "my_table" in table_names + + # Query the pre-populated table + result = ctx.sql("SELECT * FROM ffi_schema.my_table").collect() + assert len(result) == 2 + assert result[0].num_columns == 2 + + +def test_ffi_schema_provider_register_table(): + """Test registering additional tables to FFI SchemaProvider.""" + ctx = SessionContext() + + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + schema = ctx.catalog().schema("ffi_schema") + + # Register a new table + schema.register_table("additional_table", create_test_dataset()) + + # Verify the table was registered + assert "additional_table" in schema.names() + + # Query the new table + result = ctx.sql("SELECT * FROM ffi_schema.additional_table").collect() + assert len(result) == 1 + assert result[0].column(0) == pa.array([100, 200, 300]) + assert result[0].column(1) == pa.array([1.1, 2.2, 3.3]) + + +def test_ffi_schema_provider_deregister_table(): + """Test deregistering tables from FFI SchemaProvider.""" + ctx = SessionContext() + + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + schema = ctx.catalog().schema("ffi_schema") + + # Register two tables + schema.register_table("temp_table1", create_test_dataset()) + 
schema.register_table("temp_table2", create_test_dataset()) + + # Verify both exist + names = schema.names() + assert "temp_table1" in names + assert "temp_table2" in names + + # Deregister one table + schema.deregister_table("temp_table1") + + # Verify it's gone + names = schema.names() + assert "temp_table1" not in names + assert "temp_table2" in names + + +def test_mixed_ffi_and_python_providers(): + """Test mixing FFI and Python providers in the same catalog/schema.""" + ctx = SessionContext() + + # Register FFI catalog + ffi_catalog = MyCatalogProvider() + ctx.register_catalog_provider("ffi_catalog", ffi_catalog) + + # Register Python memory schema to FFI catalog + python_schema = Schema.memory_schema() + ctx.catalog("ffi_catalog").register_schema("python_schema", python_schema) + + # Add table to Python schema + python_schema.register_table("python_table", create_test_dataset()) + + # Query both FFI table and Python table + result_ffi = ctx.sql("SELECT * FROM ffi_catalog.my_schema.my_table").collect() + assert len(result_ffi) == 2 + + result_python = ctx.sql( + "SELECT * FROM ffi_catalog.python_schema.python_table" + ).collect() + assert len(result_python) == 1 + assert result_python[0].column(0) == pa.array([100, 200, 300]) + + +def test_ffi_catalog_with_multiple_schemas(): + """Test FFI catalog with multiple schemas of different types.""" + ctx = SessionContext() + + catalog_provider = MyCatalogProvider() + ctx.register_catalog_provider("multi_catalog", catalog_provider) + + catalog = ctx.catalog("multi_catalog") + + # Register different types of schemas + ffi_schema = FixedSchemaProvider() + memory_schema = Schema.memory_schema() + + catalog.register_schema("ffi_schema", ffi_schema) + catalog.register_schema("memory_schema", memory_schema) + + # Add tables to memory schema + memory_schema.register_table("mem_table", create_test_dataset()) + + # Verify all schemas exist + names = catalog.names() + assert "my_schema" in names # Pre-populated + assert "ffi_schema" in names + assert "memory_schema" in names + + # Query tables from each schema + result = ctx.sql("SELECT * FROM multi_catalog.my_schema.my_table").collect() + assert len(result) == 2 + + result = ctx.sql("SELECT * FROM multi_catalog.ffi_schema.my_table").collect() + assert len(result) == 2 + + result = ctx.sql("SELECT * FROM multi_catalog.memory_schema.mem_table").collect() + assert len(result) == 1 + assert result[0].column(0) == pa.array([100, 200, 300]) + + +def test_ffi_schema_table_exist(): + """Test table_exist method on FFI SchemaProvider.""" + ctx = SessionContext() + + schema_provider = FixedSchemaProvider() + ctx.catalog().register_schema("ffi_schema", schema_provider) + + schema = ctx.catalog().schema("ffi_schema") + + # Check pre-populated table + assert schema.table_exist("my_table") + + # Check non-existent table + assert not schema.table_exist("nonexistent_table") + + # Register a new table and check + schema.register_table("new_table", create_test_dataset()) + assert schema.table_exist("new_table") + + # Deregister and check + schema.deregister_table("new_table") + assert not schema.table_exist("new_table") diff --git a/examples/datafusion-ffi-example/python/tests/_test_table_function.py b/examples/datafusion-ffi-example/python/tests/_test_table_function.py new file mode 100644 index 000000000..bf5aae3bd --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_table_function.py @@ -0,0 +1,135 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license 
agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pyarrow as pa +from datafusion import Expr, SessionContext, udtf +from datafusion_ffi_example import MyTableFunction, MyTableProvider + +if TYPE_CHECKING: + from datafusion.context import TableProviderExportable + + +def test_ffi_table_function_register() -> None: + ctx = SessionContext() + table_func = MyTableFunction() + + table_udtf = udtf(table_func, "my_table_func") + ctx.register_udtf(table_udtf) + result = ctx.sql("select * from my_table_func()").collect() + + assert len(result) == 2 + assert result[0].num_columns == 4 + print(result) + + result = [r.column(0) for r in result] + expected = [ + pa.array([0, 1, 2], type=pa.int32()), + pa.array([3, 4, 5, 6], type=pa.int32()), + ] + + assert result == expected + + +def test_ffi_table_function_call_directly(): + ctx = SessionContext() + table_func = MyTableFunction() + table_udtf = udtf(table_func, "my_table_func") + + my_table = table_udtf() + ctx.register_table("t", my_table) + result = ctx.table("t").collect() + + assert len(result) == 2 + assert result[0].num_columns == 4 + print(result) + + result = [r.column(0) for r in result] + expected = [ + pa.array([0, 1, 2], type=pa.int32()), + pa.array([3, 4, 5, 6], type=pa.int32()), + ] + + assert result == expected + + +class PythonTableFunction: + """Python based table function. + + This class is used as a Python implementation of a table function. 
+    We use the existing TableProvider to create the underlying
+    provider. This function takes the number of columns, rows, and
+    batches as its three arguments.
+    """
+
+    def __call__(
+        self, num_cols: Expr, num_rows: Expr, num_batches: Expr
+    ) -> TableProviderExportable:
+        args = [
+            num_cols.to_variant().value_i64(),
+            num_rows.to_variant().value_i64(),
+            num_batches.to_variant().value_i64(),
+        ]
+        return MyTableProvider(*args)
+
+
+def common_table_function_test(test_ctx: SessionContext) -> None:
+    result = test_ctx.sql("select * from my_table_func(3,2,4)").collect()
+
+    assert len(result) == 4
+    assert result[0].num_columns == 3
+    print(result)
+
+    result = [r.column(0) for r in result]
+    expected = [
+        pa.array([0, 1], type=pa.int32()),
+        pa.array([2, 3, 4], type=pa.int32()),
+        pa.array([4, 5, 6, 7], type=pa.int32()),
+        pa.array([6, 7, 8, 9, 10], type=pa.int32()),
+    ]
+
+    assert result == expected
+
+
+def test_python_table_function():
+    ctx = SessionContext()
+    table_func = PythonTableFunction()
+    table_udtf = udtf(table_func, "my_table_func")
+    ctx.register_udtf(table_udtf)
+
+    common_table_function_test(ctx)
+
+
+def test_python_table_function_decorator():
+    ctx = SessionContext()
+
+    @udtf("my_table_func")
+    def my_udtf(
+        num_cols: Expr, num_rows: Expr, num_batches: Expr
+    ) -> TableProviderExportable:
+        args = [
+            num_cols.to_variant().value_i64(),
+            num_rows.to_variant().value_i64(),
+            num_batches.to_variant().value_i64(),
+        ]
+        return MyTableProvider(*args)
+
+    ctx.register_udtf(my_udtf)
+
+    common_table_function_test(ctx)
diff --git a/examples/datafusion-ffi-example/python/tests/_test_table_provider.py b/examples/datafusion-ffi-example/python/tests/_test_table_provider.py
new file mode 100644
index 000000000..fc77d2d3b
--- /dev/null
+++ b/examples/datafusion-ffi-example/python/tests/_test_table_provider.py
@@ -0,0 +1,51 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
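+
+# Tests for a Rust table provider shared over the DataFusion FFI boundary,
+# registered either as the object itself or as the raw capsule from
+# `__datafusion_table_provider__`. The expected arrays below correspond to
+# `MyTableProvider(3, 2, 4)` producing four 3-column batches whose row counts
+# grow by one, starting from two rows.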
+ +from __future__ import annotations + +import pyarrow as pa +import pytest +from datafusion import SessionContext +from datafusion_ffi_example import MyTableProvider + + +@pytest.mark.parametrize("inner_capsule", [True, False]) +def test_table_provider_ffi(inner_capsule: bool) -> None: + ctx = SessionContext() + table = MyTableProvider(3, 2, 4) + if inner_capsule: + table = table.__datafusion_table_provider__(ctx) + + ctx.register_table("t", table) + result = ctx.table("t").collect() + + assert len(result) == 4 + assert result[0].num_columns == 3 + + result = [r.column(0) for r in result] + expected = [ + pa.array([0, 1], type=pa.int32()), + pa.array([2, 3, 4], type=pa.int32()), + pa.array([4, 5, 6, 7], type=pa.int32()), + pa.array([6, 7, 8, 9, 10], type=pa.int32()), + ] + + assert result == expected + + result = ctx.read_table(table).collect() + result = [r.column(0) for r in result] + assert result == expected diff --git a/examples/datafusion-ffi-example/python/tests/_test_window_udf.py b/examples/datafusion-ffi-example/python/tests/_test_window_udf.py new file mode 100644 index 000000000..7d96994b9 --- /dev/null +++ b/examples/datafusion-ffi-example/python/tests/_test_window_udf.py @@ -0,0 +1,89 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
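+
+# The rank test uses the distinct values 40, 10, 30, 20, so `my_custom_rank`
+# over `order by a` must assign ranks 1 through 4; the (value, rank) pairs are
+# sorted before asserting because the output batch order is not guaranteed.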
+ +from __future__ import annotations + +import pyarrow as pa +from datafusion import SessionContext, col, udwf +from datafusion_ffi_example import MyRankUDF + + +def setup_context_with_table(): + ctx = SessionContext() + + # Pick numbers here so we get the same value in both groups + # since we cannot be certain of the output order of batches + batch = pa.RecordBatch.from_arrays( + [ + pa.array([40, 10, 30, 20], type=pa.int64()), + ], + names=["a"], + ) + ctx.register_record_batches("test_table", [[batch]]) + return ctx + + +def test_ffi_window_register(): + ctx = setup_context_with_table() + my_udwf = udwf(MyRankUDF()) + ctx.register_udwf(my_udwf) + + result = ctx.sql( + "select a, my_custom_rank() over (order by a) from test_table" + ).collect() + assert len(result) == 1 + assert result[0].num_columns == 2 + + results = [ + (result[0][0][idx].as_py(), result[0][1][idx].as_py()) for idx in range(4) + ] + results.sort() + + expected = [ + (10, 1), + (20, 2), + (30, 3), + (40, 4), + ] + assert results == expected + + +def test_ffi_window_call_directly(): + ctx = setup_context_with_table() + my_udwf = udwf(MyRankUDF()) + + result = ( + ctx.table("test_table") + .select(col("a"), my_udwf().order_by(col("a")).build()) + .collect() + ) + + assert len(result) == 1 + assert result[0].num_columns == 2 + + results = [ + (result[0][0][idx].as_py(), result[0][1][idx].as_py()) for idx in range(4) + ] + results.sort() + + expected = [ + (10, 1), + (20, 2), + (30, 3), + (40, 4), + ] + assert results == expected diff --git a/python/datafusion/tests/test_catalog.py b/examples/datafusion-ffi-example/python/tests/conftest.py similarity index 55% rename from python/datafusion/tests/test_catalog.py rename to examples/datafusion-ffi-example/python/tests/conftest.py index 214f6b165..68f8057af 100644 --- a/python/datafusion/tests/test_catalog.py +++ b/examples/datafusion-ffi-example/python/tests/conftest.py @@ -15,26 +15,28 @@ # specific language governing permissions and limitations # under the License. 
-import pyarrow as pa +from __future__ import annotations + +import logging +from typing import TYPE_CHECKING + import pytest +if TYPE_CHECKING: + from collections.abc import Generator + from typing import Any -def test_basic(ctx, database): - with pytest.raises(KeyError): - ctx.catalog("non-existent") - default = ctx.catalog() - assert default.names() == ["public"] +class _FailOnWarning(logging.Handler): + def emit(self, record: logging.LogRecord) -> None: + if record.levelno >= logging.WARNING: + err = f"Unexpected log warning from '{record.name}': {self.format(record)}" + raise AssertionError(err) - for database in [default.database("public"), default.database()]: - assert database.names() == {"csv1", "csv", "csv2"} - table = database.table("csv") - assert table.kind == "physical" - assert table.schema == pa.schema( - [ - pa.field("int", pa.int64(), nullable=True), - pa.field("str", pa.string(), nullable=True), - pa.field("float", pa.float64(), nullable=True), - ] - ) +@pytest.fixture(autouse=True) +def fail_on_log_warnings() -> Generator[None, Any, None]: + handler = _FailOnWarning() + logging.root.addHandler(handler) + yield + logging.root.removeHandler(handler) diff --git a/examples/datafusion-ffi-example/src/aggregate_udf.rs b/examples/datafusion-ffi-example/src/aggregate_udf.rs new file mode 100644 index 000000000..d5343ff91 --- /dev/null +++ b/examples/datafusion-ffi-example/src/aggregate_udf.rs @@ -0,0 +1,87 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
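+
+// MySumUDF demonstrates the FFI export pattern for aggregate functions: wrap
+// an existing DataFusion UDAF (here `Sum`), implement `AggregateUDFImpl` by
+// delegating to it, and hand the result to Python as an `FFI_AggregateUDF`
+// inside a PyCapsule named "datafusion_aggregate_udf".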
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow_schema::DataType;
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::function::AccumulatorArgs;
+use datafusion_expr::{Accumulator, AggregateUDF, AggregateUDFImpl, Signature};
+use datafusion_ffi::udaf::FFI_AggregateUDF;
+use datafusion_functions_aggregate::sum::Sum;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyResult, Python, pyclass, pymethods};
+
+#[pyclass(
+    from_py_object,
+    name = "MySumUDF",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+pub(crate) struct MySumUDF {
+    inner: Arc<Sum>,
+}
+
+#[pymethods]
+impl MySumUDF {
+    #[new]
+    fn new() -> PyResult<Self> {
+        Ok(Self {
+            inner: Arc::new(Sum::new()),
+        })
+    }
+
+    fn __datafusion_aggregate_udf__<'py>(
+        &self,
+        py: Python<'py>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_aggregate_udf".into();
+
+        let func = Arc::new(AggregateUDF::from(self.clone()));
+        let provider = FFI_AggregateUDF::from(func);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+impl AggregateUDFImpl for MySumUDF {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "my_custom_sum"
+    }
+
+    fn signature(&self) -> &Signature {
+        self.inner.signature()
+    }
+
+    fn return_type(&self, arg_types: &[DataType]) -> DataFusionResult<DataType> {
+        self.inner.return_type(arg_types)
+    }
+
+    fn accumulator(&self, acc_args: AccumulatorArgs) -> DataFusionResult<Box<dyn Accumulator>> {
+        self.inner.accumulator(acc_args)
+    }
+
+    fn coerce_types(&self, arg_types: &[DataType]) -> DataFusionResult<Vec<DataType>> {
+        self.inner.coerce_types(arg_types)
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/catalog_provider.rs b/examples/datafusion-ffi-example/src/catalog_provider.rs
new file mode 100644
index 000000000..d0e07c787
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/catalog_provider.rs
@@ -0,0 +1,273 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
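+
+// These providers exercise the three catalog-related FFI surfaces: a schema
+// provider, a catalog provider, and a catalog provider list. Each wraps the
+// corresponding in-memory implementation and exports it through a named
+// PyCapsule for consumption from Python.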
+
+use std::any::Any;
+use std::fmt::Debug;
+use std::sync::Arc;
+
+use arrow::datatypes::Schema;
+use async_trait::async_trait;
+use datafusion_catalog::{
+    CatalogProvider, CatalogProviderList, MemTable, MemoryCatalogProvider,
+    MemoryCatalogProviderList, MemorySchemaProvider, SchemaProvider, TableProvider,
+};
+use datafusion_common::error::{DataFusionError, Result};
+use datafusion_ffi::catalog_provider::FFI_CatalogProvider;
+use datafusion_ffi::catalog_provider_list::FFI_CatalogProviderList;
+use datafusion_ffi::schema_provider::FFI_SchemaProvider;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyAny, PyResult, Python, pyclass, pymethods};
+
+use crate::utils::ffi_logical_codec_from_pycapsule;
+
+pub fn my_table() -> Arc<dyn TableProvider> {
+    use arrow::datatypes::{DataType, Field};
+    use datafusion_common::record_batch;
+
+    let schema = Arc::new(Schema::new(vec![
+        Field::new("units", DataType::Int32, true),
+        Field::new("price", DataType::Float64, true),
+    ]));
+
+    let partitions = vec![
+        record_batch!(
+            ("units", Int32, vec![10, 20, 30]),
+            ("price", Float64, vec![1.0, 2.0, 5.0])
+        )
+        .unwrap(),
+        record_batch!(
+            ("units", Int32, vec![5, 7]),
+            ("price", Float64, vec![1.5, 2.5])
+        )
+        .unwrap(),
+    ];
+
+    Arc::new(MemTable::try_new(schema, vec![partitions]).unwrap())
+}
+
+#[pyclass(
+    skip_from_py_object,
+    name = "FixedSchemaProvider",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug)]
+pub struct FixedSchemaProvider {
+    inner: Arc<MemorySchemaProvider>,
+}
+
+impl Default for FixedSchemaProvider {
+    fn default() -> Self {
+        let inner = Arc::new(MemorySchemaProvider::new());
+
+        let table = my_table();
+
+        let _ = inner.register_table("my_table".to_string(), table).unwrap();
+
+        Self { inner }
+    }
+}
+
+#[pymethods]
+impl FixedSchemaProvider {
+    #[new]
+    pub fn new() -> Self {
+        Self::default()
+    }
+
+    pub fn __datafusion_schema_provider__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_schema_provider".into();
+
+        let provider = Arc::clone(&self.inner) as Arc<dyn SchemaProvider>;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_SchemaProvider::new_with_ffi_codec(provider, None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+#[async_trait]
+impl SchemaProvider for FixedSchemaProvider {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn table_names(&self) -> Vec<String> {
+        self.inner.table_names()
+    }
+
+    async fn table(&self, name: &str) -> Result<Option<Arc<dyn TableProvider>>, DataFusionError> {
+        self.inner.table(name).await
+    }
+
+    fn register_table(
+        &self,
+        name: String,
+        table: Arc<dyn TableProvider>,
+    ) -> Result<Option<Arc<dyn TableProvider>>> {
+        self.inner.register_table(name, table)
+    }
+
+    fn deregister_table(&self, name: &str) -> Result<Option<Arc<dyn TableProvider>>> {
+        self.inner.deregister_table(name)
+    }
+
+    fn table_exist(&self, name: &str) -> bool {
+        self.inner.table_exist(name)
+    }
+}
+
+/// This catalog provider is intended only for unit tests. It is pre-populated
+/// with a single schema named "my_schema" and otherwise delegates to an
+/// in-memory catalog provider.
+#[pyclass(
+    skip_from_py_object,
+    name = "MyCatalogProvider",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone)]
+pub(crate) struct MyCatalogProvider {
+    inner: Arc<MemoryCatalogProvider>,
+}
+
+impl CatalogProvider for MyCatalogProvider {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn schema_names(&self) -> Vec<String> {
+        self.inner.schema_names()
+    }
+
+    fn schema(&self, name: &str) -> Option<Arc<dyn SchemaProvider>> {
+        self.inner.schema(name)
+    }
+
+    fn register_schema(
+        &self,
+        name: &str,
+        schema: Arc<dyn SchemaProvider>,
+    ) -> Result<Option<Arc<dyn SchemaProvider>>> {
+        self.inner.register_schema(name, schema)
+    }
+
+    fn deregister_schema(
+        &self,
+        name: &str,
+        cascade: bool,
+    ) -> Result<Option<Arc<dyn SchemaProvider>>> {
+        self.inner.deregister_schema(name, cascade)
+    }
+}
+
+#[pymethods]
+impl MyCatalogProvider {
+    #[new]
+    pub fn new() -> PyResult<Self> {
+        let inner = Arc::new(MemoryCatalogProvider::new());
+
+        let schema_name: &str = "my_schema";
+        let _ = inner.register_schema(schema_name, Arc::new(FixedSchemaProvider::default()));
+
+        Ok(Self { inner })
+    }
+
+    pub fn __datafusion_catalog_provider__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_catalog_provider".into();
+
+        let provider = Arc::clone(&self.inner) as Arc<dyn CatalogProvider>;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_CatalogProvider::new_with_ffi_codec(provider, None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+/// This catalog provider list is intended only for unit tests.
+/// It pre-populates with a single catalog.
+#[pyclass(
+    skip_from_py_object,
+    name = "MyCatalogProviderList",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone)]
+pub(crate) struct MyCatalogProviderList {
+    inner: Arc<MemoryCatalogProviderList>,
+}
+
+impl CatalogProviderList for MyCatalogProviderList {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn catalog_names(&self) -> Vec<String> {
+        self.inner.catalog_names()
+    }
+
+    fn catalog(&self, name: &str) -> Option<Arc<dyn CatalogProvider>> {
+        self.inner.catalog(name)
+    }
+
+    fn register_catalog(
+        &self,
+        name: String,
+        catalog: Arc<dyn CatalogProvider>,
+    ) -> Option<Arc<dyn CatalogProvider>> {
+        self.inner.register_catalog(name, catalog)
+    }
+}
+
+#[pymethods]
+impl MyCatalogProviderList {
+    #[new]
+    pub fn new() -> PyResult<Self> {
+        let inner = Arc::new(MemoryCatalogProviderList::new());
+
+        inner.register_catalog(
+            "auto_ffi_catalog".to_owned(),
+            Arc::new(MyCatalogProvider::new()?),
+        );
+
+        Ok(Self { inner })
+    }
+
+    pub fn __datafusion_catalog_provider_list__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_catalog_provider_list".into();
+
+        let provider = Arc::clone(&self.inner) as Arc<dyn CatalogProviderList>;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_CatalogProviderList::new_with_ffi_codec(provider, None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/lib.rs b/examples/datafusion-ffi-example/src/lib.rs
new file mode 100644
index 000000000..23f2001a2
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/lib.rs
@@ -0,0 +1,48 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use pyo3::prelude::*;
+
+use crate::aggregate_udf::MySumUDF;
+use crate::catalog_provider::{FixedSchemaProvider, MyCatalogProvider, MyCatalogProviderList};
+use crate::scalar_udf::IsNullUDF;
+use crate::table_function::MyTableFunction;
+use crate::table_provider::MyTableProvider;
+use crate::window_udf::MyRankUDF;
+
+pub(crate) mod aggregate_udf;
+pub(crate) mod catalog_provider;
+pub(crate) mod scalar_udf;
+pub(crate) mod table_function;
+pub(crate) mod table_provider;
+pub(crate) mod utils;
+pub(crate) mod window_udf;
+
+#[pymodule]
+fn datafusion_ffi_example(m: &Bound<'_, PyModule>) -> PyResult<()> {
+    pyo3_log::init();
+
+    m.add_class::<MySumUDF>()?;
+    m.add_class::<FixedSchemaProvider>()?;
+    m.add_class::<MyCatalogProvider>()?;
+    m.add_class::<MyCatalogProviderList>()?;
+    m.add_class::<IsNullUDF>()?;
+    m.add_class::<MyTableFunction>()?;
+    m.add_class::<MyTableProvider>()?;
+    m.add_class::<MyRankUDF>()?;
+    Ok(())
+}
diff --git a/examples/datafusion-ffi-example/src/scalar_udf.rs b/examples/datafusion-ffi-example/src/scalar_udf.rs
new file mode 100644
index 000000000..374924781
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/scalar_udf.rs
@@ -0,0 +1,97 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
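+
+// IsNullUDF reimplements an `is null` check over FFI: for an array input it
+// inverts the validity buffer to build a BooleanArray, arrays without nulls
+// short-circuit to a scalar `false`, and scalar inputs are compared against
+// `ScalarValue::Null`.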
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow_array::{Array, BooleanArray};
+use arrow_schema::DataType;
+use datafusion_common::ScalarValue;
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::{
+    ColumnarValue, ScalarFunctionArgs, ScalarUDF, ScalarUDFImpl, Signature, TypeSignature,
+    Volatility,
+};
+use datafusion_ffi::udf::FFI_ScalarUDF;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyResult, Python, pyclass, pymethods};
+
+#[pyclass(
+    from_py_object,
+    name = "IsNullUDF",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone, PartialEq, Eq, Hash)]
+pub(crate) struct IsNullUDF {
+    signature: Signature,
+}
+
+#[pymethods]
+impl IsNullUDF {
+    #[new]
+    fn new() -> Self {
+        Self {
+            signature: Signature::new(TypeSignature::Any(1), Volatility::Immutable),
+        }
+    }
+
+    fn __datafusion_scalar_udf__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_scalar_udf".into();
+
+        let func = Arc::new(ScalarUDF::from(self.clone()));
+        let provider = FFI_ScalarUDF::from(func);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+impl ScalarUDFImpl for IsNullUDF {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "my_custom_is_null"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, _arg_types: &[DataType]) -> DataFusionResult<DataType> {
+        Ok(DataType::Boolean)
+    }
+
+    fn invoke_with_args(&self, args: ScalarFunctionArgs) -> DataFusionResult<ColumnarValue> {
+        let input = &args.args[0];
+
+        Ok(match input {
+            ColumnarValue::Array(arr) => match arr.is_nullable() {
+                true => {
+                    let nulls = arr.nulls().unwrap();
+                    let nulls = BooleanArray::from_iter(nulls.iter().map(|x| Some(!x)));
+                    ColumnarValue::Array(Arc::new(nulls))
+                }
+                false => ColumnarValue::Scalar(ScalarValue::Boolean(Some(false))),
+            },
+            ColumnarValue::Scalar(sv) => {
+                ColumnarValue::Scalar(ScalarValue::Boolean(Some(sv == &ScalarValue::Null)))
+            }
+        })
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/table_function.rs b/examples/datafusion-ffi-example/src/table_function.rs
new file mode 100644
index 000000000..0914e161c
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/table_function.rs
@@ -0,0 +1,66 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
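+
+// MyTableFunction ignores its SQL arguments and always returns
+// MyTableProvider::new(4, 3, 2): a table with four Int32 columns and two
+// batches of three and four rows ([0, 1, 2] and [3, 4, 5, 6] in every
+// column), which is what _test_table_function.py asserts.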
+
+use std::sync::Arc;
+
+use datafusion_catalog::{TableFunctionImpl, TableProvider};
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::Expr;
+use datafusion_ffi::udtf::FFI_TableFunction;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyAny, PyResult, Python, pyclass, pymethods};
+
+use crate::table_provider::MyTableProvider;
+use crate::utils::ffi_logical_codec_from_pycapsule;
+
+#[pyclass(
+    from_py_object,
+    name = "MyTableFunction",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone)]
+pub(crate) struct MyTableFunction {}
+
+#[pymethods]
+impl MyTableFunction {
+    #[new]
+    fn new() -> Self {
+        Self {}
+    }
+
+    fn __datafusion_table_function__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_table_function".into();
+
+        let func = self.clone();
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider = FFI_TableFunction::new_with_ffi_codec(Arc::new(func), None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+impl TableFunctionImpl for MyTableFunction {
+    fn call(&self, _args: &[Expr]) -> DataFusionResult<Arc<dyn TableProvider>> {
+        let provider = MyTableProvider::new(4, 3, 2).create_table()?;
+        Ok(Arc::new(provider))
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/table_provider.rs b/examples/datafusion-ffi-example/src/table_provider.rs
new file mode 100644
index 000000000..2c79e6ef9
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/table_provider.rs
@@ -0,0 +1,115 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::sync::Arc;
+
+use arrow_array::{ArrayRef, RecordBatch};
+use arrow_schema::{DataType, Field, Schema};
+use datafusion_catalog::MemTable;
+use datafusion_common::error::{DataFusionError, Result as DataFusionResult};
+use datafusion_ffi::table_provider::FFI_TableProvider;
+use pyo3::exceptions::PyRuntimeError;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyAny, PyResult, Python, pyclass, pymethods};
+
+use crate::utils::ffi_logical_codec_from_pycapsule;
+
+/// In order to provide a test that demonstrates different sized record batches,
+/// the first batch will have num_rows, the second batch num_rows+1, and so on.
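+///
+/// For example, `MyTableProvider::new(3, 2, 4)` yields four batches of 2, 3, 4,
+/// and 5 rows; batch N starts at value `N * num_rows`, giving `[0, 1]`,
+/// `[2, 3, 4]`, `[4, 5, 6, 7]`, and `[6, 7, 8, 9, 10]` in each column.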
+#[pyclass(
+    from_py_object,
+    name = "MyTableProvider",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Clone)]
+pub(crate) struct MyTableProvider {
+    num_cols: usize,
+    num_rows: usize,
+    num_batches: usize,
+}
+
+fn create_record_batch(
+    schema: &Arc<Schema>,
+    num_cols: usize,
+    start_value: i32,
+    num_values: usize,
+) -> DataFusionResult<RecordBatch> {
+    let end_value = start_value + num_values as i32;
+    let row_values: Vec<i32> = (start_value..end_value).collect();
+
+    let columns: Vec<_> = (0..num_cols)
+        .map(|_| Arc::new(arrow::array::Int32Array::from(row_values.clone())) as ArrayRef)
+        .collect();
+
+    RecordBatch::try_new(Arc::clone(schema), columns).map_err(DataFusionError::from)
+}
+
+impl MyTableProvider {
+    pub fn create_table(&self) -> DataFusionResult<MemTable> {
+        let fields: Vec<_> = (0..self.num_cols)
+            .map(|idx| (b'A' + idx as u8) as char)
+            .map(|col_name| Field::new(col_name, DataType::Int32, true))
+            .collect();
+
+        let schema = Arc::new(Schema::new(fields));
+
+        let batches: DataFusionResult<Vec<_>> = (0..self.num_batches)
+            .map(|batch_idx| {
+                let start_value = batch_idx * self.num_rows;
+                create_record_batch(
+                    &schema,
+                    self.num_cols,
+                    start_value as i32,
+                    self.num_rows + batch_idx,
+                )
+            })
+            .collect();
+
+        MemTable::try_new(schema, vec![batches?])
+    }
+}
+
+#[pymethods]
+impl MyTableProvider {
+    #[new]
+    pub fn new(num_cols: usize, num_rows: usize, num_batches: usize) -> Self {
+        Self {
+            num_cols,
+            num_rows,
+            num_batches,
+        }
+    }
+
+    pub fn __datafusion_table_provider__<'py>(
+        &self,
+        py: Python<'py>,
+        session: Bound<PyAny>,
+    ) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_table_provider".into();
+
+        let provider = self
+            .create_table()
+            .map_err(|e: DataFusionError| PyRuntimeError::new_err(e.to_string()))?;
+
+        let codec = ffi_logical_codec_from_pycapsule(session)?;
+        let provider =
+            FFI_TableProvider::new_with_ffi_codec(Arc::new(provider), false, None, codec);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
diff --git a/examples/datafusion-ffi-example/src/utils.rs b/examples/datafusion-ffi-example/src/utils.rs
new file mode 100644
index 000000000..5f2865aa2
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/utils.rs
@@ -0,0 +1,64 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use std::ptr::NonNull;
+
+use datafusion_ffi::proto::logical_extension_codec::FFI_LogicalExtensionCodec;
+use pyo3::exceptions::PyValueError;
+use pyo3::ffi::c_str;
+use pyo3::prelude::{PyAnyMethods, PyCapsuleMethods};
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyAny, PyResult};
+
+pub(crate) fn ffi_logical_codec_from_pycapsule(
+    obj: Bound<PyAny>,
+) -> PyResult<FFI_LogicalExtensionCodec> {
+    let attr_name = "__datafusion_logical_extension_codec__";
+    let capsule = if obj.hasattr(attr_name)? {
+        obj.getattr(attr_name)?.call0()?
+    } else {
+        obj
+    };
+
+    let capsule = capsule.cast::<PyCapsule>()?;
+    validate_pycapsule(capsule, "datafusion_logical_extension_codec")?;
+
+    let data: NonNull<FFI_LogicalExtensionCodec> = capsule
+        .pointer_checked(Some(c_str!("datafusion_logical_extension_codec")))?
+        .cast();
+    let codec = unsafe { data.as_ref() };
+
+    Ok(codec.clone())
+}
+
+pub(crate) fn validate_pycapsule(capsule: &Bound<PyCapsule>, name: &str) -> PyResult<()> {
+    let capsule_name = capsule.name()?;
+    if capsule_name.is_none() {
+        return Err(PyValueError::new_err(format!(
+            "Expected {name} PyCapsule to have name set."
+        )));
+    }
+
+    let capsule_name = unsafe { capsule_name.unwrap().as_cstr().to_str()? };
+    if capsule_name != name {
+        return Err(PyValueError::new_err(format!(
+            "Expected name '{name}' in PyCapsule, instead got '{capsule_name}'"
+        )));
+    }
+
+    Ok(())
+}
diff --git a/examples/datafusion-ffi-example/src/window_udf.rs b/examples/datafusion-ffi-example/src/window_udf.rs
new file mode 100644
index 000000000..cbf179a86
--- /dev/null
+++ b/examples/datafusion-ffi-example/src/window_udf.rs
@@ -0,0 +1,87 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
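+
+// MyRankUDF re-exports DataFusion's built-in `rank` window function under the
+// name "my_custom_rank", delegating each WindowUDFImpl method to the wrapped
+// WindowUDF; only the PyCapsule export is specific to this example.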
+
+use std::any::Any;
+use std::sync::Arc;
+
+use arrow_schema::{DataType, FieldRef};
+use datafusion_common::error::Result as DataFusionResult;
+use datafusion_expr::function::{PartitionEvaluatorArgs, WindowUDFFieldArgs};
+use datafusion_expr::{PartitionEvaluator, Signature, WindowUDF, WindowUDFImpl};
+use datafusion_ffi::udwf::FFI_WindowUDF;
+use datafusion_functions_window::rank::rank_udwf;
+use pyo3::types::PyCapsule;
+use pyo3::{Bound, PyResult, Python, pyclass, pymethods};
+
+#[pyclass(
+    from_py_object,
+    name = "MyRankUDF",
+    module = "datafusion_ffi_example",
+    subclass
+)]
+#[derive(Debug, Clone, Eq, PartialEq, Hash)]
+pub(crate) struct MyRankUDF {
+    inner: Arc<WindowUDF>,
+}
+
+#[pymethods]
+impl MyRankUDF {
+    #[new]
+    fn new() -> PyResult<Self> {
+        Ok(Self { inner: rank_udwf() })
+    }
+
+    fn __datafusion_window_udf__<'py>(&self, py: Python<'py>) -> PyResult<Bound<'py, PyCapsule>> {
+        let name = cr"datafusion_window_udf".into();
+
+        let func = Arc::new(WindowUDF::from(self.clone()));
+        let provider = FFI_WindowUDF::from(func);
+
+        PyCapsule::new(py, provider, Some(name))
+    }
+}
+
+impl WindowUDFImpl for MyRankUDF {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "my_custom_rank"
+    }
+
+    fn signature(&self) -> &Signature {
+        self.inner.signature()
+    }
+
+    fn partition_evaluator(
+        &self,
+        partition_evaluator_args: PartitionEvaluatorArgs,
+    ) -> DataFusionResult<Box<dyn PartitionEvaluator>> {
+        self.inner
+            .inner()
+            .partition_evaluator(partition_evaluator_args)
+    }
+
+    fn field(&self, field_args: WindowUDFFieldArgs) -> DataFusionResult<FieldRef> {
+        self.inner.inner().field(field_args)
+    }
+
+    fn coerce_types(&self, arg_types: &[DataType]) -> DataFusionResult<Vec<DataType>> {
+        self.inner.coerce_types(arg_types)
+    }
+}
diff --git a/examples/export.py b/examples/export.py
index d179bf39d..c7a387bcb 100644
--- a/examples/export.py
+++ b/examples/export.py
@@ -17,7 +17,6 @@
 
 import datafusion
 
-
 # create a context
 ctx = datafusion.SessionContext()
 
@@ -48,6 +47,6 @@
 pylist = df.to_pylist()
 assert pylist == [{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}]
 
-# export to Pyton dictionary of columns
+# export to Python dictionary of columns
 pydict = df.to_pydict()
 assert pydict == {"a": [1, 2, 3], "b": [4, 5, 6]}
diff --git a/examples/import.py b/examples/import.py
index a249a1c4e..7b5ab5082 100644
--- a/examples/import.py
+++ b/examples/import.py
@@ -16,10 +16,9 @@
 # under the License.
import datafusion -import pyarrow as pa import pandas as pd import polars as pl - +import pyarrow as pa # Create a context ctx = datafusion.SessionContext() @@ -28,7 +27,7 @@ # The dictionary keys represent column names and the dictionary values # represent column values df = ctx.from_pydict({"a": [1, 2, 3], "b": [4, 5, 6]}) -assert type(df) == datafusion.DataFrame +assert type(df) is datafusion.DataFrame # Dataframe: # +---+---+ # | a | b | @@ -40,19 +39,19 @@ # Create a datafusion DataFrame from a Python list of rows df = ctx.from_pylist([{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}]) -assert type(df) == datafusion.DataFrame +assert type(df) is datafusion.DataFrame # Convert pandas DataFrame to datafusion DataFrame pandas_df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) df = ctx.from_pandas(pandas_df) -assert type(df) == datafusion.DataFrame +assert type(df) is datafusion.DataFrame # Convert polars DataFrame to datafusion DataFrame polars_df = pl.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}) df = ctx.from_polars(polars_df) -assert type(df) == datafusion.DataFrame +assert type(df) is datafusion.DataFrame # Convert Arrow Table to datafusion DataFrame arrow_table = pa.Table.from_pydict({"a": [1, 2, 3], "b": [4, 5, 6]}) -df = ctx.from_arrow_table(arrow_table) -assert type(df) == datafusion.DataFrame +df = ctx.from_arrow(arrow_table) +assert type(df) is datafusion.DataFrame diff --git a/examples/python-udaf.py b/examples/python-udaf.py index ed705f5a9..6655edb0a 100644 --- a/examples/python-udaf.py +++ b/examples/python-udaf.py @@ -15,11 +15,10 @@ # specific language governing permissions and limitations # under the License. -import pyarrow -import pyarrow.compute import datafusion -from datafusion import udaf, Accumulator -from datafusion import col +import pyarrow as pa +import pyarrow.compute +from datafusion import Accumulator, col, udaf class MyAccumulator(Accumulator): @@ -27,25 +26,21 @@ class MyAccumulator(Accumulator): Interface of a user-defined accumulation. """ - def __init__(self): - self._sum = pyarrow.scalar(0.0) + def __init__(self) -> None: + self._sum = pa.scalar(0.0) - def update(self, values: pyarrow.Array) -> None: + def update(self, values: pa.Array) -> None: # not nice since pyarrow scalars can't be summed yet. This breaks on `None` - self._sum = pyarrow.scalar( - self._sum.as_py() + pyarrow.compute.sum(values).as_py() - ) + self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(values).as_py()) - def merge(self, states: pyarrow.Array) -> None: + def merge(self, states: pa.Array) -> None: # not nice since pyarrow scalars can't be summed yet. 
This breaks on `None` - self._sum = pyarrow.scalar( - self._sum.as_py() + pyarrow.compute.sum(states).as_py() - ) + self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(states).as_py()) - def state(self) -> pyarrow.Array: - return pyarrow.array([self._sum.as_py()]) + def state(self) -> pa.Array: + return pa.array([self._sum.as_py()]) - def evaluate(self) -> pyarrow.Scalar: + def evaluate(self) -> pa.Scalar: return self._sum @@ -53,17 +48,17 @@ def evaluate(self) -> pyarrow.Scalar: ctx = datafusion.SessionContext() # create a RecordBatch and a new DataFrame from it -batch = pyarrow.RecordBatch.from_arrays( - [pyarrow.array([1, 2, 3]), pyarrow.array([4, 5, 6])], +batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], names=["a", "b"], ) df = ctx.create_dataframe([[batch]]) my_udaf = udaf( MyAccumulator, - pyarrow.float64(), - pyarrow.float64(), - [pyarrow.float64()], + pa.float64(), + pa.float64(), + [pa.float64()], "stable", ) @@ -71,4 +66,4 @@ def evaluate(self) -> pyarrow.Scalar: result = df.collect()[0] -assert result.column(0) == pyarrow.array([6.0]) +assert result.column(0) == pa.array([6.0]) diff --git a/examples/python-udf-comparisons.py b/examples/python-udf-comparisons.py new file mode 100644 index 000000000..b870645a3 --- /dev/null +++ b/examples/python-udf-comparisons.py @@ -0,0 +1,185 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +import time +from pathlib import Path + +import pyarrow as pa +import pyarrow.compute as pc +from datafusion import SessionContext, col, lit, udf +from datafusion import functions as F + +path = Path(__file__).parent.resolve() +filepath = path / "./tpch/data/lineitem.parquet" + +# This example serves to demonstrate alternate approaches to answering the +# question "return all of the rows that have a specific combination of these +# values". We have the combinations we care about provided as a python +# list of tuples. There is no built in function that supports this operation, +# but it can be explicitly specified via a single expression or we can +# use a user defined function. + +ctx = SessionContext() + +# These part keys and suppliers are chosen because there are +# cases where two suppliers each have two of the part keys +# but we are interested in these specific combinations. 
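+#
+# Note that three separate IN-list filters match each column independently, so
+# a row such as (1530, 1531, "N") passes all three lists even though it is not
+# one of the combinations below. That cross-matching is why the first approach
+# reports the wrong row count.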
+ +values_of_interest = [ + (1530, 4031, "N"), + (6530, 1531, "N"), + (5618, 619, "N"), + (8118, 8119, "N"), +] + +partkeys = [lit(r[0]) for r in values_of_interest] +suppkeys = [lit(r[1]) for r in values_of_interest] +returnflags = [lit(r[2]) for r in values_of_interest] + +df_lineitem = ctx.read_parquet(filepath).select( + "l_partkey", "l_suppkey", "l_returnflag" +) + +start_time = time.time() + +df_simple_filter = df_lineitem.filter( + F.in_list(col("l_partkey"), partkeys), + F.in_list(col("l_suppkey"), suppkeys), + F.in_list(col("l_returnflag"), returnflags), +) + +num_rows = df_simple_filter.count() +print( + f"Simple filtering has number {num_rows} rows and took {time.time() - start_time} s" +) +print("This is the incorrect number of rows!") +start_time = time.time() + +# Explicitly check for the combinations of interest. +# This works but is not scalable. + +filter_expr = ( + ( + (col("l_partkey") == values_of_interest[0][0]) + & (col("l_suppkey") == values_of_interest[0][1]) + & (col("l_returnflag") == values_of_interest[0][2]) + ) + | ( + (col("l_partkey") == values_of_interest[1][0]) + & (col("l_suppkey") == values_of_interest[1][1]) + & (col("l_returnflag") == values_of_interest[1][2]) + ) + | ( + (col("l_partkey") == values_of_interest[2][0]) + & (col("l_suppkey") == values_of_interest[2][1]) + & (col("l_returnflag") == values_of_interest[2][2]) + ) + | ( + (col("l_partkey") == values_of_interest[3][0]) + & (col("l_suppkey") == values_of_interest[3][1]) + & (col("l_returnflag") == values_of_interest[3][2]) + ) +) + +df_explicit_filter = df_lineitem.filter(filter_expr) + +num_rows = df_explicit_filter.count() +print( + f"Explicit filtering has number {num_rows} rows and took {time.time() - start_time} s" +) +start_time = time.time() + +# Instead try a python UDF + + +def is_of_interest_impl( + partkey_arr: pa.Array, + suppkey_arr: pa.Array, + returnflag_arr: pa.Array, +) -> pa.Array: + result = [] + for idx, partkey_val in enumerate(partkey_arr): + partkey = partkey_val.as_py() + suppkey = suppkey_arr[idx].as_py() + returnflag = returnflag_arr[idx].as_py() + value = (partkey, suppkey, returnflag) + result.append(value in values_of_interest) + + return pa.array(result) + + +is_of_interest = udf( + is_of_interest_impl, + [pa.int64(), pa.int64(), pa.utf8()], + pa.bool_(), + "stable", +) + +df_udf_filter = df_lineitem.filter( + is_of_interest(col("l_partkey"), col("l_suppkey"), col("l_returnflag")) +) + +num_rows = df_udf_filter.count() +print(f"UDF filtering has number {num_rows} rows and took {time.time() - start_time} s") +start_time = time.time() + +# Now use a user defined function but lean on the built in pyarrow array +# functions so we never convert rows to python objects. + +# To see other pyarrow compute functions see +# https://arrow.apache.org/docs/python/api/compute.html +# +# It is important that the number of rows in the returned array +# matches the original array, so we cannot use functions like +# filtered_partkey_arr.filter(filtered_suppkey_arr). 
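+#
+# Each pc.equal call yields a BooleanArray mask with one entry per input row,
+# and pc.and_ / pc.or_ combine masks elementwise. The loop below therefore
+# computes, in vectorized form, the same predicate as the explicit filter_expr
+# above: an OR over the tuples of (partkey AND suppkey AND returnflag).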
+ + +def udf_using_pyarrow_compute_impl( + partkey_arr: pa.Array, + suppkey_arr: pa.Array, + returnflag_arr: pa.Array, +) -> pa.Array: + results = None + for partkey, suppkey, returnflag in values_of_interest: + filtered_partkey_arr = pc.equal(partkey_arr, partkey) + filtered_suppkey_arr = pc.equal(suppkey_arr, suppkey) + filtered_returnflag_arr = pc.equal(returnflag_arr, returnflag) + + resultant_arr = pc.and_(filtered_partkey_arr, filtered_suppkey_arr) + resultant_arr = pc.and_(resultant_arr, filtered_returnflag_arr) + + results = resultant_arr if results is None else pc.or_(results, resultant_arr) + + return results + + +udf_using_pyarrow_compute = udf( + udf_using_pyarrow_compute_impl, + [pa.int64(), pa.int64(), pa.utf8()], + pa.bool_(), + "stable", +) + +df_udf_pyarrow_compute = df_lineitem.filter( + udf_using_pyarrow_compute(col("l_partkey"), col("l_suppkey"), col("l_returnflag")) +) + +num_rows = df_udf_pyarrow_compute.count() +print( + f"UDF filtering using pyarrow compute has number {num_rows} rows and took {time.time() - start_time} s" +) +start_time = time.time() diff --git a/examples/python-udf.py b/examples/python-udf.py index 30edd4198..1c08acd1a 100644 --- a/examples/python-udf.py +++ b/examples/python-udf.py @@ -15,22 +15,23 @@ # specific language governing permissions and limitations # under the License. -import pyarrow -from datafusion import udf, SessionContext, functions as f +import pyarrow as pa +from datafusion import SessionContext, udf +from datafusion import functions as f -def is_null(array: pyarrow.Array) -> pyarrow.Array: +def is_null(array: pa.Array) -> pa.Array: return array.is_null() -is_null_arr = udf(is_null, [pyarrow.int64()], pyarrow.bool_(), "stable") +is_null_arr = udf(is_null, [pa.int64()], pa.bool_(), "stable") # create a context ctx = SessionContext() # create a RecordBatch and a new DataFrame from it -batch = pyarrow.RecordBatch.from_arrays( - [pyarrow.array([1, 2, 3]), pyarrow.array([4, 5, 6])], +batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], names=["a", "b"], ) df = ctx.create_dataframe([[batch]]) @@ -39,4 +40,4 @@ def is_null(array: pyarrow.Array) -> pyarrow.Array: result = df.collect()[0] -assert result.column(0) == pyarrow.array([False] * 3) +assert result.column(0) == pa.array([False] * 3) diff --git a/examples/python-udwf.py b/examples/python-udwf.py new file mode 100644 index 000000000..645ded188 --- /dev/null +++ b/examples/python-udwf.py @@ -0,0 +1,274 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
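+
+# Most of the evaluators below share the same exponential smoothing recurrence:
+# smoothed[0] = x[0], then smoothed[i] = alpha * x[i] + (1 - alpha) * smoothed[i - 1].
+# A rough pure-Python sketch of that recurrence (a hypothetical helper, not
+# used in this script) would be:
+#
+#     def smooth(xs: list[float], alpha: float) -> list[float]:
+#         out: list[float] = []
+#         for i, x in enumerate(xs):
+#             out.append(x if i == 0 else alpha * x + (1 - alpha) * out[-1])
+#         return out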
+
+from __future__ import annotations
+
+import datafusion
+import pyarrow as pa
+from datafusion import col, lit, udwf
+from datafusion import functions as f
+from datafusion.expr import WindowFrame
+from datafusion.user_defined import WindowEvaluator
+
+# This example creates five different user defined window functions in order to
+# demonstrate the variety of ways they may need to be implemented.
+
+
+class ExponentialSmoothDefault(WindowEvaluator):
+    """Create a running smooth operation across an entire partition at once."""
+
+    def __init__(self, alpha: float) -> None:
+        self.alpha = alpha
+
+    def evaluate_all(self, values: list[pa.Array], num_rows: int) -> pa.Array:
+        results = []
+        curr_value = 0.0
+        values = values[0]
+        for idx in range(num_rows):
+            if idx == 0:
+                curr_value = values[idx].as_py()
+            else:
+                curr_value = values[idx].as_py() * self.alpha + curr_value * (
+                    1.0 - self.alpha
+                )
+            results.append(curr_value)
+
+        return pa.array(results)
+
+
+class SmoothBoundedFromPreviousRow(WindowEvaluator):
+    """Smooth over from the previous to current row only."""
+
+    def __init__(self, alpha: float) -> None:
+        self.alpha = alpha
+
+    def supports_bounded_execution(self) -> bool:
+        return True
+
+    def get_range(self, idx: int, num_rows: int) -> tuple[int, int]:  # noqa: ARG002
+        # Override the default range of the current row since uses_window_frame is
+        # False, so for the purpose of this test we just smooth from the previous
+        # row to the current one.
+        if idx == 0:
+            return (0, 0)
+        return (idx - 1, idx)
+
+    def evaluate(
+        self, values: list[pa.Array], eval_range: tuple[int, int]
+    ) -> pa.Scalar:
+        (start, stop) = eval_range
+        curr_value = 0.0
+        values = values[0]
+        for idx in range(start, stop + 1):
+            if idx == start:
+                curr_value = values[idx].as_py()
+            else:
+                curr_value = values[idx].as_py() * self.alpha + curr_value * (
+                    1.0 - self.alpha
+                )
+        return pa.scalar(curr_value).cast(pa.float64())
+
+
+class SmoothAcrossRank(WindowEvaluator):
+    """Smooth over the rank from the previous rank to current."""
+
+    def __init__(self, alpha: float) -> None:
+        self.alpha = alpha
+
+    def include_rank(self) -> bool:
+        return True
+
+    def evaluate_all_with_rank(
+        self, num_rows: int, ranks_in_partition: list[tuple[int, int]]
+    ) -> pa.Array:
+        results = []
+        for idx in range(num_rows):
+            if idx == 0:
+                prior_value = 1.0
+            matching_row = [
+                i
+                for i in range(len(ranks_in_partition))
+                if ranks_in_partition[i][0] <= idx and ranks_in_partition[i][1] > idx
+            ][0] + 1
+            curr_value = matching_row * self.alpha + prior_value * (1.0 - self.alpha)
+            results.append(curr_value)
+            prior_value = matching_row
+
+        return pa.array(results)
+
+
+class ExponentialSmoothFrame(WindowEvaluator):
+    """Find the value across an entire frame using exponential smoothing."""
+
+    def __init__(self, alpha: float) -> None:
+        self.alpha = alpha
+
+    def uses_window_frame(self) -> bool:
+        return True
+
+    def evaluate(
+        self, values: list[pa.Array], eval_range: tuple[int, int]
+    ) -> pa.Scalar:
+        (start, stop) = eval_range
+        curr_value = 0.0
+        if len(values) > 1:
+            order_by = values[1]  # noqa: F841
+            values = values[0]
+        else:
+            values = values[0]
+        for idx in range(start, stop):
+            if idx == start:
+                curr_value = values[idx].as_py()
+            else:
+                curr_value = values[idx].as_py() * self.alpha + curr_value * (
+                    1.0 - self.alpha
+                )
+        return pa.scalar(curr_value).cast(pa.float64())
+
+
+class SmoothTwoColumn(WindowEvaluator):
+    """Smooth one column based on a condition in another column.
+
+    If the second column is null, then smooth over the first column using the
+    values from the previous and next rows.
+    """
+
+    def __init__(self, alpha: float) -> None:
+        self.alpha = alpha
+
+    def evaluate_all(self, values: list[pa.Array], num_rows: int) -> pa.Array:
+        results = []
+        values_a = values[0]
+        values_b = values[1]
+        for idx in range(num_rows):
+            if not values_b[idx].is_valid:
+                if idx == 0:
+                    results.append(values_a[1].cast(pa.float64()))
+                elif idx == num_rows - 1:
+                    results.append(values_a[num_rows - 2].cast(pa.float64()))
+                else:
+                    results.append(
+                        pa.scalar(
+                            values_a[idx - 1].as_py() * self.alpha
+                            + values_a[idx + 1].as_py() * (1.0 - self.alpha)
+                        )
+                    )
+            else:
+                results.append(values_a[idx].cast(pa.float64()))
+
+        return pa.array(results)
+
+
+# create a context
+ctx = datafusion.SessionContext()
+
+# create a RecordBatch and a new DataFrame from it
+batch = pa.RecordBatch.from_arrays(
+    [
+        pa.array([1.0, 2.1, 2.9, 4.0, 5.1, 6.0, 6.9, 8.0]),
+        pa.array([1, 2, None, 4, 5, 6, None, 8]),
+        pa.array(["A", "A", "A", "A", "A", "B", "B", "B"]),
+    ],
+    names=["a", "b", "c"],
+)
+df = ctx.create_dataframe([[batch]])
+
+exp_smooth = udwf(
+    lambda: ExponentialSmoothDefault(0.9),
+    pa.float64(),
+    pa.float64(),
+    volatility="immutable",
+)
+
+smooth_two_row = udwf(
+    lambda: SmoothBoundedFromPreviousRow(0.9),
+    pa.float64(),
+    pa.float64(),
+    volatility="immutable",
+)
+
+smooth_rank = udwf(
+    lambda: SmoothAcrossRank(0.9),
+    pa.float64(),
+    pa.float64(),
+    volatility="immutable",
+)
+
+smooth_frame = udwf(
+    lambda: ExponentialSmoothFrame(0.9),
+    pa.float64(),
+    pa.float64(),
+    volatility="immutable",
+    name="smooth_frame",
+)
+
+smooth_two_col = udwf(
+    lambda: SmoothTwoColumn(0.9),
+    [pa.float64(), pa.int64()],
+    pa.float64(),
+    volatility="immutable",
+)
+
+# These are done with separate statements instead of one large `select` because that will
+# attempt to combine the window operations and our defined UDFs do not all support that.
+(
+    df.with_column("exp_smooth", exp_smooth(col("a")))
+    .with_column("smooth_prior_row", smooth_two_row(col("a")))
+    .with_column("smooth_rank", smooth_rank(col("a")).order_by(col("c")).build())
+    .with_column("smooth_two_col", smooth_two_col(col("a"), col("b")))
+    .with_column(
+        "smooth_frame",
+        smooth_frame(col("a")).window_frame(WindowFrame("rows", None, 0)).build(),
+    )
+    .select(
+        "a",
+        "b",
+        "c",
+        "exp_smooth",
+        "smooth_prior_row",
+        "smooth_rank",
+        "smooth_two_col",
+        "smooth_frame",
+    )
+).show()
+
+assert df.select(f.round(exp_smooth(col("a")), lit(3))).collect()[0].column(
+    0
+) == pa.array([1, 1.99, 2.809, 3.881, 4.978, 5.898, 6.8, 7.88])
+
+
+assert df.select(f.round(smooth_two_row(col("a")), lit(3))).collect()[0].column(
+    0
+) == pa.array([1.0, 1.99, 2.82, 3.89, 4.99, 5.91, 6.81, 7.89])
+
+
+assert df.select(smooth_rank(col("a")).order_by(col("c")).build()).collect()[0].column(
+    0
+) == pa.array([1, 1, 1, 1, 1, 1.9, 2.0, 2.0])
+
+
+assert df.select(smooth_two_col(col("a"), col("b"))).collect()[0].column(0) == pa.array(
+    [1, 2.1, 2.29, 4, 5.1, 6, 6.2, 8.0]
+)
+
+
+assert df.select(
+    f.round(
+        smooth_frame(col("a")).window_frame(WindowFrame("rows", None, 0)).build(),
+        lit(3),
+    )
+).collect()[0].column(0) == pa.array([1, 1.99, 2.809, 3.881, 4.978, 5.898, 6.8, 7.88])
diff --git a/examples/query-pyarrow-data.py b/examples/query-pyarrow-data.py
index 83e6884a7..9cfe8a62b 100644
--- a/examples/query-pyarrow-data.py
+++ b/examples/query-pyarrow-data.py
@@ -16,16 +16,15 @@
 # under the License.
import datafusion +import pyarrow as pa from datafusion import col -import pyarrow - # create a context ctx = datafusion.SessionContext() # create a RecordBatch and a new DataFrame from it -batch = pyarrow.RecordBatch.from_arrays( - [pyarrow.array([1, 2, 3]), pyarrow.array([4, 5, 6])], +batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], names=["a", "b"], ) df = ctx.create_dataframe([[batch]]) @@ -39,5 +38,5 @@ # execute and collect the first (and only) batch result = df.collect()[0] -assert result.column(0) == pyarrow.array([5, 7, 9]) -assert result.column(1) == pyarrow.array([-3, -3, -3]) +assert result.column(0) == pa.array([5, 7, 9]) +assert result.column(1) == pa.array([-3, -3, -3]) diff --git a/examples/sql-parquet-s3.py b/examples/sql-parquet-s3.py index bd7da5e20..866e2ac68 100644 --- a/examples/sql-parquet-s3.py +++ b/examples/sql-parquet-s3.py @@ -16,6 +16,7 @@ # under the License. import os + import datafusion from datafusion.object_store import AmazonS3 @@ -31,7 +32,7 @@ ctx = datafusion.SessionContext() path = f"s3://{bucket_name}/" -ctx.register_object_store(path, s3) +ctx.register_object_store("s3://", s3, None) ctx.register_parquet("trips", path) diff --git a/examples/sql-to-pandas.py b/examples/sql-to-pandas.py index 3e99b22de..34f7bde1b 100644 --- a/examples/sql-to-pandas.py +++ b/examples/sql-to-pandas.py @@ -17,7 +17,6 @@ from datafusion import SessionContext - # Create a DataFusion context ctx = SessionContext() diff --git a/examples/sql-using-python-udaf.py b/examples/sql-using-python-udaf.py index 7ccf5d3cb..f42bbdc23 100644 --- a/examples/sql-using-python-udaf.py +++ b/examples/sql-using-python-udaf.py @@ -15,8 +15,8 @@ # specific language governing permissions and limitations # under the License. -from datafusion import udaf, SessionContext, Accumulator import pyarrow as pa +from datafusion import Accumulator, SessionContext, udaf # Define a user-defined aggregation function (UDAF) @@ -25,19 +25,19 @@ class MyAccumulator(Accumulator): Interface of a user-defined accumulation. """ - def __init__(self): + def __init__(self) -> None: self._sum = pa.scalar(0.0) - def update(self, values: pa.Array) -> None: + def update(self, values: list[pa.Array]) -> None: # not nice since pyarrow scalars can't be summed yet. This breaks on `None` self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(values).as_py()) def merge(self, states: pa.Array) -> None: # not nice since pyarrow scalars can't be summed yet. This breaks on `None` - self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(states).as_py()) + self._sum = pa.scalar(self._sum.as_py() + pa.compute.sum(states[0]).as_py()) - def state(self) -> pa.Array: - return pa.array([self._sum.as_py()]) + def state(self) -> list[pa.Array]: + return [self._sum] def evaluate(self) -> pa.Scalar: return self._sum diff --git a/examples/sql-using-python-udf.py b/examples/sql-using-python-udf.py index d6bbe3ab0..2f0a0b67d 100644 --- a/examples/sql-using-python-udf.py +++ b/examples/sql-using-python-udf.py @@ -15,8 +15,8 @@ # specific language governing permissions and limitations # under the License. 
-from datafusion import udf, SessionContext
 import pyarrow as pa
+from datafusion import SessionContext, udf
 
 
 # Define a user-defined function (UDF)
diff --git a/examples/substrait.py b/examples/substrait.py
index 23cd74649..fa6f77912 100644
--- a/examples/substrait.py
+++ b/examples/substrait.py
@@ -18,16 +18,13 @@
 from datafusion import SessionContext
 from datafusion import substrait as ss
 
-
 # Create a DataFusion context
 ctx = SessionContext()
 
 # Register table with context
 ctx.register_csv("aggregate_test_data", "./testing/data/csv/aggregate_test_100.csv")
 
-substrait_plan = ss.substrait.serde.serialize_to_plan(
-    "SELECT * FROM aggregate_test_data", ctx
-)
+substrait_plan = ss.Serde.serialize_to_plan("SELECT * FROM aggregate_test_data", ctx)
 # type(substrait_plan) -> <class 'datafusion.substrait.Plan'>
 
 # Encode it to bytes
@@ -38,17 +35,15 @@
 # Alternative serialization approaches
 # type(substrait_bytes) -> <class 'bytes'>, at this point the bytes can be distributed to file, network, etc safely
 # where they could subsequently be deserialized on the receiving end.
-substrait_bytes = ss.substrait.serde.serialize_bytes(
-    "SELECT * FROM aggregate_test_data", ctx
-)
+substrait_bytes = ss.Serde.serialize_bytes("SELECT * FROM aggregate_test_data", ctx)
 
 # Imagine here bytes would be read from network, file, etc ... for example brevity this is omitted and variable is simply reused
 # type(substrait_plan) -> <class 'datafusion.substrait.Plan'>
-substrait_plan = ss.substrait.serde.deserialize_bytes(substrait_bytes)
+substrait_plan = ss.Serde.deserialize_bytes(substrait_bytes)
 
 # type(df_logical_plan) ->
-df_logical_plan = ss.substrait.consumer.from_substrait_plan(ctx, substrait_plan)
+df_logical_plan = ss.Consumer.from_substrait_plan(ctx, substrait_plan)
 
 # Back to Substrait Plan just for demonstration purposes
 # type(substrait_plan) -> <class 'datafusion.substrait.Plan'>
-substrait_plan = ss.substrait.producer.to_substrait_plan(df_logical_plan)
+substrait_plan = ss.Producer.to_substrait_plan(df_logical_plan, ctx)
diff --git a/examples/tpch/_tests.py b/examples/tpch/_tests.py
index 8804041b1..780fcf5e5 100644
--- a/examples/tpch/_tests.py
+++ b/examples/tpch/_tests.py
@@ -15,38 +15,39 @@
 # specific language governing permissions and limitations
 # under the License.
-import pytest from importlib import import_module + import pyarrow as pa -from datafusion import col, lit, functions as F +import pytest +from datafusion import DataFrame, col, lit +from datafusion import functions as F from util import get_answer_file def df_selection(col_name, col_type): - if col_type == pa.float64() or isinstance(col_type, pa.Decimal128Type): + if col_type == pa.float64(): return F.round(col(col_name), lit(2)).alias(col_name) - elif col_type == pa.string(): + if isinstance(col_type, pa.Decimal128Type): + return F.round(col(col_name).cast(pa.float64()), lit(2)).alias(col_name) + if col_type == pa.string() or col_type == pa.string_view(): return F.trim(col(col_name)).alias(col_name) - else: - return col(col_name) + return col(col_name) def load_schema(col_name, col_type): if col_type == pa.int64() or col_type == pa.int32(): return col_name, pa.string() - elif isinstance(col_type, pa.Decimal128Type): + if isinstance(col_type, pa.Decimal128Type): return col_name, pa.float64() - else: - return col_name, col_type + return col_name, col_type def expected_selection(col_name, col_type): if col_type == pa.int64() or col_type == pa.int32(): return F.trim(col(col_name)).cast(col_type).alias(col_name) - elif col_type == pa.string(): + if col_type == pa.string() or col_type == pa.string_view(): return F.trim(col(col_name)).alias(col_name) - else: - return col(col_name) + return col(col_name) def selections_and_schema(original_schema): @@ -92,12 +93,13 @@ def check_q17(df): ("q22_global_sales_opportunity", "q22"), ], ) -def test_tpch_query_vs_answer_file(query_code: str, answer_file: str): +def test_tpch_query_vs_answer_file(query_code: str, answer_file: str) -> None: module = import_module(query_code) - df = module.df + df: DataFrame = module.df - # Treat q17 as a special case. The answer file does not match the spec. Running at - # scale factor 1, we have manually verified this result does match the expected value. + # Treat q17 as a special case. The answer file does not match the spec. + # Running at scale factor 1, we have manually verified this result does + # match the expected value. if answer_file == "q17": return check_q17(df) @@ -120,5 +122,7 @@ def test_tpch_query_vs_answer_file(query_code: str, answer_file: str): cols = list(read_schema.names) - assert df.join(df_expected, (cols, cols), "anti").count() == 0 + assert df.join(df_expected, on=cols, how="anti").count() == 0 assert df.count() == df_expected.count() + + return None diff --git a/examples/tpch/convert_data_to_parquet.py b/examples/tpch/convert_data_to_parquet.py index a8091a708..af554c39e 100644 --- a/examples/tpch/convert_data_to_parquet.py +++ b/examples/tpch/convert_data_to_parquet.py @@ -22,122 +22,121 @@ as will be generated by the script provided in this repository. 
""" -import os -import pyarrow +from pathlib import Path + import datafusion +import pyarrow as pa ctx = datafusion.SessionContext() all_schemas = {} all_schemas["customer"] = [ - ("C_CUSTKEY", pyarrow.int64()), - ("C_NAME", pyarrow.string()), - ("C_ADDRESS", pyarrow.string()), - ("C_NATIONKEY", pyarrow.int64()), - ("C_PHONE", pyarrow.string()), - ("C_ACCTBAL", pyarrow.decimal128(15, 2)), - ("C_MKTSEGMENT", pyarrow.string()), - ("C_COMMENT", pyarrow.string()), + ("C_CUSTKEY", pa.int64()), + ("C_NAME", pa.string()), + ("C_ADDRESS", pa.string()), + ("C_NATIONKEY", pa.int64()), + ("C_PHONE", pa.string()), + ("C_ACCTBAL", pa.decimal128(15, 2)), + ("C_MKTSEGMENT", pa.string()), + ("C_COMMENT", pa.string()), ] all_schemas["lineitem"] = [ - ("L_ORDERKEY", pyarrow.int64()), - ("L_PARTKEY", pyarrow.int64()), - ("L_SUPPKEY", pyarrow.int64()), - ("L_LINENUMBER", pyarrow.int32()), - ("L_QUANTITY", pyarrow.decimal128(15, 2)), - ("L_EXTENDEDPRICE", pyarrow.decimal128(15, 2)), - ("L_DISCOUNT", pyarrow.decimal128(15, 2)), - ("L_TAX", pyarrow.decimal128(15, 2)), - ("L_RETURNFLAG", pyarrow.string()), - ("L_LINESTATUS", pyarrow.string()), - ("L_SHIPDATE", pyarrow.date32()), - ("L_COMMITDATE", pyarrow.date32()), - ("L_RECEIPTDATE", pyarrow.date32()), - ("L_SHIPINSTRUCT", pyarrow.string()), - ("L_SHIPMODE", pyarrow.string()), - ("L_COMMENT", pyarrow.string()), + ("L_ORDERKEY", pa.int64()), + ("L_PARTKEY", pa.int64()), + ("L_SUPPKEY", pa.int64()), + ("L_LINENUMBER", pa.int32()), + ("L_QUANTITY", pa.decimal128(15, 2)), + ("L_EXTENDEDPRICE", pa.decimal128(15, 2)), + ("L_DISCOUNT", pa.decimal128(15, 2)), + ("L_TAX", pa.decimal128(15, 2)), + ("L_RETURNFLAG", pa.string()), + ("L_LINESTATUS", pa.string()), + ("L_SHIPDATE", pa.date32()), + ("L_COMMITDATE", pa.date32()), + ("L_RECEIPTDATE", pa.date32()), + ("L_SHIPINSTRUCT", pa.string()), + ("L_SHIPMODE", pa.string()), + ("L_COMMENT", pa.string()), ] all_schemas["nation"] = [ - ("N_NATIONKEY", pyarrow.int64()), - ("N_NAME", pyarrow.string()), - ("N_REGIONKEY", pyarrow.int64()), - ("N_COMMENT", pyarrow.string()), + ("N_NATIONKEY", pa.int64()), + ("N_NAME", pa.string()), + ("N_REGIONKEY", pa.int64()), + ("N_COMMENT", pa.string()), ] all_schemas["orders"] = [ - ("O_ORDERKEY", pyarrow.int64()), - ("O_CUSTKEY", pyarrow.int64()), - ("O_ORDERSTATUS", pyarrow.string()), - ("O_TOTALPRICE", pyarrow.decimal128(15, 2)), - ("O_ORDERDATE", pyarrow.date32()), - ("O_ORDERPRIORITY", pyarrow.string()), - ("O_CLERK", pyarrow.string()), - ("O_SHIPPRIORITY", pyarrow.int32()), - ("O_COMMENT", pyarrow.string()), + ("O_ORDERKEY", pa.int64()), + ("O_CUSTKEY", pa.int64()), + ("O_ORDERSTATUS", pa.string()), + ("O_TOTALPRICE", pa.decimal128(15, 2)), + ("O_ORDERDATE", pa.date32()), + ("O_ORDERPRIORITY", pa.string()), + ("O_CLERK", pa.string()), + ("O_SHIPPRIORITY", pa.int32()), + ("O_COMMENT", pa.string()), ] all_schemas["part"] = [ - ("P_PARTKEY", pyarrow.int64()), - ("P_NAME", pyarrow.string()), - ("P_MFGR", pyarrow.string()), - ("P_BRAND", pyarrow.string()), - ("P_TYPE", pyarrow.string()), - ("P_SIZE", pyarrow.int32()), - ("P_CONTAINER", pyarrow.string()), - ("P_RETAILPRICE", pyarrow.decimal128(15, 2)), - ("P_COMMENT", pyarrow.string()), + ("P_PARTKEY", pa.int64()), + ("P_NAME", pa.string()), + ("P_MFGR", pa.string()), + ("P_BRAND", pa.string()), + ("P_TYPE", pa.string()), + ("P_SIZE", pa.int32()), + ("P_CONTAINER", pa.string()), + ("P_RETAILPRICE", pa.decimal128(15, 2)), + ("P_COMMENT", pa.string()), ] all_schemas["partsupp"] = [ - ("PS_PARTKEY", pyarrow.int64()), - ("PS_SUPPKEY", 
pyarrow.int64()), - ("PS_AVAILQTY", pyarrow.int32()), - ("PS_SUPPLYCOST", pyarrow.decimal128(15, 2)), - ("PS_COMMENT", pyarrow.string()), + ("PS_PARTKEY", pa.int64()), + ("PS_SUPPKEY", pa.int64()), + ("PS_AVAILQTY", pa.int32()), + ("PS_SUPPLYCOST", pa.decimal128(15, 2)), + ("PS_COMMENT", pa.string()), ] all_schemas["region"] = [ - ("r_REGIONKEY", pyarrow.int64()), - ("r_NAME", pyarrow.string()), - ("r_COMMENT", pyarrow.string()), + ("r_REGIONKEY", pa.int64()), + ("r_NAME", pa.string()), + ("r_COMMENT", pa.string()), ] all_schemas["supplier"] = [ - ("S_SUPPKEY", pyarrow.int64()), - ("S_NAME", pyarrow.string()), - ("S_ADDRESS", pyarrow.string()), - ("S_NATIONKEY", pyarrow.int32()), - ("S_PHONE", pyarrow.string()), - ("S_ACCTBAL", pyarrow.decimal128(15, 2)), - ("S_COMMENT", pyarrow.string()), + ("S_SUPPKEY", pa.int64()), + ("S_NAME", pa.string()), + ("S_ADDRESS", pa.string()), + ("S_NATIONKEY", pa.int32()), + ("S_PHONE", pa.string()), + ("S_ACCTBAL", pa.decimal128(15, 2)), + ("S_COMMENT", pa.string()), ] -curr_dir = os.path.dirname(os.path.abspath(__file__)) -for filename, curr_schema in all_schemas.items(): +curr_dir = Path(__file__).resolve().parent +for filename, curr_schema_val in all_schemas.items(): # For convenience, go ahead and convert the schema column names to lowercase - curr_schema = [(s[0].lower(), s[1]) for s in curr_schema] + curr_schema = [(s[0].lower(), s[1]) for s in curr_schema_val] # Pre-collect the output columns so we can ignore the null field we add # in to handle the trailing | in the file output_cols = [r[0] for r in curr_schema] - curr_schema = [pyarrow.field(r[0], r[1], nullable=False) for r in curr_schema] + curr_schema = [pa.field(r[0], r[1], nullable=False) for r in curr_schema] # Trailing | requires extra field for in processing - curr_schema.append(("some_null", pyarrow.null())) + curr_schema.append(("some_null", pa.null())) - schema = pyarrow.schema(curr_schema) + schema = pa.schema(curr_schema) - source_file = os.path.abspath( - os.path.join(curr_dir, f"../../benchmarks/tpch/data/{filename}.csv") - ) - dest_file = os.path.abspath(os.path.join(curr_dir, f"./data/{filename}.parquet")) + source_file = (curr_dir / f"../../benchmarks/tpch/data/{filename}.csv").resolve() + dest_file = (curr_dir / f"./data/{filename}.parquet").resolve() df = ctx.read_csv(source_file, schema=schema, has_header=False, delimiter="|") - df = df.select_columns(*output_cols) + df = df.select(*output_cols) df.write_parquet(dest_file, compression="snappy") diff --git a/examples/tpch/q01_pricing_summary_report.py b/examples/tpch/q01_pricing_summary_report.py index cb9485a7a..3f97f00dc 100644 --- a/examples/tpch/q01_pricing_summary_report.py +++ b/examples/tpch/q01_pricing_summary_report.py @@ -30,7 +30,8 @@ """ import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path ctx = SessionContext() diff --git a/examples/tpch/q02_minimum_cost_supplier.py b/examples/tpch/q02_minimum_cost_supplier.py index f4020d7bb..7390d0892 100644 --- a/examples/tpch/q02_minimum_cost_supplier.py +++ b/examples/tpch/q02_minimum_cost_supplier.py @@ -30,7 +30,8 @@ """ import datafusion -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path # This is the part we're looking for. 
Values selected here differ from the spec in order to run @@ -43,10 +44,10 @@ ctx = SessionContext() -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( +df_part = ctx.read_parquet(get_data_path("part.parquet")).select( "p_partkey", "p_mfgr", "p_type", "p_size" ) -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_acctbal", "s_name", "s_address", @@ -55,13 +56,13 @@ "s_nationkey", "s_suppkey", ) -df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select_columns( +df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select( "ps_partkey", "ps_suppkey", "ps_supplycost" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_regionkey", "n_name" ) -df_region = ctx.read_parquet(get_data_path("region.parquet")).select_columns( +df_region = ctx.read_parquet(get_data_path("region.parquet")).select( "r_regionkey", "r_name" ) @@ -80,23 +81,27 @@ # Now that we have the region, find suppliers in that region. Suppliers are tied to their nation # and nations are tied to the region. -df_nation = df_nation.join(df_region, (["n_regionkey"], ["r_regionkey"]), how="inner") +df_nation = df_nation.join( + df_region, left_on=["n_regionkey"], right_on=["r_regionkey"], how="inner" +) df_supplier = df_supplier.join( - df_nation, (["s_nationkey"], ["n_nationkey"]), how="inner" + df_nation, left_on=["s_nationkey"], right_on=["n_nationkey"], how="inner" ) # Now that we know who the potential suppliers are for the part, we can limit out part # supplies table down. We can further join down to the specific parts we've identified # as matching the request -df = df_partsupp.join(df_supplier, (["ps_suppkey"], ["s_suppkey"]), how="inner") +df = df_partsupp.join( + df_supplier, left_on=["ps_suppkey"], right_on=["s_suppkey"], how="inner" +) # Locate the minimum cost across all suppliers. There are multiple ways you could do this, # but one way is to create a window function across all suppliers, find the minimum, and # create a column of that value. We can then filter down any rows for which the cost and # minimum do not match. -# The default window frame as of 5/6/2024 is from unbounded preceeding to the current row. +# The default window frame as of 5/6/2024 is from unbounded preceding to the current row. # We want to evaluate the entire data frame, so we specify this. window_frame = datafusion.WindowFrame("rows", None, None) df = df.with_column( @@ -111,11 +116,11 @@ df = df.filter(col("min_cost") == col("ps_supplycost")) -df = df.join(df_part, (["ps_partkey"], ["p_partkey"]), how="inner") +df = df.join(df_part, left_on=["ps_partkey"], right_on=["p_partkey"], how="inner") # From the problem statement, these are the values we wish to output -df = df.select_columns( +df = df.select( "s_acctbal", "s_name", "n_name", diff --git a/examples/tpch/q03_shipping_priority.py b/examples/tpch/q03_shipping_priority.py index 6a4886d83..fc1231e0a 100644 --- a/examples/tpch/q03_shipping_priority.py +++ b/examples/tpch/q03_shipping_priority.py @@ -27,7 +27,8 @@ as part of their TPC Benchmark H Specification revision 2.18.0. 
""" -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path SEGMENT_OF_INTEREST = "BUILDING" @@ -37,13 +38,13 @@ ctx = SessionContext() -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select( "c_mktsegment", "c_custkey" ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderdate", "o_shippriority", "o_custkey", "o_orderkey" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_orderkey", "l_extendedprice", "l_discount", "l_shipdate" ) @@ -55,9 +56,9 @@ # Join all 3 dataframes -df = df_customer.join(df_orders, (["c_custkey"], ["o_custkey"]), how="inner").join( - df_lineitem, (["o_orderkey"], ["l_orderkey"]), how="inner" -) +df = df_customer.join( + df_orders, left_on=["c_custkey"], right_on=["o_custkey"], how="inner" +).join(df_lineitem, left_on=["o_orderkey"], right_on=["l_orderkey"], how="inner") # Compute the revenue @@ -80,7 +81,7 @@ # Change the order that the columns are reported in just to match the spec -df = df.select_columns("l_orderkey", "revenue", "o_orderdate", "o_shippriority") +df = df.select("l_orderkey", "revenue", "o_orderdate", "o_shippriority") # Show result diff --git a/examples/tpch/q04_order_priority_checking.py b/examples/tpch/q04_order_priority_checking.py index 9dbd81674..426338aea 100644 --- a/examples/tpch/q04_order_priority_checking.py +++ b/examples/tpch/q04_order_priority_checking.py @@ -27,8 +27,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path # Ideally we could put 3 months into the interval. See note below. @@ -39,10 +41,10 @@ ctx = SessionContext() -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderdate", "o_orderpriority", "o_orderkey" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_orderkey", "l_commitdate", "l_receiptdate" ) @@ -53,9 +55,9 @@ # Limit results to cases where commitment date before receipt date # Aggregate the results so we only get one row to join with the order table. -# Alterately, and likely more idomatic is instead of `.aggregate` you could -# do `.select_columns("l_orderkey").distinct()`. The goal here is to show -# mulitple examples of how to use Data Fusion. +# Alternately, and likely more idiomatic is instead of `.aggregate` you could +# do `.select("l_orderkey").distinct()`. The goal here is to show +# multiple examples of how to use Data Fusion. 
df_lineitem = df_lineitem.filter(col("l_commitdate") < col("l_receiptdate")).aggregate( [col("l_orderkey")], [] ) @@ -66,7 +68,9 @@ ) # Perform the join to find only orders for which there are lineitems outside of expected range -df = df_orders.join(df_lineitem, (["o_orderkey"], ["l_orderkey"]), how="inner") +df = df_orders.join( + df_lineitem, left_on=["o_orderkey"], right_on=["l_orderkey"], how="inner" +) # Based on priority, find the number of entries df = df.aggregate( diff --git a/examples/tpch/q05_local_supplier_volume.py b/examples/tpch/q05_local_supplier_volume.py index f17f600a4..fa2b01dea 100644 --- a/examples/tpch/q05_local_supplier_volume.py +++ b/examples/tpch/q05_local_supplier_volume.py @@ -30,11 +30,12 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path - DATE_OF_INTEREST = "1994-01-01" INTERVAL_DAYS = 365 REGION_OF_INTEREST = "ASIA" @@ -47,22 +48,22 @@ ctx = SessionContext() -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select( "c_custkey", "c_nationkey" ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_custkey", "o_orderkey", "o_orderdate" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_orderkey", "l_suppkey", "l_extendedprice", "l_discount" ) -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_nationkey" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_regionkey", "n_name" ) -df_region = ctx.read_parquet(get_data_path("region.parquet")).select_columns( +df_region = ctx.read_parquet(get_data_path("region.parquet")).select( "r_regionkey", "r_name" ) @@ -76,15 +77,18 @@ # Join all the dataframes df = ( - df_customer.join(df_orders, (["c_custkey"], ["o_custkey"]), how="inner") - .join(df_lineitem, (["o_orderkey"], ["l_orderkey"]), how="inner") + df_customer.join( + df_orders, left_on=["c_custkey"], right_on=["o_custkey"], how="inner" + ) + .join(df_lineitem, left_on=["o_orderkey"], right_on=["l_orderkey"], how="inner") .join( df_supplier, - (["l_suppkey", "c_nationkey"], ["s_suppkey", "s_nationkey"]), + left_on=["l_suppkey", "c_nationkey"], + right_on=["s_suppkey", "s_nationkey"], how="inner", ) - .join(df_nation, (["s_nationkey"], ["n_nationkey"]), how="inner") - .join(df_region, (["n_regionkey"], ["r_regionkey"]), how="inner") + .join(df_nation, left_on=["s_nationkey"], right_on=["n_nationkey"], how="inner") + .join(df_region, left_on=["n_regionkey"], right_on=["r_regionkey"], how="inner") ) # Compute the final result diff --git a/examples/tpch/q06_forecasting_revenue_change.py b/examples/tpch/q06_forecasting_revenue_change.py index ec98aaf5e..1de5848b1 100644 --- a/examples/tpch/q06_forecasting_revenue_change.py +++ b/examples/tpch/q06_forecasting_revenue_change.py @@ -30,8 +30,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import 
SessionContext, col, lit +from datafusion import functions as F from util import get_data_path # Variables from the example query @@ -51,7 +53,7 @@ ctx = SessionContext() -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_shipdate", "l_quantity", "l_extendedprice", "l_discount" ) @@ -82,5 +84,5 @@ revenue = df.collect()[0]["revenue"][0].as_py() -# Note: the output value from this query may be dependant on the size of the database generated +# Note: the output value from this query may be dependent on the size of the database generated print(f"Potential lost revenue: {revenue:.2f}") diff --git a/examples/tpch/q07_volume_shipping.py b/examples/tpch/q07_volume_shipping.py index fd7323b79..ff2f891f1 100644 --- a/examples/tpch/q07_volume_shipping.py +++ b/examples/tpch/q07_volume_shipping.py @@ -29,8 +29,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path # Variables of interest to query over @@ -49,19 +51,19 @@ ctx = SessionContext() -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_nationkey" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_shipdate", "l_extendedprice", "l_discount", "l_suppkey", "l_orderkey" ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderkey", "o_custkey" ) -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select( "c_custkey", "c_nationkey" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_name" ) @@ -77,8 +79,8 @@ # the two nations of interest. Since there is no `otherwise()` statement, any values that do # not match these will result in a null value and then get filtered out. # -# To do the same using a simle filter would be: -# df_nation = df_nation.filter((F.col("n_name") == nation_1) | (F.col("n_name") == nation_2)) +# To do the same using a simple filter would be: +# df_nation = df_nation.filter((F.col("n_name") == nation_1) | (F.col("n_name") == nation_2)) # noqa: ERA001 df_nation = df_nation.with_column( "n_name", F.case(col("n_name")) @@ -90,20 +92,22 @@ # Limit suppliers to either nation df_supplier = df_supplier.join( - df_nation, (["s_nationkey"], ["n_nationkey"]), how="inner" + df_nation, left_on=["s_nationkey"], right_on=["n_nationkey"], how="inner" ).select(col("s_suppkey"), col("n_name").alias("supp_nation")) # Limit customers to either nation df_customer = df_customer.join( - df_nation, (["c_nationkey"], ["n_nationkey"]), how="inner" + df_nation, left_on=["c_nationkey"], right_on=["n_nationkey"], how="inner" ).select(col("c_custkey"), col("n_name").alias("cust_nation")) # Join up all the data frames from line items, and make sure the supplier and customer are in # different nations. 
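The join chain below uses the keyword spelling this PR migrates every example to. A small sketch of old versus new on toy frames (`from_pydict` and the key names are assumptions for illustration):

```python
from datafusion import SessionContext

ctx = SessionContext()
left = ctx.from_pydict({"l_id": [1, 2], "v": [10, 20]})
right = ctx.from_pydict({"r_id": [2, 3], "w": [200, 300]})

# Old: keys passed positionally as a (left_keys, right_keys) tuple:
#   left.join(right, (["l_id"], ["r_id"]), how="inner")
# New: explicit keywords make the two sides unambiguous:
inner = left.join(right, left_on=["l_id"], right_on=["r_id"], how="inner")

# When both sides share the key names, `on=` is the shorthand, as in the
# anti-join equality check in _tests.py above:
#   df.join(df_expected, on=cols, how="anti").count() == 0
```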
df = ( - df_lineitem.join(df_orders, (["l_orderkey"], ["o_orderkey"]), how="inner") - .join(df_customer, (["o_custkey"], ["c_custkey"]), how="inner") - .join(df_supplier, (["l_suppkey"], ["s_suppkey"]), how="inner") + df_lineitem.join( + df_orders, left_on=["l_orderkey"], right_on=["o_orderkey"], how="inner" + ) + .join(df_customer, left_on=["o_custkey"], right_on=["c_custkey"], how="inner") + .join(df_supplier, left_on=["l_suppkey"], right_on=["s_suppkey"], how="inner") .filter(col("cust_nation") != col("supp_nation")) ) diff --git a/examples/tpch/q08_market_share.py b/examples/tpch/q08_market_share.py index cd6bc1fa9..4bf50efba 100644 --- a/examples/tpch/q08_market_share.py +++ b/examples/tpch/q08_market_share.py @@ -28,8 +28,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path supplier_nation = lit("BRAZIL") @@ -47,25 +49,23 @@ ctx = SessionContext() -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( - "p_partkey", "p_type" -) -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_part = ctx.read_parquet(get_data_path("part.parquet")).select("p_partkey", "p_type") +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_nationkey" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_partkey", "l_extendedprice", "l_discount", "l_suppkey", "l_orderkey" ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderkey", "o_custkey", "o_orderdate" ) -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select( "c_custkey", "c_nationkey" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_name", "n_regionkey" ) -df_region = ctx.read_parquet(get_data_path("region.parquet")).select_columns( +df_region = ctx.read_parquet(get_data_path("region.parquet")).select( "r_regionkey", "r_name" ) @@ -91,27 +91,27 @@ # After this join we have all of the possible sales nations df_regional_customers = df_regional_customers.join( - df_nation, (["r_regionkey"], ["n_regionkey"]), how="inner" + df_nation, left_on=["r_regionkey"], right_on=["n_regionkey"], how="inner" ) # Now find the possible customers df_regional_customers = df_regional_customers.join( - df_customer, (["n_nationkey"], ["c_nationkey"]), how="inner" + df_customer, left_on=["n_nationkey"], right_on=["c_nationkey"], how="inner" ) # Next find orders for these customers df_regional_customers = df_regional_customers.join( - df_orders, (["c_custkey"], ["o_custkey"]), how="inner" + df_orders, left_on=["c_custkey"], right_on=["o_custkey"], how="inner" ) # Find all line items from these orders df_regional_customers = df_regional_customers.join( - df_lineitem, (["o_orderkey"], ["l_orderkey"]), how="inner" + df_lineitem, left_on=["o_orderkey"], right_on=["l_orderkey"], how="inner" ) # Limit to the part of interest df_regional_customers = df_regional_customers.join( - df_part, (["l_partkey"], ["p_partkey"]), how="inner" + df_part, left_on=["l_partkey"], 
right_on=["p_partkey"], how="inner" ) # Compute the volume for each line item @@ -128,12 +128,12 @@ # Determine the suppliers by the limited nation key we have in our single row df above df_national_suppliers = df_national_suppliers.join( - df_supplier, (["n_nationkey"], ["s_nationkey"]), how="inner" + df_supplier, left_on=["n_nationkey"], right_on=["s_nationkey"], how="inner" ) # When we join to the customer dataframe, we don't want to confuse other columns, so only # select the supplier key that we need -df_national_suppliers = df_national_suppliers.select_columns("s_suppkey") +df_national_suppliers = df_national_suppliers.select("s_suppkey") # Part 3: Combine suppliers and customers and compute the market share @@ -143,14 +143,14 @@ # column only from suppliers in the nation we are evaluating. df = df_regional_customers.join( - df_national_suppliers, (["l_suppkey"], ["s_suppkey"]), how="left" + df_national_suppliers, left_on=["l_suppkey"], right_on=["s_suppkey"], how="left" ) # Use a case statement to compute the volume sold by suppliers in the nation of interest df = df.with_column( "national_volume", F.case(col("s_suppkey").is_null()) - .when(lit(False), col("volume")) + .when(lit(value=False), col("volume")) .otherwise(lit(0.0)), ) diff --git a/examples/tpch/q09_product_type_profit_measure.py b/examples/tpch/q09_product_type_profit_measure.py index b4a7369f8..e2abbd095 100644 --- a/examples/tpch/q09_product_type_profit_measure.py +++ b/examples/tpch/q09_product_type_profit_measure.py @@ -30,7 +30,8 @@ """ import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path part_color = lit("green") @@ -39,16 +40,14 @@ ctx = SessionContext() -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( - "p_partkey", "p_name" -) -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_part = ctx.read_parquet(get_data_path("part.parquet")).select("p_partkey", "p_name") +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_nationkey" ) -df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select_columns( +df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select( "ps_suppkey", "ps_partkey", "ps_supplycost" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_partkey", "l_extendedprice", "l_discount", @@ -56,10 +55,10 @@ "l_orderkey", "l_quantity", ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderkey", "o_custkey", "o_orderdate" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_name", "n_regionkey" ) @@ -67,13 +66,16 @@ df = df_part.filter(F.strpos(col("p_name"), part_color) > lit(0)) # We have a series of joins that get us to limit down to the line items we need -df = df.join(df_lineitem, (["p_partkey"], ["l_partkey"]), how="inner") -df = df.join(df_supplier, (["l_suppkey"], ["s_suppkey"]), how="inner") -df = df.join(df_orders, (["l_orderkey"], ["o_orderkey"]), how="inner") +df = df.join(df_lineitem, left_on=["p_partkey"], right_on=["l_partkey"], how="inner") +df = df.join(df_supplier, left_on=["l_suppkey"], 
right_on=["s_suppkey"], how="inner") +df = df.join(df_orders, left_on=["l_orderkey"], right_on=["o_orderkey"], how="inner") df = df.join( - df_partsupp, (["l_suppkey", "l_partkey"], ["ps_suppkey", "ps_partkey"]), how="inner" + df_partsupp, + left_on=["l_suppkey", "l_partkey"], + right_on=["ps_suppkey", "ps_partkey"], + how="inner", ) -df = df.join(df_nation, (["s_nationkey"], ["n_nationkey"]), how="inner") +df = df.join(df_nation, left_on=["s_nationkey"], right_on=["n_nationkey"], how="inner") # Compute the intermediate values and limit down to the expressions we need df = df.select( diff --git a/examples/tpch/q10_returned_item_reporting.py b/examples/tpch/q10_returned_item_reporting.py index 78327c3ad..ed822e264 100644 --- a/examples/tpch/q10_returned_item_reporting.py +++ b/examples/tpch/q10_returned_item_reporting.py @@ -30,8 +30,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path DATE_START_OF_QUARTER = "1993-10-01" @@ -44,7 +46,7 @@ ctx = SessionContext() -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select( "c_custkey", "c_nationkey", "c_name", @@ -53,13 +55,13 @@ "c_phone", "c_comment", ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_extendedprice", "l_discount", "l_orderkey", "l_returnflag" ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderkey", "o_custkey", "o_orderdate" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_name", "n_regionkey" ) @@ -74,7 +76,7 @@ col("o_orderdate") < date_start_of_quarter + interval_one_quarter ) -df = df.join(df_lineitem, (["o_orderkey"], ["l_orderkey"]), how="inner") +df = df.join(df_lineitem, left_on=["o_orderkey"], right_on=["l_orderkey"], how="inner") # Compute the revenue df = df.aggregate( @@ -83,11 +85,11 @@ ) # Now join in the customer data -df = df.join(df_customer, (["o_custkey"], ["c_custkey"]), how="inner") -df = df.join(df_nation, (["c_nationkey"], ["n_nationkey"]), how="inner") +df = df.join(df_customer, left_on=["o_custkey"], right_on=["c_custkey"], how="inner") +df = df.join(df_nation, left_on=["c_nationkey"], right_on=["n_nationkey"], how="inner") # These are the columns the problem statement requires -df = df.select_columns( +df = df.select( "c_custkey", "c_name", "revenue", diff --git a/examples/tpch/q11_important_stock_identification.py b/examples/tpch/q11_important_stock_identification.py index 267248707..22829ab7c 100644 --- a/examples/tpch/q11_important_stock_identification.py +++ b/examples/tpch/q11_important_stock_identification.py @@ -27,7 +27,8 @@ as part of their TPC Benchmark H Specification revision 2.18.0. 
""" -from datafusion import SessionContext, WindowFrame, col, lit, functions as F +from datafusion import SessionContext, WindowFrame, col, lit +from datafusion import functions as F from util import get_data_path NATION = "GERMANY" @@ -37,13 +38,13 @@ ctx = SessionContext() -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_nationkey" ) -df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select_columns( +df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select( "ps_supplycost", "ps_availqty", "ps_suppkey", "ps_partkey" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_name" ) @@ -52,9 +53,11 @@ # Find part supplies of within this target nation -df = df_nation.join(df_supplier, (["n_nationkey"], ["s_nationkey"]), how="inner") +df = df_nation.join( + df_supplier, left_on=["n_nationkey"], right_on=["s_nationkey"], how="inner" +) -df = df.join(df_partsupp, (["s_suppkey"], ["ps_suppkey"]), how="inner") +df = df.join(df_partsupp, left_on=["s_suppkey"], right_on=["ps_suppkey"], how="inner") # Compute the value of individual parts @@ -63,7 +66,7 @@ # Compute total value of specific parts df = df.aggregate([col("ps_partkey")], [F.sum(col("value")).alias("value")]) -# By default window functions go from unbounded preceeding to current row, but we want +# By default window functions go from unbounded preceding to current row, but we want # to compute this sum across all rows window_frame = WindowFrame("rows", None, None) @@ -75,7 +78,7 @@ df = df.filter(col("value") / col("total_value") >= lit(FRACTION)) # We only need to report on these two columns -df = df.select_columns("ps_partkey", "value") +df = df.select("ps_partkey", "value") # Sort in descending order of value df = df.sort(col("value").sort(ascending=False)) diff --git a/examples/tpch/q12_ship_mode_order_priority.py b/examples/tpch/q12_ship_mode_order_priority.py index 150870c64..9071597f0 100644 --- a/examples/tpch/q12_ship_mode_order_priority.py +++ b/examples/tpch/q12_ship_mode_order_priority.py @@ -30,8 +30,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path SHIP_MODE_1 = "MAIL" @@ -42,10 +44,10 @@ ctx = SessionContext() -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderkey", "o_orderpriority" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_orderkey", "l_shipmode", "l_commitdate", "l_shipdate", "l_receiptdate" ) @@ -71,11 +73,11 @@ # matches either of the two values, but we want to show doing some array operations in this # example. If you want to see this done with filters, comment out the above line and uncomment # this one. 
-# df = df.filter((col("l_shipmode") == lit(SHIP_MODE_1)) | (col("l_shipmode") == lit(SHIP_MODE_2))) +# df = df.filter((col("l_shipmode") == lit(SHIP_MODE_1)) | (col("l_shipmode") == lit(SHIP_MODE_2))) # noqa: ERA001 # We need order priority, so join order df to line item -df = df.join(df_orders, (["l_orderkey"], ["o_orderkey"]), how="inner") +df = df.join(df_orders, left_on=["l_orderkey"], right_on=["o_orderkey"], how="inner") # Restrict to line items we care about based on the problem statement. df = df.filter(col("l_commitdate") < col("l_receiptdate")) diff --git a/examples/tpch/q13_customer_distribution.py b/examples/tpch/q13_customer_distribution.py index bc0a5bd1f..93f082ea3 100644 --- a/examples/tpch/q13_customer_distribution.py +++ b/examples/tpch/q13_customer_distribution.py @@ -28,7 +28,8 @@ as part of their TPC Benchmark H Specification revision 2.18.0. """ -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path WORD_1 = "special" @@ -38,12 +39,10 @@ ctx = SessionContext() -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_custkey", "o_comment" ) -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( - "c_custkey" -) +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select("c_custkey") # Use a regex to remove special cases df_orders = df_orders.filter( @@ -51,7 +50,9 @@ ) # Since we may have customers with no orders we must do a left join -df = df_customer.join(df_orders, (["c_custkey"], ["o_custkey"]), how="left") +df = df_customer.join( + df_orders, left_on=["c_custkey"], right_on=["o_custkey"], how="left" +) # Find the number of orders for each customer df = df.aggregate([col("c_custkey")], [F.count(col("o_custkey")).alias("c_count")]) diff --git a/examples/tpch/q14_promotion_effect.py b/examples/tpch/q14_promotion_effect.py index 8cb1e4c5a..d62f76e3c 100644 --- a/examples/tpch/q14_promotion_effect.py +++ b/examples/tpch/q14_promotion_effect.py @@ -27,8 +27,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path DATE = "1995-09-01" @@ -41,12 +43,10 @@ ctx = SessionContext() -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_partkey", "l_shipdate", "l_extendedprice", "l_discount" ) -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( - "p_partkey", "p_type" -) +df_part = ctx.read_parquet(get_data_path("part.parquet")).select("p_partkey", "p_type") # Check part type begins with PROMO @@ -59,7 +59,9 @@ ) # Left join so we can sum up the promo parts different from other parts -df = df_lineitem.join(df_part, (["l_partkey"], ["p_partkey"]), "left") +df = df_lineitem.join( + df_part, left_on=["l_partkey"], right_on=["p_partkey"], how="left" +) # Make a factor of 1.0 if it is a promotion, 0.0 otherwise df = df.with_column("promo_factor", F.coalesce(col("promo_factor"), lit(0.0))) diff --git a/examples/tpch/q15_top_supplier.py b/examples/tpch/q15_top_supplier.py index 4b9e4c1dd..c321048f2 100644 --- a/examples/tpch/q15_top_supplier.py +++ b/examples/tpch/q15_top_supplier.py @@ -27,8 +27,10 @@ """ 
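Several of these queries (q12 above, q16 and q22 below) filter by membership in a small set with array helpers instead of chained OR comparisons. A toy sketch using only functions that appear in this diff (`from_pydict` and the values are illustrative):

```python
from datafusion import SessionContext, col, lit
from datafusion import functions as F

ctx = SessionContext()
df = ctx.from_pydict({"l_shipmode": ["MAIL", "SHIP", "AIR"]})

modes = F.make_array(lit("MAIL"), lit("SHIP"))
# array_position() returns a non-null index only when the value occurs in
# the array, so a row survives the filter iff it is in the set.
df = df.filter(~F.array_position(modes, col("l_shipmode")).is_null())
```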
from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, WindowFrame, col, lit, functions as F +from datafusion import SessionContext, WindowFrame, col, lit +from datafusion import functions as F from util import get_data_path DATE = "1996-01-01" @@ -41,10 +43,10 @@ ctx = SessionContext() -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_suppkey", "l_shipdate", "l_extendedprice", "l_discount" ) -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_name", "s_address", @@ -76,10 +78,10 @@ # Now that we know the supplier(s) with maximum revenue, get the rest of their information # from the supplier table -df = df.join(df_supplier, (["l_suppkey"], ["s_suppkey"]), "inner") +df = df.join(df_supplier, left_on=["l_suppkey"], right_on=["s_suppkey"], how="inner") -# Return only the colums requested -df = df.select_columns("s_suppkey", "s_name", "s_address", "s_phone", "total_revenue") +# Return only the columns requested +df = df.select("s_suppkey", "s_name", "s_address", "s_phone", "total_revenue") # If we have more than one, sort by supplier number (suppkey) df = df.sort(col("s_suppkey").sort()) diff --git a/examples/tpch/q16_part_supplier_relationship.py b/examples/tpch/q16_part_supplier_relationship.py index fdcb5b4db..65043ffda 100644 --- a/examples/tpch/q16_part_supplier_relationship.py +++ b/examples/tpch/q16_part_supplier_relationship.py @@ -29,7 +29,8 @@ """ import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path BRAND = "Brand#45" @@ -40,13 +41,13 @@ ctx = SessionContext() -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( +df_part = ctx.read_parquet(get_data_path("part.parquet")).select( "p_partkey", "p_brand", "p_type", "p_size" ) -df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select_columns( +df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select( "ps_suppkey", "ps_partkey" ) -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_comment" ) @@ -56,7 +57,7 @@ # Remove unwanted suppliers df_partsupp = df_partsupp.join( - df_unwanted_suppliers, (["ps_suppkey"], ["s_suppkey"]), "anti" + df_unwanted_suppliers, left_on=["ps_suppkey"], right_on=["s_suppkey"], how="anti" ) # Select the parts we are interested in @@ -73,9 +74,11 @@ p_sizes = F.make_array(*[lit(s).cast(pa.int32()) for s in SIZES_OF_INTEREST]) df_part = df_part.filter(~F.array_position(p_sizes, col("p_size")).is_null()) -df = df_part.join(df_partsupp, (["p_partkey"], ["ps_partkey"]), "inner") +df = df_part.join( + df_partsupp, left_on=["p_partkey"], right_on=["ps_partkey"], how="inner" +) -df = df.select_columns("p_brand", "p_type", "p_size", "ps_suppkey").distinct() +df = df.select("p_brand", "p_type", "p_size", "ps_suppkey").distinct() df = df.aggregate( [col("p_brand"), col("p_type"), col("p_size")], diff --git a/examples/tpch/q17_small_quantity_order.py b/examples/tpch/q17_small_quantity_order.py index e0ee8bb90..6d76fe506 100644 --- a/examples/tpch/q17_small_quantity_order.py +++ b/examples/tpch/q17_small_quantity_order.py @@ -28,7 +28,8 @@ 
as part of their TPC Benchmark H Specification revision 2.18.0. """ -from datafusion import SessionContext, WindowFrame, col, lit, functions as F +from datafusion import SessionContext, WindowFrame, col, lit +from datafusion import functions as F from util import get_data_path BRAND = "Brand#23" @@ -38,10 +39,10 @@ ctx = SessionContext() -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( +df_part = ctx.read_parquet(get_data_path("part.parquet")).select( "p_partkey", "p_brand", "p_container" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_partkey", "l_quantity", "l_extendedprice" ) @@ -51,7 +52,7 @@ ) # Combine data -df = df.join(df_lineitem, (["p_partkey"], ["l_partkey"]), "inner") +df = df.join(df_lineitem, left_on=["p_partkey"], right_on=["l_partkey"], how="inner") # Find the average quantity window_frame = WindowFrame("rows", None, None) diff --git a/examples/tpch/q18_large_volume_customer.py b/examples/tpch/q18_large_volume_customer.py index 10c5f6e6a..834d181c9 100644 --- a/examples/tpch/q18_large_volume_customer.py +++ b/examples/tpch/q18_large_volume_customer.py @@ -26,7 +26,8 @@ as part of their TPC Benchmark H Specification revision 2.18.0. """ -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path QUANTITY = 300 @@ -35,13 +36,13 @@ ctx = SessionContext() -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select( "c_custkey", "c_name" ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select( "o_orderkey", "o_custkey", "o_orderdate", "o_totalprice" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_orderkey", "l_quantity", "l_extendedprice" ) @@ -54,10 +55,10 @@ # We've identified the orders of interest, now join the additional data # we are required to report on -df = df.join(df_orders, (["l_orderkey"], ["o_orderkey"]), "inner") -df = df.join(df_customer, (["o_custkey"], ["c_custkey"]), "inner") +df = df.join(df_orders, left_on=["l_orderkey"], right_on=["o_orderkey"], how="inner") +df = df.join(df_customer, left_on=["o_custkey"], right_on=["c_custkey"], how="inner") -df = df.select_columns( +df = df.select( "c_name", "c_custkey", "o_orderkey", "o_orderdate", "o_totalprice", "total_quantity" ) diff --git a/examples/tpch/q19_discounted_revenue.py b/examples/tpch/q19_discounted_revenue.py index b15cd98bf..bd492aac0 100644 --- a/examples/tpch/q19_discounted_revenue.py +++ b/examples/tpch/q19_discounted_revenue.py @@ -27,7 +27,8 @@ """ import pyarrow as pa -from datafusion import SessionContext, col, lit, udf, functions as F +from datafusion import SessionContext, col, lit, udf +from datafusion import functions as F from util import get_data_path items_of_interest = { @@ -52,10 +53,10 @@ ctx = SessionContext() -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( +df_part = ctx.read_parquet(get_data_path("part.parquet")).select( "p_partkey", "p_brand", "p_container", "p_size" ) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_lineitem = 
ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_partkey", "l_quantity", "l_shipmode", @@ -72,7 +73,7 @@ (col("l_shipmode") == lit("AIR")) | (col("l_shipmode") == lit("AIR REG")) ) -df = df.join(df_part, (["l_partkey"], ["p_partkey"]), "inner") +df = df.join(df_part, left_on=["l_partkey"], right_on=["p_partkey"], how="inner") # Create the user defined function (UDF) definition that does the work @@ -88,8 +89,8 @@ def is_of_interest( same number of rows in the output. """ result = [] - for idx, brand in enumerate(brand_arr): - brand = brand.as_py() + for idx, brand_val in enumerate(brand_arr): + brand = brand_val.as_py() if brand in items_of_interest: values_of_interest = items_of_interest[brand] diff --git a/examples/tpch/q20_potential_part_promotion.py b/examples/tpch/q20_potential_part_promotion.py index 05a267450..a25188d31 100644 --- a/examples/tpch/q20_potential_part_promotion.py +++ b/examples/tpch/q20_potential_part_promotion.py @@ -28,8 +28,10 @@ """ from datetime import datetime + import pyarrow as pa -from datafusion import SessionContext, col, lit, functions as F +from datafusion import SessionContext, col, lit +from datafusion import functions as F from util import get_data_path COLOR_OF_INTEREST = "forest" @@ -40,19 +42,17 @@ ctx = SessionContext() -df_part = ctx.read_parquet(get_data_path("part.parquet")).select_columns( - "p_partkey", "p_name" -) -df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns( +df_part = ctx.read_parquet(get_data_path("part.parquet")).select("p_partkey", "p_name") +df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select( "l_shipdate", "l_partkey", "l_suppkey", "l_quantity" ) -df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select_columns( +df_partsupp = ctx.read_parquet(get_data_path("partsupp.parquet")).select( "ps_partkey", "ps_suppkey", "ps_availqty" ) -df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns( +df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select( "s_suppkey", "s_address", "s_name", "s_nationkey" ) -df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns( +df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select( "n_nationkey", "n_name" ) @@ -72,26 +72,29 @@ ) # This will filter down the line items to the parts of interest -df = df.join(df_part, (["l_partkey"], ["p_partkey"]), "inner") +df = df.join(df_part, left_on="l_partkey", right_on="p_partkey", how="inner") -# Compute the total sold and limit ourselves to indivdual supplier/part combinations +# Compute the total sold and limit ourselves to individual supplier/part combinations df = df.aggregate( [col("l_partkey"), col("l_suppkey")], [F.sum(col("l_quantity")).alias("total_sold")] ) df = df.join( - df_partsupp, (["l_partkey", "l_suppkey"], ["ps_partkey", "ps_suppkey"]), "inner" + df_partsupp, + left_on=["l_partkey", "l_suppkey"], + right_on=["ps_partkey", "ps_suppkey"], + how="inner", ) # Find cases of excess quantity df.filter(col("ps_availqty") > lit(0.5) * col("total_sold")) # We could do these joins earlier, but now limit to the nation of interest suppliers -df = df.join(df_supplier, (["ps_suppkey"], ["s_suppkey"]), "inner") -df = df.join(df_nation, (["s_nationkey"], ["n_nationkey"]), "inner") +df = df.join(df_supplier, left_on=["ps_suppkey"], right_on=["s_suppkey"], how="inner") +df = df.join(df_nation, left_on=["s_nationkey"], right_on=["n_nationkey"], how="inner") # Restrict to the requested data per the problem 
statement
-df = df.select_columns("s_name", "s_address").distinct()
+df = df.select("s_name", "s_address").distinct()

 df = df.sort(col("s_name").sort())
diff --git a/examples/tpch/q21_suppliers_kept_orders_waiting.py b/examples/tpch/q21_suppliers_kept_orders_waiting.py
index 9f59804e5..619c4406b 100644
--- a/examples/tpch/q21_suppliers_kept_orders_waiting.py
+++ b/examples/tpch/q21_suppliers_kept_orders_waiting.py
@@ -26,7 +26,8 @@
 as part of their TPC Benchmark H Specification revision 2.18.0.
 """
-from datafusion import SessionContext, col, lit, functions as F
+from datafusion import SessionContext, col, lit
+from datafusion import functions as F
 from util import get_data_path

 NATION_OF_INTEREST = "SAUDI ARABIA"
@@ -35,16 +36,16 @@
 ctx = SessionContext()

-df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns(
+df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select(
     "o_orderkey", "o_orderstatus"
 )
-df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select_columns(
+df_lineitem = ctx.read_parquet(get_data_path("lineitem.parquet")).select(
     "l_orderkey", "l_receiptdate", "l_commitdate", "l_suppkey"
 )
-df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select_columns(
+df_supplier = ctx.read_parquet(get_data_path("supplier.parquet")).select(
     "s_suppkey", "s_name", "s_nationkey"
 )
-df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select_columns(
+df_nation = ctx.read_parquet(get_data_path("nation.parquet")).select(
     "n_nationkey", "n_name"
 )
@@ -52,19 +53,19 @@
 df_suppliers_of_interest = df_nation.filter(col("n_name") == lit(NATION_OF_INTEREST))
 df_suppliers_of_interest = df_suppliers_of_interest.join(
-    df_supplier, (["n_nationkey"], ["s_nationkey"]), "inner"
+    df_supplier, left_on="n_nationkey", right_on="s_nationkey", how="inner"
 )

 # Find the failed orders and all their line items
 df = df_orders.filter(col("o_orderstatus") == lit("F"))
-df = df_lineitem.join(df, (["l_orderkey"], ["o_orderkey"]), "inner")
+df = df_lineitem.join(df, left_on="l_orderkey", right_on="o_orderkey", how="inner")

 # Identify the line items for which the order is failed due to.
 df = df.with_column(
     "failed_supp",
     F.case(col("l_receiptdate") > col("l_commitdate"))
-    .when(lit(True), col("l_suppkey"))
+    .when(lit(value=True), col("l_suppkey"))
     .end(),
 )
@@ -74,7 +75,7 @@
 # only orders where this array is larger than one for multiple supplier orders. The second column
 # is all of the suppliers who failed to make their commitment. We can filter the second column for
 # arrays with size one. That combination will give us orders that had multiple suppliers where only
-# one failed. Use distinct=True in the blow aggregation so we don't get multipe line items from the
+# one failed. Use distinct=True in the below aggregation so we don't get multiple line items from the
 # same supplier reported in either array.
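Stepping back to the `failed_supp` column computed above: `lit(value=True)` is the keyword-only boolean form, presumably to keep ruff's boolean-trap (FBT) checks quiet without changing behavior. A toy sketch of that case/when pattern (`from_pydict` and the column names are illustrative):

```python
from datafusion import SessionContext, col, lit
from datafusion import functions as F

ctx = SessionContext()
df = ctx.from_pydict({"receipt": [3, 1], "commit": [2, 2], "suppkey": [10, 11]})

df = df.with_column(
    "failed_supp",
    F.case(col("receipt") > col("commit"))
    .when(lit(value=True), col("suppkey"))  # late rows -> their supplier key
    .end(),  # no .otherwise(): on-time rows become null
)
```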
df = df.aggregate( [col("o_orderkey")], @@ -102,7 +103,9 @@ ) # Join to the supplier of interest list for the nation of interest -df = df.join(df_suppliers_of_interest, (["suppkey"], ["s_suppkey"]), "inner") +df = df.join( + df_suppliers_of_interest, left_on=["suppkey"], right_on=["s_suppkey"], how="inner" +) # Count how many orders that supplier is the only failed supplier for df = df.aggregate([col("s_name")], [F.count(col("o_orderkey")).alias("numwait")]) diff --git a/examples/tpch/q22_global_sales_opportunity.py b/examples/tpch/q22_global_sales_opportunity.py index 622c1429f..c4d115b74 100644 --- a/examples/tpch/q22_global_sales_opportunity.py +++ b/examples/tpch/q22_global_sales_opportunity.py @@ -26,7 +26,8 @@ as part of their TPC Benchmark H Specification revision 2.18.0. """ -from datafusion import SessionContext, WindowFrame, col, lit, functions as F +from datafusion import SessionContext, WindowFrame, col, lit +from datafusion import functions as F from util import get_data_path NATION_CODES = [13, 31, 23, 29, 30, 18, 17] @@ -35,24 +36,22 @@ ctx = SessionContext() -df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select_columns( +df_customer = ctx.read_parquet(get_data_path("customer.parquet")).select( "c_phone", "c_acctbal", "c_custkey" ) -df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select_columns( - "o_custkey" -) +df_orders = ctx.read_parquet(get_data_path("orders.parquet")).select("o_custkey") # The nation code is a two digit number, but we need to convert it to a string literal nation_codes = F.make_array(*[lit(str(n)) for n in NATION_CODES]) -# Use the substring operation to extract the first two charaters of the phone number +# Use the substring operation to extract the first two characters of the phone number df = df_customer.with_column("cntrycode", F.substring(col("c_phone"), lit(0), lit(3))) # Limit our search to customers with some balance and in the country code above df = df.filter(col("c_acctbal") > lit(0.0)) df = df.filter(~F.array_position(nation_codes, col("cntrycode")).is_null()) -# Compute the average balance. By default, the window frame is from unbounded preceeding to the +# Compute the average balance. By default, the window frame is from unbounded preceding to the # current row. We want our frame to cover the entire data frame. window_frame = WindowFrame("rows", None, None) df = df.with_column( @@ -64,7 +63,7 @@ df = df.filter(col("c_acctbal") > col("avg_balance")) # Limit results to customers with no orders -df = df.join(df_orders, (["c_custkey"], ["o_custkey"]), "anti") +df = df.join(df_orders, left_on="c_custkey", right_on="o_custkey", how="anti") # Count up the customers and the balances df = df.aggregate( diff --git a/examples/tpch/util.py b/examples/tpch/util.py index 7e3d659dd..ec53bcd15 100644 --- a/examples/tpch/util.py +++ b/examples/tpch/util.py @@ -19,18 +19,16 @@ Common utilities for running TPC-H examples. 
""" -import os +from pathlib import Path -def get_data_path(filename: str) -> str: - path = os.path.dirname(os.path.abspath(__file__)) +def get_data_path(filename: str) -> Path: + path = Path(__file__).resolve().parent - return os.path.join(path, "data", filename) + return path / "data" / filename -def get_answer_file(answer_file: str) -> str: - path = os.path.dirname(os.path.abspath(__file__)) +def get_answer_file(answer_file: str) -> Path: + path = Path(__file__).resolve().parent - return os.path.join( - path, "../../benchmarks/tpch/data/answers", f"{answer_file}.out" - ) + return path / "../../benchmarks/tpch/data/answers" / f"{answer_file}.out" diff --git a/pyproject.toml b/pyproject.toml index b706065a4..b238e049e 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,35 +16,39 @@ # under the License. [build-system] -requires = ["maturin>=1.5.1,<1.6.0"] +requires = ["maturin>=1.8.1"] build-backend = "maturin" [project] name = "datafusion" description = "Build and run queries against data" readme = "README.md" -license = {file = "LICENSE.txt"} -requires-python = ">=3.6" -keywords = ["datafusion", "dataframe", "rust", "query-engine"] -classifier = [ - "Development Status :: 2 - Pre-Alpha", - "Intended Audience :: Developers", - "License :: OSI Approved :: Apache Software License", - "License :: OSI Approved", - "Operating System :: MacOS", - "Operating System :: Microsoft :: Windows", - "Operating System :: POSIX :: Linux", - "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.7", - "Programming Language :: Python :: 3.8", - "Programming Language :: Python :: 3.9", - "Programming Language :: Python :: 3.10", - "Programming Language :: Python", - "Programming Language :: Rust", +license = { file = "LICENSE.txt" } +requires-python = ">=3.10" +keywords = ["dataframe", "datafusion", "query-engine", "rust"] +classifiers = [ + "Development Status :: 2 - Pre-Alpha", + "Intended Audience :: Developers", + "License :: OSI Approved :: Apache Software License", + "License :: OSI Approved", + "Operating System :: MacOS", + "Operating System :: Microsoft :: Windows", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", + "Programming Language :: Python :: 3.14", + "Programming Language :: Python", + "Programming Language :: Rust", ] dependencies = [ - "pyarrow>=11.0.0", + "pyarrow>=16.0.0;python_version<'3.14'", + "pyarrow>=22.0.0;python_version>='3.14'", + "typing-extensions;python_version<'3.13'", ] +dynamic = ["version"] [project.urls] homepage = "https://datafusion.apache.org/python" @@ -57,10 +61,145 @@ profile = "black" [tool.maturin] python-source = "python" module-name = "datafusion._internal" -include = [ - { path = "Cargo.lock", format = "sdist" } -] -exclude = [".github/**", "ci/**", ".asf.yaml"] +include = [{ path = "Cargo.lock", format = "sdist" }] +exclude = [".asf.yaml", ".github/**", "ci/**"] # Require Cargo.lock is up to date locked = true features = ["substrait"] + +[tool.pytest.ini_options] +asyncio_mode = "auto" +asyncio_default_fixture_loop_scope = "function" +addopts = "--doctest-modules" +doctest_optionflags = ["NORMALIZE_WHITESPACE", "ELLIPSIS"] +testpaths = ["python/tests", "python/datafusion"] + +# Enable docstring linting using the google style guide +[tool.ruff.lint] +select = ["ALL"] +ignore = [ + "A001", # Allow using words like min as 
variable names + "A002", # Allow using words like filter as variable names + "A005", # Allow module named io + "ANN401", # Allow Any for wrapper classes + "COM812", # Recommended to ignore these rules when using with ruff-format + "FBT001", # Allow boolean positional args + "FBT002", # Allow boolean positional args + "FIX002", # Allow TODO lines - consider removing at some point + "ISC001", # Recommended to ignore these rules when using with ruff-format + "N812", # Allow importing functions as `F` + "PD901", # Allow variable name df + "PLR0913", # Allow many arguments in function definition + "SLF001", # Allow accessing private members + "TD002", # Do not require author names in TODO statements + "TD003", # Allow TODO lines +] + +[tool.ruff.lint.pydocstyle] +convention = "google" + +[tool.ruff.lint.pycodestyle] +max-doc-length = 88 + +[tool.ruff.lint.flake8-boolean-trap] +extend-allowed-calls = ["datafusion.lit", "lit"] + +# Disable docstring checking for these directories +[tool.ruff.lint.per-file-ignores] +"python/tests/*" = [ + "ANN", + "ARG", + "BLE001", + "D", + "PD", + "PLC0415", + "PLR0913", + "PLR2004", + "PT004", + "PT011", + "RUF015", + "S101", + "S608", + "SLF", +] +"examples/*" = [ + "ANN001", + "ANN202", + "D", + "DTZ007", + "E501", + "INP001", + "PLR2004", + "RUF015", + "S101", + "T201", + "W505", +] +"dev/*" = [ + "ANN001", + "C", + "D", + "E", + "ERA001", + "EXE", + "N817", + "PLR", + "S", + "SIM", + "T", + "UP", +] +"benchmarks/*" = [ + "ANN001", + "BLE", + "D", + "E", + "ERA001", + "EXE", + "F", + "FURB", + "INP001", + "PLR", + "S", + "SIM", + "T", + "TD", + "TRY", + "UP", +] +"docs/*" = ["D"] +"docs/source/conf.py" = ["ANN001", "ERA001", "INP001"] + +[tool.codespell] +skip = ["./python/tests/test_functions.py", "./target", "uv.lock"] +count = true +ignore-words-list = ["IST", "ans"] + +[dependency-groups] +dev = [ + "arro3-core==0.6.5", + "codespell==2.4.1", + "maturin>=1.8.1", + "nanoarrow==0.8.0", + "numpy>1.25.0;python_version<'3.14'", + "numpy>=2.3.2;python_version>='3.14'", + "pre-commit>=4.3.0", + "pyarrow>=19.0.0", + "pygithub==2.5.0", + "pytest-asyncio>=0.23.3", + "pytest>=7.4.4", + "pyyaml>=6.0.3", + "ruff>=0.9.1", + "toml>=0.10.2", +] +docs = [ + "ipython>=8.12.3", + "jinja2>=3.1.5", + "myst-parser>=3.0.1", + "pandas>=2.0.3", + "pickleshare>=0.7.5", + "pydata-sphinx-theme==0.8.0", + "setuptools>=75.3.0", + "sphinx-autoapi>=3.4.0", + "sphinx>=7.1.2", +] diff --git a/python/datafusion/__init__.py b/python/datafusion/__init__.py index 846b1a459..2e6f81166 100644 --- a/python/datafusion/__init__.py +++ b/python/datafusion/__init__.py @@ -15,206 +15,146 @@ # specific language governing permissions and limitations # under the License. -from abc import ABCMeta, abstractmethod -from typing import List +"""DataFusion python package. + +This is a Python library that binds to Apache Arrow in-memory query engine DataFusion. +See https://datafusion.apache.org/python for more information. +""" + +from __future__ import annotations + +from typing import Any try: import importlib.metadata as importlib_metadata except ImportError: - import importlib_metadata - -import pyarrow as pa - -from ._internal import ( - AggregateUDF, - Config, - DataFrame, - SessionContext, + import importlib_metadata # type: ignore[import] + +# Public submodules +from . import functions, object_store, substrait, unparser + +# The following imports are okay to remain as opaque to the user. 
+from ._internal import Config +from .catalog import Catalog, Database, Table +from .col import col, column +from .common import DFSchema +from .context import ( + RuntimeEnvBuilder, SessionConfig, - RuntimeConfig, - ScalarUDF, + SessionContext, SQLOptions, ) - -from .common import ( - DFSchema, +from .dataframe import ( + DataFrame, + DataFrameWriteOptions, + InsertOp, + ParquetColumnOptions, + ParquetWriterOptions, ) - -from .expr import ( - Alias, - Analyze, - Expr, - Filter, - Limit, - Like, - ILike, - Projection, - SimilarTo, - ScalarVariable, - Sort, - TableScan, - Not, - IsNotNull, - IsTrue, - IsFalse, - IsUnknown, - IsNotTrue, - IsNotFalse, - IsNotUnknown, - Negative, - InList, - Exists, - Subquery, - InSubquery, - ScalarSubquery, - GroupingSet, - Placeholder, - Case, - Cast, - TryCast, - Between, - Explain, - CreateMemoryTable, - SubqueryAlias, - Extension, - CreateView, - Distinct, - DropTable, - Repartition, - Partitioning, - Window, - WindowFrame, +from .dataframe_formatter import configure_formatter +from .expr import Expr, WindowFrame +from .io import read_avro, read_csv, read_json, read_parquet +from .options import CsvReadOptions +from .plan import ExecutionPlan, LogicalPlan +from .record_batch import RecordBatch, RecordBatchStream +from .user_defined import ( + Accumulator, + AggregateUDF, + ScalarUDF, + TableFunction, + WindowUDF, + udaf, + udf, + udtf, + udwf, ) __version__ = importlib_metadata.version(__name__) __all__ = [ + "Accumulator", + "AggregateUDF", + "Catalog", "Config", + "CsvReadOptions", + "DFSchema", "DataFrame", - "SessionContext", - "SessionConfig", - "SQLOptions", - "RuntimeConfig", + "DataFrameWriteOptions", + "Database", + "ExecutionPlan", "Expr", - "AggregateUDF", + "InsertOp", + "LogicalPlan", + "ParquetColumnOptions", + "ParquetWriterOptions", + "RecordBatch", + "RecordBatchStream", + "RuntimeEnvBuilder", + "SQLOptions", "ScalarUDF", - "Window", + "SessionConfig", + "SessionContext", + "Table", + "TableFunction", "WindowFrame", + "WindowUDF", + "catalog", + "col", "column", + "common", + "configure_formatter", + "expr", + "functions", + "lit", "literal", - "TableScan", - "Projection", - "DFSchema", - "DFField", - "Analyze", - "Sort", - "Limit", - "Filter", - "Like", - "ILike", - "SimilarTo", - "ScalarVariable", - "Alias", - "Not", - "IsNotNull", - "IsTrue", - "IsFalse", - "IsUnknown", - "IsNotTrue", - "IsNotFalse", - "IsNotUnknown", - "Negative", - "ScalarFunction", - "BuiltinScalarFunction", - "InList", - "Exists", - "Subquery", - "InSubquery", - "ScalarSubquery", - "GroupingSet", - "Placeholder", - "Case", - "Cast", - "TryCast", - "Between", - "Explain", - "SubqueryAlias", - "Extension", - "CreateMemoryTable", - "CreateView", - "Distinct", - "DropTable", - "Repartition", - "Partitioning", + "object_store", + "options", + "read_avro", + "read_csv", + "read_json", + "read_parquet", + "substrait", + "udaf", + "udf", + "udtf", + "udwf", + "unparser", ] -class Accumulator(metaclass=ABCMeta): - @abstractmethod - def state(self) -> List[pa.Scalar]: - pass - - @abstractmethod - def update(self, values: pa.Array) -> None: - pass - - @abstractmethod - def merge(self, states: pa.Array) -> None: - pass +def literal(value: Any) -> Expr: + """Create a literal expression.""" + return Expr.literal(value) - @abstractmethod - def evaluate(self) -> pa.Scalar: - pass +def string_literal(value: str) -> Expr: + """Create a UTF8 literal expression. -def column(value): - return Expr.column(value) + It differs from `literal` which creates a UTF8view literal. 
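+
+    A minimal sketch of the difference, assuming a :py:class:`SessionContext`
+    named ``ctx`` is already in scope::
+
+        from datafusion import lit, string_literal
+
+        df = ctx.from_pydict({"a": [1]})
+        df = df.select(lit("hello"), string_literal("hello"))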
+ """ + return Expr.string_literal(value) -col = column +def str_lit(value: str) -> Expr: + """Alias for `string_literal`.""" + return string_literal(value) -def literal(value): - if not isinstance(value, pa.Scalar): - value = pa.scalar(value) +def lit(value: Any) -> Expr: + """Create a literal expression.""" return Expr.literal(value) -lit = literal +def literal_with_metadata(value: Any, metadata: dict[str, str]) -> Expr: + """Creates a new expression representing a scalar value with metadata. - -def udf(func, input_types, return_type, volatility, name=None): - """ - Create a new User Defined Function + Args: + value: A valid PyArrow scalar value or easily castable to one. + metadata: Metadata to attach to the expression. """ - if not callable(func): - raise TypeError("`func` argument must be callable") - if name is None: - name = func.__qualname__.lower() - return ScalarUDF( - name=name, - func=func, - input_types=input_types, - return_type=return_type, - volatility=volatility, - ) - - -def udaf(accum, input_type, return_type, state_type, volatility, name=None): - """ - Create a new User Defined Aggregate Function - """ - if not issubclass(accum, Accumulator): - raise TypeError("`accum` must implement the abstract base class Accumulator") - if name is None: - name = accum.__qualname__.lower() - if isinstance(input_type, pa.lib.DataType): - input_type = [input_type] - return AggregateUDF( - name=name, - accumulator=accum, - input_type=input_type, - return_type=return_type, - state_type=state_type, - volatility=volatility, - ) + return Expr.literal_with_metadata(value, metadata) + + +def lit_with_metadata(value: Any, metadata: dict[str, str]) -> Expr: + """Alias for literal_with_metadata.""" + return literal_with_metadata(value, metadata) diff --git a/python/datafusion/catalog.py b/python/datafusion/catalog.py new file mode 100644 index 000000000..bc43cf349 --- /dev/null +++ b/python/datafusion/catalog.py @@ -0,0 +1,371 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. 
+
+"""Data catalog providers."""
+
+from __future__ import annotations
+
+from abc import ABC, abstractmethod
+from typing import TYPE_CHECKING, Any, Protocol
+
+import datafusion._internal as df_internal
+
+if TYPE_CHECKING:
+    import pyarrow as pa
+
+    from datafusion import DataFrame, SessionContext
+    from datafusion.context import TableProviderExportable
+
+try:
+    from warnings import deprecated  # Python 3.13+
+except ImportError:
+    from typing_extensions import deprecated  # Python 3.12
+
+
+__all__ = [
+    "Catalog",
+    "CatalogList",
+    "CatalogProvider",
+    "CatalogProviderList",
+    "Schema",
+    "SchemaProvider",
+    "Table",
+]
+
+
+class CatalogList:
+    """DataFusion data catalog list."""
+
+    def __init__(self, catalog_list: df_internal.catalog.RawCatalogList) -> None:
+        """This constructor is not typically called by the end user."""
+        self.catalog_list = catalog_list
+
+    def __repr__(self) -> str:
+        """Print a string representation of the catalog list."""
+        return self.catalog_list.__repr__()
+
+    def names(self) -> set[str]:
+        """This is an alias for `catalog_names`."""
+        return self.catalog_names()
+
+    def catalog_names(self) -> set[str]:
+        """Returns the set of catalog names in this catalog list."""
+        return self.catalog_list.catalog_names()
+
+    @staticmethod
+    def memory_catalog(ctx: SessionContext | None = None) -> CatalogList:
+        """Create an in-memory catalog provider list."""
+        catalog_list = df_internal.catalog.RawCatalogList.memory_catalog(ctx)
+        return CatalogList(catalog_list)
+
+    def catalog(self, name: str = "datafusion") -> Catalog:
+        """Returns the catalog with the given ``name`` from this catalog list."""
+        catalog = self.catalog_list.catalog(name)
+
+        return (
+            Catalog(catalog)
+            if isinstance(catalog, df_internal.catalog.RawCatalog)
+            else catalog
+        )
+
+    def register_catalog(
+        self,
+        name: str,
+        catalog: Catalog | CatalogProvider | CatalogProviderExportable,
+    ) -> Catalog | None:
+        """Register a catalog with this catalog list."""
+        if isinstance(catalog, Catalog):
+            return self.catalog_list.register_catalog(name, catalog.catalog)
+        return self.catalog_list.register_catalog(name, catalog)
+
+
+class Catalog:
+    """DataFusion data catalog."""
+
+    def __init__(self, catalog: df_internal.catalog.RawCatalog) -> None:
+        """This constructor is not typically called by the end user."""
+        self.catalog = catalog
+
+    def __repr__(self) -> str:
+        """Print a string representation of the catalog."""
+        return self.catalog.__repr__()
+
+    def names(self) -> set[str]:
+        """This is an alias for `schema_names`."""
+        return self.schema_names()
+
+    def schema_names(self) -> set[str]:
+        """Returns the set of schema names in this catalog."""
+        return self.catalog.schema_names()
+
+    @staticmethod
+    def memory_catalog(ctx: SessionContext | None = None) -> Catalog:
+        """Create an in-memory catalog provider."""
+        catalog = df_internal.catalog.RawCatalog.memory_catalog(ctx)
+        return Catalog(catalog)
+
+    def schema(self, name: str = "public") -> Schema:
+        """Returns the schema with the given ``name`` from this catalog."""
+        schema = self.catalog.schema(name)
+
+        return (
+            Schema(schema)
+            if isinstance(schema, df_internal.catalog.RawSchema)
+            else schema
+        )
+
+    @deprecated("Use `schema` instead.")
+    def database(self, name: str = "public") -> Schema:
+        """Returns the schema with the given ``name`` from this catalog."""
+        return self.schema(name)
+
+    def register_schema(
+        self,
+        name: str,
+        schema: Schema | SchemaProvider | SchemaProviderExportable,
+    ) -> Schema | None:
+        """Register a schema with this
catalog.""" + if isinstance(schema, Schema): + return self.catalog.register_schema(name, schema._raw_schema) + return self.catalog.register_schema(name, schema) + + def deregister_schema(self, name: str, cascade: bool = True) -> Schema | None: + """Deregister a schema from this catalog.""" + return self.catalog.deregister_schema(name, cascade) + + +class Schema: + """DataFusion Schema.""" + + def __init__(self, schema: df_internal.catalog.RawSchema) -> None: + """This constructor is not typically called by the end user.""" + self._raw_schema = schema + + def __repr__(self) -> str: + """Print a string representation of the schema.""" + return self._raw_schema.__repr__() + + @staticmethod + def memory_schema(ctx: SessionContext | None = None) -> Schema: + """Create an in-memory schema provider.""" + schema = df_internal.catalog.RawSchema.memory_schema(ctx) + return Schema(schema) + + def names(self) -> set[str]: + """This is an alias for `table_names`.""" + return self.table_names() + + def table_names(self) -> set[str]: + """Returns the list of all tables in this schema.""" + return self._raw_schema.table_names + + def table(self, name: str) -> Table: + """Return the table with the given ``name`` from this schema.""" + return Table(self._raw_schema.table(name)) + + def register_table( + self, + name: str, + table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, + ) -> None: + """Register a table in this schema.""" + return self._raw_schema.register_table(name, table) + + def deregister_table(self, name: str) -> None: + """Deregister a table provider from this schema.""" + return self._raw_schema.deregister_table(name) + + def table_exist(self, name: str) -> bool: + """Determines if a table exists in this schema.""" + return self._raw_schema.table_exist(name) + + +@deprecated("Use `Schema` instead.") +class Database(Schema): + """See `Schema`.""" + + +class Table: + """A DataFusion table. + + Internally we currently support the following types of tables: + + - Tables created using built-in DataFusion methods, such as + reading from CSV or Parquet + - pyarrow datasets + - DataFusion DataFrames, which will be converted into a view + - Externally provided tables implemented with the FFI PyCapsule + interface (advanced) + """ + + __slots__ = ("_inner",) + + def __init__( + self, + table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, + ctx: SessionContext | None = None, + ) -> None: + """Constructor.""" + self._inner = df_internal.catalog.RawTable(table, ctx) + + def __repr__(self) -> str: + """Print a string representation of the table.""" + return repr(self._inner) + + @staticmethod + @deprecated("Use Table() constructor instead.") + def from_dataset(dataset: pa.dataset.Dataset) -> Table: + """Turn a :mod:`pyarrow.dataset` ``Dataset`` into a :class:`Table`.""" + return Table(dataset) + + @property + def schema(self) -> pa.Schema: + """Returns the schema associated with this table.""" + return self._inner.schema + + @property + def kind(self) -> str: + """Returns the kind of table.""" + return self._inner.kind + + +class CatalogProviderList(ABC): + """Abstract class for defining a Python based Catalog Provider List.""" + + @abstractmethod + def catalog_names(self) -> set[str]: + """Set of the names of all catalogs in this catalog list.""" + ... + + @abstractmethod + def catalog( + self, name: str + ) -> CatalogProviderExportable | CatalogProvider | Catalog | None: + """Retrieve a specific catalog from this catalog list.""" + ... 
+ + def register_catalog( # noqa: B027 + self, name: str, catalog: CatalogProviderExportable | CatalogProvider | Catalog + ) -> None: + """Add a catalog to this catalog list. + + This method is optional. If your catalog provides a fixed list of catalogs, you + do not need to implement this method. + """ + + +class CatalogProviderListExportable(Protocol): + """Type hint for object that has __datafusion_catalog_provider_list__ PyCapsule. + + https://docs.rs/datafusion/latest/datafusion/catalog/trait.CatalogProviderList.html + """ + + def __datafusion_catalog_provider_list__(self, session: Any) -> object: ... + + +class CatalogProvider(ABC): + """Abstract class for defining a Python based Catalog Provider.""" + + @abstractmethod + def schema_names(self) -> set[str]: + """Set of the names of all schemas in this catalog.""" + ... + + @abstractmethod + def schema(self, name: str) -> Schema | None: + """Retrieve a specific schema from this catalog.""" + ... + + def register_schema( # noqa: B027 + self, name: str, schema: SchemaProviderExportable | SchemaProvider | Schema + ) -> None: + """Add a schema to this catalog. + + This method is optional. If your catalog provides a fixed list of schemas, you + do not need to implement this method. + """ + + def deregister_schema(self, name: str, cascade: bool) -> None: # noqa: B027 + """Remove a schema from this catalog. + + This method is optional. If your catalog provides a fixed list of schemas, you + do not need to implement this method. + + Args: + name: The name of the schema to remove. + cascade: If true, deregister the tables within the schema. + """ + + +class CatalogProviderExportable(Protocol): + """Type hint for object that has __datafusion_catalog_provider__ PyCapsule. + + https://docs.rs/datafusion/latest/datafusion/catalog/trait.CatalogProvider.html + """ + + def __datafusion_catalog_provider__(self, session: Any) -> object: ... + + +class SchemaProvider(ABC): + """Abstract class for defining a Python based Schema Provider.""" + + def owner_name(self) -> str | None: + """Returns the owner of the schema. + + This is an optional method. The default return is None. + """ + return None + + @abstractmethod + def table_names(self) -> set[str]: + """Set of the names of all tables in this schema.""" + ... + + @abstractmethod + def table(self, name: str) -> Table | None: + """Retrieve a specific table from this schema.""" + ... + + def register_table( # noqa: B027 + self, name: str, table: Table | TableProviderExportable | Any + ) -> None: + """Add a table to this schema. + + This method is optional. If your schema provides a fixed list of tables, you do + not need to implement this method. + """ + + def deregister_table(self, name: str, cascade: bool) -> None: # noqa: B027 + """Remove a table from this schema. + + This method is optional. If your schema provides a fixed list of tables, you do + not need to implement this method. + """ + + @abstractmethod + def table_exist(self, name: str) -> bool: + """Returns true if the table exists in this schema.""" + ... + + +class SchemaProviderExportable(Protocol): + """Type hint for object that has __datafusion_schema_provider__ PyCapsule. + + https://docs.rs/datafusion/latest/datafusion/catalog/trait.SchemaProvider.html + """ + + def __datafusion_schema_provider__(self, session: Any) -> object: ... 
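# A minimal sketch of implementing the SchemaProvider ABC defined above; the
# class name and the fixed table mapping are hypothetical, not part of this patch.
from datafusion.catalog import SchemaProvider, Table


class StaticSchemaProvider(SchemaProvider):
    """Expose a fixed mapping of table names to tables."""

    def __init__(self, tables: dict[str, Table]) -> None:
        self._tables = tables

    def table_names(self) -> set[str]:
        # Required abstract method: the set of table names in this schema.
        return set(self._tables)

    def table(self, name: str) -> Table | None:
        # Required abstract method: look up one table, or None if absent.
        return self._tables.get(name)

    def table_exist(self, name: str) -> bool:
        # Required abstract method: membership test used by the engine.
        return name in self._tables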
diff --git a/python/datafusion/col.py b/python/datafusion/col.py
new file mode 100644
index 000000000..1141dc092
--- /dev/null
+++ b/python/datafusion/col.py
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Col class."""
+
+from datafusion.expr import Expr
+
+
+class Col:
+    """Create a column expression.
+
+    This helper class allows an additional syntax for creating columns using the
+    __getattr__ method.
+    """
+
+    def __call__(self, value: str) -> Expr:
+        """Create a column expression."""
+        return Expr.column(value)
+
+    def __getattr__(self, value: str) -> Expr:
+        """Create a column using attribute syntax."""
+        # For autocomplete to work with IPython
+        if value.startswith("__wrapped__"):
+            return getattr(type(self), value)
+
+        return Expr.column(value)
+
+
+col: Col = Col()
+column: Col = Col()
+__all__ = ["col", "column"]
diff --git a/python/datafusion/common.py b/python/datafusion/common.py
index dd56640a4..c689a816d 100644
--- a/python/datafusion/common.py
+++ b/python/datafusion/common.py
@@ -14,10 +14,56 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
+"""Common data types used throughout the DataFusion project."""
 
-from ._internal import common
+from enum import Enum
 
+from ._internal import common as common_internal
 
-def __getattr__(name):
-    return getattr(common, name)
+# TODO: these should all have proper wrapper classes
+
+DFSchema = common_internal.DFSchema
+DataType = common_internal.DataType
+DataTypeMap = common_internal.DataTypeMap
+PythonType = common_internal.PythonType
+RexType = common_internal.RexType
+SqlFunction = common_internal.SqlFunction
+SqlSchema = common_internal.SqlSchema
+SqlStatistics = common_internal.SqlStatistics
+SqlTable = common_internal.SqlTable
+SqlType = common_internal.SqlType
+SqlView = common_internal.SqlView
+TableType = common_internal.TableType
+TableSource = common_internal.TableSource
+Constraints = common_internal.Constraints
+
+__all__ = [
+    "Constraints",
+    "DFSchema",
+    "DataType",
+    "DataTypeMap",
+    "NullTreatment",
+    "PythonType",
+    "RexType",
+    "SqlFunction",
+    "SqlSchema",
+    "SqlStatistics",
+    "SqlTable",
+    "SqlType",
+    "SqlView",
+    "TableSource",
+    "TableType",
+]
+
+
+class NullTreatment(Enum):
+    """Describe how null values are to be treated by functions.
+
+    This is used primarily by aggregate and window functions. It can be set on
+    these functions using the builder approach described in
+    :ref:`window_functions` and :ref:`aggregation` in the online documentation.
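+
+    A sketch of how the enum might be passed to an aggregate builder (the
+    exact builder API is described in the linked documentation)::
+
+        from datafusion import col, functions as F
+        from datafusion.common import NullTreatment
+
+        expr = F.first_value(col("a"), null_treatment=NullTreatment.IGNORE_NULLS)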
+
+    """
+
+    RESPECT_NULLS = common_internal.NullTreatment.RESPECT_NULLS
+    IGNORE_NULLS = common_internal.NullTreatment.IGNORE_NULLS
diff --git a/python/datafusion/context.py b/python/datafusion/context.py
new file mode 100644
index 000000000..0d8259774
--- /dev/null
+++ b/python/datafusion/context.py
@@ -0,0 +1,1386 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Session Context and its associated configuration."""
+
+from __future__ import annotations
+
+import uuid
+import warnings
+from typing import TYPE_CHECKING, Any, Protocol
+
+try:
+    from warnings import deprecated  # Python 3.13+
+except ImportError:
+    from typing_extensions import deprecated  # Python 3.12
+
+
+import pyarrow as pa
+
+from datafusion.catalog import (
+    Catalog,
+    CatalogList,
+    CatalogProviderExportable,
+    CatalogProviderList,
+    CatalogProviderListExportable,
+)
+from datafusion.dataframe import DataFrame
+from datafusion.expr import sort_list_to_raw_sort_list
+from datafusion.options import (
+    DEFAULT_MAX_INFER_SCHEMA,
+    CsvReadOptions,
+    _convert_table_partition_cols,
+)
+from datafusion.record_batch import RecordBatchStream
+
+from ._internal import RuntimeEnvBuilder as RuntimeEnvBuilderInternal
+from ._internal import SessionConfig as SessionConfigInternal
+from ._internal import SessionContext as SessionContextInternal
+from ._internal import SQLOptions as SQLOptionsInternal
+from ._internal import expr as expr_internal
+
+if TYPE_CHECKING:
+    import pathlib
+    from collections.abc import Sequence
+
+    import pandas as pd
+    import polars as pl  # type: ignore[import]
+
+    from datafusion.catalog import CatalogProvider, Table
+    from datafusion.expr import SortKey
+    from datafusion.plan import ExecutionPlan, LogicalPlan
+    from datafusion.user_defined import (
+        AggregateUDF,
+        ScalarUDF,
+        TableFunction,
+        WindowUDF,
+    )
+
+
+class ArrowStreamExportable(Protocol):
+    """Type hint for object exporting Arrow C Stream via Arrow PyCapsule Interface.
+
+    https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html
+    """
+
+    def __arrow_c_stream__(  # noqa: D105
+        self, requested_schema: object | None = None
+    ) -> object: ...
+
+
+class ArrowArrayExportable(Protocol):
+    """Type hint for object exporting Arrow C Array via Arrow PyCapsule Interface.
+
+    https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html
+    """
+
+    def __arrow_c_array__(  # noqa: D105
+        self, requested_schema: object | None = None
+    ) -> tuple[object, object]: ...
+
+
+class TableProviderExportable(Protocol):
+    """Type hint for object that has __datafusion_table_provider__ PyCapsule.
+
+    https://datafusion.apache.org/python/user-guide/io/table_provider.html
+    """
+
+    def __datafusion_table_provider__(self, session: Any) -> object: ...
# noqa: D105 + + +class SessionConfig: + """Session configuration options.""" + + def __init__(self, config_options: dict[str, str] | None = None) -> None: + """Create a new :py:class:`SessionConfig` with the given configuration options. + + Args: + config_options: Configuration options. + """ + self.config_internal = SessionConfigInternal(config_options) + + def with_create_default_catalog_and_schema( + self, enabled: bool = True + ) -> SessionConfig: + """Control if the default catalog and schema will be automatically created. + + Args: + enabled: Whether the default catalog and schema will be + automatically created. + + Returns: + A new :py:class:`SessionConfig` object with the updated setting. + """ + self.config_internal = ( + self.config_internal.with_create_default_catalog_and_schema(enabled) + ) + return self + + def with_default_catalog_and_schema( + self, catalog: str, schema: str + ) -> SessionConfig: + """Select a name for the default catalog and schema. + + Args: + catalog: Catalog name. + schema: Schema name. + + Returns: + A new :py:class:`SessionConfig` object with the updated setting. + """ + self.config_internal = self.config_internal.with_default_catalog_and_schema( + catalog, schema + ) + return self + + def with_information_schema(self, enabled: bool = True) -> SessionConfig: + """Enable or disable the inclusion of ``information_schema`` virtual tables. + + Args: + enabled: Whether to include ``information_schema`` virtual tables. + + Returns: + A new :py:class:`SessionConfig` object with the updated setting. + """ + self.config_internal = self.config_internal.with_information_schema(enabled) + return self + + def with_batch_size(self, batch_size: int) -> SessionConfig: + """Customize batch size. + + Args: + batch_size: Batch size. + + Returns: + A new :py:class:`SessionConfig` object with the updated setting. + """ + self.config_internal = self.config_internal.with_batch_size(batch_size) + return self + + def with_target_partitions(self, target_partitions: int) -> SessionConfig: + """Customize the number of target partitions for query execution. + + Increasing partitions can increase concurrency. + + Args: + target_partitions: Number of target partitions. + + Returns: + A new :py:class:`SessionConfig` object with the updated setting. + """ + self.config_internal = self.config_internal.with_target_partitions( + target_partitions + ) + return self + + def with_repartition_aggregations(self, enabled: bool = True) -> SessionConfig: + """Enable or disable the use of repartitioning for aggregations. + + Enabling this improves parallelism. + + Args: + enabled: Whether to use repartitioning for aggregations. + + Returns: + A new :py:class:`SessionConfig` object with the updated setting. + """ + self.config_internal = self.config_internal.with_repartition_aggregations( + enabled + ) + return self + + def with_repartition_joins(self, enabled: bool = True) -> SessionConfig: + """Enable or disable the use of repartitioning for joins to improve parallelism. + + Args: + enabled: Whether to use repartitioning for joins. + + Returns: + A new :py:class:`SessionConfig` object with the updated setting. + """ + self.config_internal = self.config_internal.with_repartition_joins(enabled) + return self + + def with_repartition_windows(self, enabled: bool = True) -> SessionConfig: + """Enable or disable the use of repartitioning for window functions. + + This may improve parallelism. + + Args: + enabled: Whether to use repartitioning for window functions. 
+
+        Returns:
+            A new :py:class:`SessionConfig` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.with_repartition_windows(enabled)
+        return self
+
+    def with_repartition_sorts(self, enabled: bool = True) -> SessionConfig:
+        """Enable or disable the use of repartitioning for sorts.
+
+        This may improve parallelism.
+
+        Args:
+            enabled: Whether to use repartitioning for sorts.
+
+        Returns:
+            A new :py:class:`SessionConfig` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.with_repartition_sorts(enabled)
+        return self
+
+    def with_repartition_file_scans(self, enabled: bool = True) -> SessionConfig:
+        """Enable or disable the use of repartitioning for file scans.
+
+        Args:
+            enabled: Whether to use repartitioning for file scans.
+
+        Returns:
+            A new :py:class:`SessionConfig` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.with_repartition_file_scans(enabled)
+        return self
+
+    def with_repartition_file_min_size(self, size: int) -> SessionConfig:
+        """Set minimum file range size for repartitioning scans.
+
+        Args:
+            size: Minimum file range size.
+
+        Returns:
+            A new :py:class:`SessionConfig` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.with_repartition_file_min_size(size)
+        return self
+
+    def with_parquet_pruning(self, enabled: bool = True) -> SessionConfig:
+        """Enable or disable the use of pruning predicate for parquet readers.
+
+        Pruning predicates will enable the reader to skip row groups.
+
+        Args:
+            enabled: Whether to use pruning predicate for parquet readers.
+
+        Returns:
+            A new :py:class:`SessionConfig` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.with_parquet_pruning(enabled)
+        return self
+
+    def set(self, key: str, value: str) -> SessionConfig:
+        """Set a configuration option.
+
+        Args:
+            key: Option key.
+            value: Option value.
+
+        Returns:
+            A new :py:class:`SessionConfig` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.set(key, value)
+        return self
+
+
+class RuntimeEnvBuilder:
+    """Runtime configuration options."""
+
+    def __init__(self) -> None:
+        """Create a new :py:class:`RuntimeEnvBuilder` with default values."""
+        self.config_internal = RuntimeEnvBuilderInternal()
+
+    def with_disk_manager_disabled(self) -> RuntimeEnvBuilder:
+        """Disable the disk manager; attempts to create temporary files will error.
+
+        Returns:
+            A new :py:class:`RuntimeEnvBuilder` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.with_disk_manager_disabled()
+        return self
+
+    def with_disk_manager_os(self) -> RuntimeEnvBuilder:
+        """Use the operating system's temporary directory for disk manager.
+
+        Returns:
+            A new :py:class:`RuntimeEnvBuilder` object with the updated setting.
+        """
+        self.config_internal = self.config_internal.with_disk_manager_os()
+        return self
+
+    def with_disk_manager_specified(
+        self, *paths: str | pathlib.Path
+    ) -> RuntimeEnvBuilder:
+        """Use the specified paths for the disk manager's temporary files.
+
+        Args:
+            paths: Paths to use for the disk manager's temporary files.
+
+        Returns:
+            A new :py:class:`RuntimeEnvBuilder` object with the updated setting.
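+
+        Example usage (the paths shown are illustrative)::
+
+            builder = RuntimeEnvBuilder().with_disk_manager_specified(
+                "/mnt/fast_disk/tmp", "/mnt/slow_disk/tmp"
+            )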
+ """ + paths_list = [str(p) for p in paths] + self.config_internal = self.config_internal.with_disk_manager_specified( + paths_list + ) + return self + + def with_unbounded_memory_pool(self) -> RuntimeEnvBuilder: + """Use an unbounded memory pool. + + Returns: + A new :py:class:`RuntimeEnvBuilder` object with the updated setting. + """ + self.config_internal = self.config_internal.with_unbounded_memory_pool() + return self + + def with_fair_spill_pool(self, size: int) -> RuntimeEnvBuilder: + """Use a fair spill pool with the specified size. + + This pool works best when you know beforehand the query has multiple spillable + operators that will likely all need to spill. Sometimes it will cause spills + even when there was sufficient memory (reserved for other operators) to avoid + doing so:: + + ┌───────────────────────z──────────────────────z───────────────┐ + │ z z │ + │ z z │ + │ Spillable z Unspillable z Free │ + │ Memory z Memory z Memory │ + │ z z │ + │ z z │ + └───────────────────────z──────────────────────z───────────────┘ + + Args: + size: Size of the memory pool in bytes. + + Returns: + A new :py:class:`RuntimeEnvBuilder` object with the updated setting. + + Examples usage:: + + config = RuntimeEnvBuilder().with_fair_spill_pool(1024) + """ + self.config_internal = self.config_internal.with_fair_spill_pool(size) + return self + + def with_greedy_memory_pool(self, size: int) -> RuntimeEnvBuilder: + """Use a greedy memory pool with the specified size. + + This pool works well for queries that do not need to spill or have a single + spillable operator. See :py:func:`with_fair_spill_pool` if there are + multiple spillable operators that all will spill. + + Args: + size: Size of the memory pool in bytes. + + Returns: + A new :py:class:`RuntimeEnvBuilder` object with the updated setting. + + Example usage:: + + config = RuntimeEnvBuilder().with_greedy_memory_pool(1024) + """ + self.config_internal = self.config_internal.with_greedy_memory_pool(size) + return self + + def with_temp_file_path(self, path: str | pathlib.Path) -> RuntimeEnvBuilder: + """Use the specified path to create any needed temporary files. + + Args: + path: Path to use for temporary files. + + Returns: + A new :py:class:`RuntimeEnvBuilder` object with the updated setting. + + Example usage:: + + config = RuntimeEnvBuilder().with_temp_file_path("/tmp") + """ + self.config_internal = self.config_internal.with_temp_file_path(str(path)) + return self + + +@deprecated("Use `RuntimeEnvBuilder` instead.") +class RuntimeConfig(RuntimeEnvBuilder): + """See `RuntimeEnvBuilder`.""" + + +class SQLOptions: + """Options to be used when performing SQL queries.""" + + def __init__(self) -> None: + """Create a new :py:class:`SQLOptions` with default values. + + The default values are: + - DDL commands are allowed + - DML commands are allowed + - Statements are allowed + """ + self.options_internal = SQLOptionsInternal() + + def with_allow_ddl(self, allow: bool = True) -> SQLOptions: + """Should DDL (Data Definition Language) commands be run? + + Examples of DDL commands include ``CREATE TABLE`` and ``DROP TABLE``. + + Args: + allow: Allow DDL commands to be run. + + Returns: + A new :py:class:`SQLOptions` object with the updated setting. + + Example usage:: + + options = SQLOptions().with_allow_ddl(True) + """ + self.options_internal = self.options_internal.with_allow_ddl(allow) + return self + + def with_allow_dml(self, allow: bool = True) -> SQLOptions: + """Should DML (Data Manipulation Language) commands be run? 
+
+        Examples of DML commands include ``INSERT INTO`` and ``DELETE``.
+
+        Args:
+            allow: Allow DML commands to be run.
+
+        Returns:
+            A new :py:class:`SQLOptions` object with the updated setting.
+
+        Example usage::
+
+            options = SQLOptions().with_allow_dml(True)
+        """
+        self.options_internal = self.options_internal.with_allow_dml(allow)
+        return self
+
+    def with_allow_statements(self, allow: bool = True) -> SQLOptions:
+        """Should statements such as ``SET VARIABLE`` and ``BEGIN TRANSACTION`` be run?
+
+        Args:
+            allow: Allow statements to be run.
+
+        Returns:
+            A new :py:class:`SQLOptions` object with the updated setting.
+
+        Example usage::
+
+            options = SQLOptions().with_allow_statements(True)
+        """
+        self.options_internal = self.options_internal.with_allow_statements(allow)
+        return self
+
+
+class SessionContext:
+    """This is the main interface for executing queries and creating DataFrames.
+
+    See :ref:`user_guide_concepts` in the online documentation for more information.
+    """
+
+    def __init__(
+        self,
+        config: SessionConfig | None = None,
+        runtime: RuntimeEnvBuilder | None = None,
+    ) -> None:
+        """Main interface for executing queries with DataFusion.
+
+        Maintains the state of the connection between a user and an instance
+        of the DataFusion engine.
+
+        Args:
+            config: Session configuration options.
+            runtime: Runtime configuration options.
+
+        Example usage:
+
+        The following example demonstrates how to use the context to execute
+        a query against a CSV data source using the :py:class:`DataFrame` API::
+
+            from datafusion import SessionContext
+
+            ctx = SessionContext()
+            df = ctx.read_csv("data.csv")
+        """
+        config = config.config_internal if config is not None else None
+        runtime = runtime.config_internal if runtime is not None else None
+
+        self.ctx = SessionContextInternal(config, runtime)
+
+    def __repr__(self) -> str:
+        """Print a string representation of the Session Context."""
+        return self.ctx.__repr__()
+
+    @classmethod
+    def global_ctx(cls) -> SessionContext:
+        """Retrieve the global context as a `SessionContext` wrapper.
+
+        Returns:
+            A `SessionContext` object that wraps the global `SessionContextInternal`.
+        """
+        internal_ctx = SessionContextInternal.global_ctx()
+        wrapper = cls()
+        wrapper.ctx = internal_ctx
+        return wrapper
+
+    def enable_url_table(self) -> SessionContext:
+        """Control if local files can be queried as tables.
+
+        Returns:
+            A new :py:class:`SessionContext` object with url table enabled.
+        """
+        klass = self.__class__
+        obj = klass.__new__(klass)
+        obj.ctx = self.ctx.enable_url_table()
+        return obj
+
+    def register_object_store(
+        self, schema: str, store: Any, host: str | None = None
+    ) -> None:
+        """Add a new object store into the session.
+
+        Args:
+            schema: The data source schema.
+            store: The :py:class:`~datafusion.object_store.ObjectStore` to register.
+            host: URL for the host.
+        """
+        self.ctx.register_object_store(schema, store, host)
+
+    def register_listing_table(
+        self,
+        name: str,
+        path: str | pathlib.Path,
+        table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+        file_extension: str = ".parquet",
+        schema: pa.Schema | None = None,
+        file_sort_order: Sequence[Sequence[SortKey]] | None = None,
+    ) -> None:
+        """Register multiple files as a single table.
+
+        Registers a :py:class:`~datafusion.catalog.Table` that can assemble multiple
+        files from locations in an :py:class:`~datafusion.object_store.ObjectStore`
+        instance.
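+
+        Example usage (the path is illustrative)::
+
+            ctx.register_listing_table("my_table", "/path/to/data/")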
+
+        Args:
+            name: Name of the resultant table.
+            path: Path to the file to register.
+            table_partition_cols: Partition columns.
+            file_extension: File extension of the provided table.
+            schema: The data source schema.
+            file_sort_order: Sort order for the file. Each sort key can be
+                specified as a column name (``str``), an expression
+                (``Expr``), or a ``SortExpr``.
+        """
+        if table_partition_cols is None:
+            table_partition_cols = []
+        table_partition_cols = _convert_table_partition_cols(table_partition_cols)
+        self.ctx.register_listing_table(
+            name,
+            str(path),
+            table_partition_cols,
+            file_extension,
+            schema,
+            self._convert_file_sort_order(file_sort_order),
+        )
+
+    def sql(
+        self,
+        query: str,
+        options: SQLOptions | None = None,
+        param_values: dict[str, Any] | None = None,
+        **named_params: Any,
+    ) -> DataFrame:
+        """Create a :py:class:`~datafusion.DataFrame` from SQL query text.
+
+        See the online documentation for a description of how to perform
+        parameterized substitution via either the ``param_values`` option
+        or passing in ``named_params``.
+
+        Note: This API implements DDL statements such as ``CREATE TABLE`` and
+        ``CREATE VIEW`` and DML statements such as ``INSERT INTO`` with an
+        in-memory default implementation. See
+        :py:func:`~datafusion.context.SessionContext.sql_with_options`.
+
+        Args:
+            query: SQL query text.
+            options: If provided, the query will be validated against these options.
+            param_values: Provides substitution of scalar values in the query
+                after parsing.
+            named_params: Provides string or DataFrame substitution in the query string.
+
+        Returns:
+            DataFrame representation of the SQL query.
+        """
+
+        def value_to_scalar(value: Any) -> pa.Scalar:
+            if isinstance(value, pa.Scalar):
+                return value
+            return pa.scalar(value)
+
+        def value_to_string(value: Any) -> str:
+            if isinstance(value, DataFrame):
+                view_name = str(uuid.uuid4()).replace("-", "_")
+                view_name = f"view_{view_name}"
+                view = value.df.into_view(temporary=True)
+                self.ctx.register_table(view_name, view)
+                return view_name
+            return str(value)
+
+        param_values = (
+            {name: value_to_scalar(value) for (name, value) in param_values.items()}
+            if param_values is not None
+            else {}
+        )
+        param_strings = (
+            {name: value_to_string(value) for (name, value) in named_params.items()}
+            if named_params is not None
+            else {}
+        )
+
+        options_raw = options.options_internal if options is not None else None
+
+        return DataFrame(
+            self.ctx.sql_with_options(
+                query,
+                options=options_raw,
+                param_values=param_values,
+                param_strings=param_strings,
+            )
+        )
+
+    def sql_with_options(
+        self,
+        query: str,
+        options: SQLOptions,
+        param_values: dict[str, Any] | None = None,
+        **named_params: Any,
+    ) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from SQL query text.
+
+        This function will first validate that the query is allowed by the
+        provided options.
+
+        Args:
+            query: SQL query text.
+            options: SQL options.
+            param_values: Provides substitution of scalar values in the query
+                after parsing.
+            named_params: Provides string or DataFrame substitution in the query string.
+
+        Returns:
+            DataFrame representation of the SQL query.
+        """
+        return self.sql(
+            query, options=options, param_values=param_values, **named_params
+        )
+
+    def create_dataframe(
+        self,
+        partitions: list[list[pa.RecordBatch]],
+        name: str | None = None,
+        schema: pa.Schema | None = None,
+    ) -> DataFrame:
+        """Create and return a dataframe using the provided partitions.
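+
+        Example usage, a sketch with a single one-batch partition::
+
+            import pyarrow as pa
+
+            batch = pa.RecordBatch.from_pydict({"a": [1, 2, 3]})
+            df = ctx.create_dataframe([[batch]])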
+
+        Args:
+            partitions: :py:class:`pa.RecordBatch` partitions to register.
+            name: Resultant dataframe name.
+            schema: Schema for the partitions.
+
+        Returns:
+            DataFrame representation of the provided partitions.
+        """
+        return DataFrame(self.ctx.create_dataframe(partitions, name, schema))
+
+    def create_dataframe_from_logical_plan(self, plan: LogicalPlan) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from an existing plan.
+
+        Args:
+            plan: Logical plan.
+
+        Returns:
+            DataFrame representation of the logical plan.
+        """
+        return DataFrame(self.ctx.create_dataframe_from_logical_plan(plan._raw_plan))
+
+    def from_pylist(
+        self, data: list[dict[str, Any]], name: str | None = None
+    ) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from a list.
+
+        Args:
+            data: List of dictionaries.
+            name: Name of the DataFrame.
+
+        Returns:
+            DataFrame representation of the list of dictionaries.
+        """
+        return DataFrame(self.ctx.from_pylist(data, name))
+
+    def from_pydict(
+        self, data: dict[str, list[Any]], name: str | None = None
+    ) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from a dictionary.
+
+        Args:
+            data: Dictionary of lists.
+            name: Name of the DataFrame.
+
+        Returns:
+            DataFrame representation of the dictionary of lists.
+        """
+        return DataFrame(self.ctx.from_pydict(data, name))
+
+    def from_arrow(
+        self,
+        data: ArrowStreamExportable | ArrowArrayExportable,
+        name: str | None = None,
+    ) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from an Arrow source.
+
+        The Arrow data source can be any object that implements either
+        ``__arrow_c_stream__`` or ``__arrow_c_array__``. For the latter, it must return
+        a struct array.
+
+        Arrow data can come from Polars, Pandas, PyArrow, and similar libraries.
+
+        Args:
+            data: Arrow data source.
+            name: Name of the DataFrame.
+
+        Returns:
+            DataFrame representation of the Arrow table.
+        """
+        return DataFrame(self.ctx.from_arrow(data, name))
+
+    @deprecated("Use ``from_arrow`` instead.")
+    def from_arrow_table(self, data: pa.Table, name: str | None = None) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from an Arrow table.
+
+        This is an alias for :py:func:`from_arrow`.
+        """
+        return self.from_arrow(data, name)
+
+    def from_pandas(self, data: pd.DataFrame, name: str | None = None) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from a Pandas DataFrame.
+
+        Args:
+            data: Pandas DataFrame.
+            name: Name of the DataFrame.
+
+        Returns:
+            DataFrame representation of the Pandas DataFrame.
+        """
+        return DataFrame(self.ctx.from_pandas(data, name))
+
+    def from_polars(self, data: pl.DataFrame, name: str | None = None) -> DataFrame:
+        """Create a :py:class:`~datafusion.dataframe.DataFrame` from a Polars DataFrame.
+
+        Args:
+            data: Polars DataFrame.
+            name: Name of the DataFrame.
+
+        Returns:
+            DataFrame representation of the Polars DataFrame.
+        """
+        return DataFrame(self.ctx.from_polars(data, name))
+
+    # https://github.com/apache/datafusion-python/pull/1016#discussion_r1983239116
+    # is the discussion on how we arrived at adding register_view
+    def register_view(self, name: str, df: DataFrame) -> None:
+        """Register a :py:class:`~datafusion.dataframe.DataFrame` as a view.
+
+        Args:
+            name (str): The name to register the view under.
+            df (DataFrame): The DataFrame to be converted into a view and registered.
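+
+        Example usage::
+
+            df = ctx.from_pydict({"a": [1, 2, 3]})
+            ctx.register_view("my_view", df)
+            df_view = ctx.sql("SELECT a FROM my_view")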
+ """ + view = df.into_view() + self.ctx.register_table(name, view) + + def register_table( + self, + name: str, + table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, + ) -> None: + """Register a :py:class:`~datafusion.Table` with this context. + + The registered table can be referenced from SQL statements executed against + this context. + + Args: + name: Name of the resultant table. + table: Any object that can be converted into a :class:`Table`. + """ + self.ctx.register_table(name, table) + + def deregister_table(self, name: str) -> None: + """Remove a table from the session.""" + self.ctx.deregister_table(name) + + def catalog_names(self) -> set[str]: + """Returns the list of catalogs in this context.""" + return self.ctx.catalog_names() + + def register_catalog_provider_list( + self, + provider: CatalogProviderListExportable | CatalogProviderList | CatalogList, + ) -> None: + """Register a catalog provider list.""" + if isinstance(provider, CatalogList): + self.ctx.register_catalog_provider_list(provider.catalog) + else: + self.ctx.register_catalog_provider_list(provider) + + def register_catalog_provider( + self, name: str, provider: CatalogProviderExportable | CatalogProvider | Catalog + ) -> None: + """Register a catalog provider.""" + if isinstance(provider, Catalog): + self.ctx.register_catalog_provider(name, provider.catalog) + else: + self.ctx.register_catalog_provider(name, provider) + + @deprecated("Use register_table() instead.") + def register_table_provider( + self, + name: str, + provider: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset, + ) -> None: + """Register a table provider. + + Deprecated: use :meth:`register_table` instead. + """ + self.register_table(name, provider) + + def register_udtf(self, func: TableFunction) -> None: + """Register a user defined table function.""" + self.ctx.register_udtf(func._udtf) + + def register_record_batches( + self, name: str, partitions: list[list[pa.RecordBatch]] + ) -> None: + """Register record batches as a table. + + This function will convert the provided partitions into a table and + register it into the session using the given name. + + Args: + name: Name of the resultant table. + partitions: Record batches to register as a table. + """ + self.ctx.register_record_batches(name, partitions) + + def register_parquet( + self, + name: str, + path: str | pathlib.Path, + table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None, + parquet_pruning: bool = True, + file_extension: str = ".parquet", + skip_metadata: bool = True, + schema: pa.Schema | None = None, + file_sort_order: Sequence[Sequence[SortKey]] | None = None, + ) -> None: + """Register a Parquet file as a table. + + The registered table can be referenced from SQL statement executed + against this context. + + Args: + name: Name of the table to register. + path: Path to the Parquet file. + table_partition_cols: Partition columns. + parquet_pruning: Whether the parquet reader should use the + predicate to prune row groups. + file_extension: File extension; only files with this extension are + selected for data input. + skip_metadata: Whether the parquet reader should skip any metadata + that may be in the file schema. This can help avoid schema + conflicts due to metadata. + schema: The data source schema. + file_sort_order: Sort order for the file. Each sort key can be + specified as a column name (``str``), an expression + (``Expr``), or a ``SortExpr``. 
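+
+        Example usage (the path is illustrative)::
+
+            ctx.register_parquet("my_table", "data.parquet")
+            df = ctx.sql("SELECT * FROM my_table")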
+        """
+        if table_partition_cols is None:
+            table_partition_cols = []
+        table_partition_cols = _convert_table_partition_cols(table_partition_cols)
+        self.ctx.register_parquet(
+            name,
+            str(path),
+            table_partition_cols,
+            parquet_pruning,
+            file_extension,
+            skip_metadata,
+            schema,
+            self._convert_file_sort_order(file_sort_order),
+        )
+
+    def register_csv(
+        self,
+        name: str,
+        path: str | pathlib.Path | list[str | pathlib.Path],
+        schema: pa.Schema | None = None,
+        has_header: bool = True,
+        delimiter: str = ",",
+        schema_infer_max_records: int = DEFAULT_MAX_INFER_SCHEMA,
+        file_extension: str = ".csv",
+        file_compression_type: str | None = None,
+        options: CsvReadOptions | None = None,
+    ) -> None:
+        """Register a CSV file as a table.
+
+        The registered table can be referenced from SQL statements executed
+        against this context.
+
+        Args:
+            name: Name of the table to register.
+            path: Path to the CSV file. It also accepts a list of Paths.
+            schema: An optional schema representing the CSV file. If None, the
+                CSV reader will try to infer it based on data in file.
+            has_header: Whether the CSV file has a header. If schema inference
+                is run on a file with no headers, default column names are
+                created.
+            delimiter: An optional column delimiter.
+            schema_infer_max_records: Maximum number of rows to read from CSV
+                files for schema inference if needed.
+            file_extension: File extension; only files with this extension are
+                selected for data input.
+            file_compression_type: File compression type.
+            options: Set advanced options for CSV reading. This cannot be
+                combined with any of the other options in this method.
+        """
+        path_arg = [str(p) for p in path] if isinstance(path, list) else str(path)
+
+        if options is not None and (
+            schema is not None
+            or not has_header
+            or delimiter != ","
+            or schema_infer_max_records != DEFAULT_MAX_INFER_SCHEMA
+            or file_extension != ".csv"
+            or file_compression_type is not None
+        ):
+            message = (
+                "Combining CsvReadOptions parameter with additional options "
+                "is not supported. Use CsvReadOptions to set parameters."
+            )
+            warnings.warn(
+                message,
+                category=UserWarning,
+                stacklevel=2,
+            )
+
+        options = (
+            options
+            if options is not None
+            else CsvReadOptions(
+                schema=schema,
+                has_header=has_header,
+                delimiter=delimiter,
+                schema_infer_max_records=schema_infer_max_records,
+                file_extension=file_extension,
+                file_compression_type=file_compression_type,
+            )
+        )
+
+        self.ctx.register_csv(
+            name,
+            path_arg,
+            options.to_inner(),
+        )
+
+    def register_json(
+        self,
+        name: str,
+        path: str | pathlib.Path,
+        schema: pa.Schema | None = None,
+        schema_infer_max_records: int = 1000,
+        file_extension: str = ".json",
+        table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+        file_compression_type: str | None = None,
+    ) -> None:
+        """Register a JSON file as a table.
+
+        The registered table can be referenced from SQL statements executed
+        against this context.
+
+        Args:
+            name: Name of the table to register.
+            path: Path to the JSON file.
+            schema: The data source schema.
+            schema_infer_max_records: Maximum number of rows to read from JSON
+                files for schema inference if needed.
+            file_extension: File extension; only files with this extension are
+                selected for data input.
+            table_partition_cols: Partition columns.
+            file_compression_type: File compression type.
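+
+        Example usage (the path is illustrative)::
+
+            ctx.register_json("my_table", "data.json")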
+ """ + if table_partition_cols is None: + table_partition_cols = [] + table_partition_cols = _convert_table_partition_cols(table_partition_cols) + self.ctx.register_json( + name, + str(path), + schema, + schema_infer_max_records, + file_extension, + table_partition_cols, + file_compression_type, + ) + + def register_avro( + self, + name: str, + path: str | pathlib.Path, + schema: pa.Schema | None = None, + file_extension: str = ".avro", + table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None, + ) -> None: + """Register an Avro file as a table. + + The registered table can be referenced from SQL statement executed against + this context. + + Args: + name: Name of the table to register. + path: Path to the Avro file. + schema: The data source schema. + file_extension: File extension to select. + table_partition_cols: Partition columns. + """ + if table_partition_cols is None: + table_partition_cols = [] + table_partition_cols = _convert_table_partition_cols(table_partition_cols) + self.ctx.register_avro( + name, str(path), schema, file_extension, table_partition_cols + ) + + def register_dataset(self, name: str, dataset: pa.dataset.Dataset) -> None: + """Register a :py:class:`pa.dataset.Dataset` as a table. + + Args: + name: Name of the table to register. + dataset: PyArrow dataset. + """ + self.ctx.register_dataset(name, dataset) + + def register_udf(self, udf: ScalarUDF) -> None: + """Register a user-defined function (UDF) with the context.""" + self.ctx.register_udf(udf._udf) + + def register_udaf(self, udaf: AggregateUDF) -> None: + """Register a user-defined aggregation function (UDAF) with the context.""" + self.ctx.register_udaf(udaf._udaf) + + def register_udwf(self, udwf: WindowUDF) -> None: + """Register a user-defined window function (UDWF) with the context.""" + self.ctx.register_udwf(udwf._udwf) + + def catalog(self, name: str = "datafusion") -> Catalog: + """Retrieve a catalog by name.""" + return Catalog(self.ctx.catalog(name)) + + @deprecated( + "Use the catalog provider interface ``SessionContext.Catalog`` to " + "examine available catalogs, schemas and tables" + ) + def tables(self) -> set[str]: + """Deprecated.""" + return self.ctx.tables() + + def table(self, name: str) -> DataFrame: + """Retrieve a previously registered table by name.""" + return DataFrame(self.ctx.table(name)) + + def table_exist(self, name: str) -> bool: + """Return whether a table with the given name exists.""" + return self.ctx.table_exist(name) + + def empty_table(self) -> DataFrame: + """Create an empty :py:class:`~datafusion.dataframe.DataFrame`.""" + return DataFrame(self.ctx.empty_table()) + + def session_id(self) -> str: + """Return an id that uniquely identifies this :py:class:`SessionContext`.""" + return self.ctx.session_id() + + def read_json( + self, + path: str | pathlib.Path, + schema: pa.Schema | None = None, + schema_infer_max_records: int = 1000, + file_extension: str = ".json", + table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None, + file_compression_type: str | None = None, + ) -> DataFrame: + """Read a line-delimited JSON data source. + + Args: + path: Path to the JSON file. + schema: The data source schema. + schema_infer_max_records: Maximum number of rows to read from JSON + files for schema inference if needed. + file_extension: File extension; only files with this extension are + selected for data input. + table_partition_cols: Partition columns. + file_compression_type: File compression type. 
+
+        Returns:
+            DataFrame representation of the read JSON files.
+        """
+        if table_partition_cols is None:
+            table_partition_cols = []
+        table_partition_cols = _convert_table_partition_cols(table_partition_cols)
+        return DataFrame(
+            self.ctx.read_json(
+                str(path),
+                schema,
+                schema_infer_max_records,
+                file_extension,
+                table_partition_cols,
+                file_compression_type,
+            )
+        )
+
+    def read_csv(
+        self,
+        path: str | pathlib.Path | list[str] | list[pathlib.Path],
+        schema: pa.Schema | None = None,
+        has_header: bool = True,
+        delimiter: str = ",",
+        schema_infer_max_records: int = DEFAULT_MAX_INFER_SCHEMA,
+        file_extension: str = ".csv",
+        table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+        file_compression_type: str | None = None,
+        options: CsvReadOptions | None = None,
+    ) -> DataFrame:
+        """Read a CSV data source.
+
+        Args:
+            path: Path to the CSV file.
+            schema: An optional schema representing the CSV files. If None, the
+                CSV reader will try to infer it based on the data in the file.
+            has_header: Whether the CSV file has a header. If schema inference
+                is run on a file with no headers, default column names are
+                created.
+            delimiter: An optional column delimiter.
+            schema_infer_max_records: Maximum number of rows to read from CSV
+                files for schema inference if needed.
+            file_extension: File extension; only files with this extension are
+                selected for data input.
+            table_partition_cols: Partition columns.
+            file_compression_type: File compression type.
+            options: Set advanced options for CSV reading. This cannot be
+                combined with any of the other options in this method.
+
+        Returns:
+            DataFrame representation of the read CSV files.
+        """
+        path_arg = [str(p) for p in path] if isinstance(path, list) else str(path)
+
+        if options is not None and (
+            schema is not None
+            or not has_header
+            or delimiter != ","
+            or schema_infer_max_records != DEFAULT_MAX_INFER_SCHEMA
+            or file_extension != ".csv"
+            or table_partition_cols is not None
+            or file_compression_type is not None
+        ):
+            message = (
+                "Combining CsvReadOptions parameter with additional options "
+                "is not supported. Use CsvReadOptions to set parameters."
+            )
+            warnings.warn(
+                message,
+                category=UserWarning,
+                stacklevel=2,
+            )
+
+        options = (
+            options
+            if options is not None
+            else CsvReadOptions(
+                schema=schema,
+                has_header=has_header,
+                delimiter=delimiter,
+                schema_infer_max_records=schema_infer_max_records,
+                file_extension=file_extension,
+                table_partition_cols=table_partition_cols,
+                file_compression_type=file_compression_type,
+            )
+        )
+
+        return DataFrame(
+            self.ctx.read_csv(
+                path_arg,
+                options.to_inner(),
+            )
+        )
+
+    def read_parquet(
+        self,
+        path: str | pathlib.Path,
+        table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+        parquet_pruning: bool = True,
+        file_extension: str = ".parquet",
+        skip_metadata: bool = True,
+        schema: pa.Schema | None = None,
+        file_sort_order: Sequence[Sequence[SortKey]] | None = None,
+    ) -> DataFrame:
+        """Read a Parquet source into a :py:class:`~datafusion.dataframe.DataFrame`.
+
+        Args:
+            path: Path to the Parquet file.
+            table_partition_cols: Partition columns.
+            parquet_pruning: Whether the parquet reader should use the predicate
+                to prune row groups.
+            file_extension: File extension; only files with this extension are
+                selected for data input.
+            skip_metadata: Whether the parquet reader should skip any metadata
+                that may be in the file schema. This can help avoid schema
+                conflicts due to metadata.
+            schema: An optional schema representing the parquet files. If None,
+                the parquet reader will try to infer it based on the data in
+                the file.
+            file_sort_order: Sort order for the file. Each sort key can be
+                specified as a column name (``str``), an expression
+                (``Expr``), or a ``SortExpr``.
+
+        Returns:
+            DataFrame representation of the read Parquet files.
+        """
+        if table_partition_cols is None:
+            table_partition_cols = []
+        table_partition_cols = _convert_table_partition_cols(table_partition_cols)
+        file_sort_order = self._convert_file_sort_order(file_sort_order)
+        return DataFrame(
+            self.ctx.read_parquet(
+                str(path),
+                table_partition_cols,
+                parquet_pruning,
+                file_extension,
+                skip_metadata,
+                schema,
+                file_sort_order,
+            )
+        )
+
+    def read_avro(
+        self,
+        path: str | pathlib.Path,
+        schema: pa.Schema | None = None,
+        file_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+        file_extension: str = ".avro",
+    ) -> DataFrame:
+        """Create a :py:class:`DataFrame` for reading an Avro data source.
+
+        Args:
+            path: Path to the Avro file.
+            schema: The data source schema.
+            file_partition_cols: Partition columns.
+            file_extension: File extension to select.
+
+        Returns:
+            DataFrame representation of the read Avro file.
+        """
+        if file_partition_cols is None:
+            file_partition_cols = []
+        file_partition_cols = _convert_table_partition_cols(file_partition_cols)
+        return DataFrame(
+            self.ctx.read_avro(str(path), schema, file_partition_cols, file_extension)
+        )
+
+    def read_table(
+        self, table: Table | TableProviderExportable | DataFrame | pa.dataset.Dataset
+    ) -> DataFrame:
+        """Creates a :py:class:`~datafusion.dataframe.DataFrame` from a table."""
+        return DataFrame(self.ctx.read_table(table))
+
+    def execute(self, plan: ExecutionPlan, partitions: int) -> RecordBatchStream:
+        """Execute the ``plan`` and return the results."""
+        return RecordBatchStream(self.ctx.execute(plan._raw_plan, partitions))
+
+    @staticmethod
+    def _convert_file_sort_order(
+        file_sort_order: Sequence[Sequence[SortKey]] | None,
+    ) -> list[list[expr_internal.SortExpr]] | None:
+        """Convert nested ``SortKey`` sequences into raw sort expressions.
+
+        Each ``SortKey`` can be a column name string, an ``Expr``, or a
+        ``SortExpr`` and will be converted using
+        :func:`datafusion.expr.sort_list_to_raw_sort_list`.
+        """
+        # Convert each ``SortKey`` in the provided sort order to the low-level
+        # representation expected by the Rust bindings.
+        return (
+            [sort_list_to_raw_sort_list(f) for f in file_sort_order]
+            if file_sort_order is not None
+            else None
+        )
+
+    @staticmethod
+    def _convert_table_partition_cols(
+        table_partition_cols: list[tuple[str, str | pa.DataType]],
+    ) -> list[tuple[str, pa.DataType]]:
+        warn = False
+        converted_table_partition_cols = []
+
+        for col, data_type in table_partition_cols:
+            if isinstance(data_type, str):
+                warn = True
+                if data_type == "string":
+                    converted_data_type = pa.string()
+                elif data_type == "int":
+                    converted_data_type = pa.int32()
+                else:
+                    message = (
+                        f"Unsupported literal data type '{data_type}' for partition "
+                        "column. Supported types are 'string' and 'int'"
+                    )
+                    raise ValueError(message)
+            else:
+                converted_data_type = data_type
+
+            converted_table_partition_cols.append((col, converted_data_type))
+
+        if warn:
+            message = (
+                "using literals for table_partition_cols data types is deprecated, "
+                "use pyarrow types instead"
+            )
+            warnings.warn(
+                message,
+                category=DeprecationWarning,
+                stacklevel=2,
+            )
+
+        return converted_table_partition_cols
+
+    def __datafusion_task_context_provider__(self) -> Any:
+        """Access the PyCapsule FFI_TaskContextProvider."""
+        return self.ctx.__datafusion_task_context_provider__()
+
+    def __datafusion_logical_extension_codec__(self) -> Any:
+        """Access the PyCapsule FFI_LogicalExtensionCodec."""
+        return self.ctx.__datafusion_logical_extension_codec__()
+
+    def with_logical_extension_codec(self, codec: Any) -> SessionContext:
+        """Create a new session context with the specified codec.
+
+        This only supports codecs that have been implemented using the
+        FFI interface.
+        """
+        return self.ctx.with_logical_extension_codec(codec)
diff --git a/python/datafusion/dataframe.py b/python/datafusion/dataframe.py
new file mode 100644
index 000000000..214d44a42
--- /dev/null
+++ b/python/datafusion/dataframe.py
@@ -0,0 +1,1441 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+""":py:class:`DataFrame` is one of the core concepts in DataFusion.
+
+See :ref:`user_guide_concepts` in the online documentation for more information.
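+
+Example (a minimal sketch of the typical entry point)::
+
+    from datafusion import SessionContext
+
+    ctx = SessionContext()
+    df = ctx.sql("SELECT 1 AS value")
+    df.show()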
+""" + +from __future__ import annotations + +import warnings +from collections.abc import AsyncIterator, Iterable, Iterator, Sequence +from typing import ( + TYPE_CHECKING, + Any, + Literal, + overload, +) + +try: + from warnings import deprecated # Python 3.13+ +except ImportError: + from typing_extensions import deprecated # Python 3.12 + +from datafusion._internal import DataFrame as DataFrameInternal +from datafusion._internal import DataFrameWriteOptions as DataFrameWriteOptionsInternal +from datafusion._internal import InsertOp as InsertOpInternal +from datafusion._internal import ParquetColumnOptions as ParquetColumnOptionsInternal +from datafusion._internal import ParquetWriterOptions as ParquetWriterOptionsInternal +from datafusion.expr import ( + Expr, + SortExpr, + SortKey, + ensure_expr, + ensure_expr_list, + expr_list_to_raw_expr_list, + sort_list_to_raw_sort_list, +) +from datafusion.plan import ExecutionPlan, LogicalPlan +from datafusion.record_batch import RecordBatch, RecordBatchStream + +if TYPE_CHECKING: + import pathlib + from collections.abc import Callable + + import pandas as pd + import polars as pl + import pyarrow as pa + + from datafusion.catalog import Table + +from enum import Enum + + +# excerpt from deltalake +# https://github.com/apache/datafusion-python/pull/981#discussion_r1905619163 +class Compression(Enum): + """Enum representing the available compression types for Parquet files.""" + + UNCOMPRESSED = "uncompressed" + SNAPPY = "snappy" + GZIP = "gzip" + BROTLI = "brotli" + LZ4 = "lz4" + # lzo is not implemented yet + # https://github.com/apache/arrow-rs/issues/6970 + # LZO = "lzo" # noqa: ERA001 + ZSTD = "zstd" + LZ4_RAW = "lz4_raw" + + @classmethod + def from_str(cls: type[Compression], value: str) -> Compression: + """Convert a string to a Compression enum value. + + Args: + value: The string representation of the compression type. + + Returns: + The Compression enum lowercase value. + + Raises: + ValueError: If the string does not match any Compression enum value. + """ + try: + return cls(value.lower()) + except ValueError as err: + valid_values = str([item.value for item in Compression]) + error_msg = f""" + {value} is not a valid Compression. + Valid values are: {valid_values} + """ + raise ValueError(error_msg) from err + + def get_default_level(self) -> int | None: + """Get the default compression level for the compression type. + + Returns: + The default compression level for the compression type. + """ + # GZIP, BROTLI default values from deltalake repo + # https://github.com/apache/datafusion-python/pull/981#discussion_r1905619163 + # ZSTD default value from delta-rs + # https://github.com/apache/datafusion-python/pull/981#discussion_r1904789223 + if self == Compression.GZIP: + return 6 + if self == Compression.BROTLI: + return 1 + if self == Compression.ZSTD: + return 4 + return None + + +class ParquetWriterOptions: + """Advanced parquet writer options. + + Allows settings the writer options that apply to the entire file. Some options can + also be set on a column by column basis, with the field ``column_specific_options`` + (see ``ParquetColumnOptions``). 
+ """ + + def __init__( + self, + data_pagesize_limit: int = 1024 * 1024, + write_batch_size: int = 1024, + writer_version: str = "1.0", + skip_arrow_metadata: bool = False, + compression: str | None = "zstd(3)", + compression_level: int | None = None, + dictionary_enabled: bool | None = True, + dictionary_page_size_limit: int = 1024 * 1024, + statistics_enabled: str | None = "page", + max_row_group_size: int = 1024 * 1024, + created_by: str = "datafusion-python", + column_index_truncate_length: int | None = 64, + statistics_truncate_length: int | None = None, + data_page_row_count_limit: int = 20_000, + encoding: str | None = None, + bloom_filter_on_write: bool = False, + bloom_filter_fpp: float | None = None, + bloom_filter_ndv: int | None = None, + allow_single_file_parallelism: bool = True, + maximum_parallel_row_group_writers: int = 1, + maximum_buffered_record_batches_per_stream: int = 2, + column_specific_options: dict[str, ParquetColumnOptions] | None = None, + ) -> None: + """Initialize the ParquetWriterOptions. + + Args: + data_pagesize_limit: Sets best effort maximum size of data page in bytes. + write_batch_size: Sets write_batch_size in bytes. + writer_version: Sets parquet writer version. Valid values are ``1.0`` and + ``2.0``. + skip_arrow_metadata: Skip encoding the embedded arrow metadata in the + KV_meta. + compression: Compression type to use. Default is ``zstd(3)``. + Available compression types are + + - ``uncompressed``: No compression. + - ``snappy``: Snappy compression. + - ``gzip(n)``: Gzip compression with level n. + - ``brotli(n)``: Brotli compression with level n. + - ``lz4``: LZ4 compression. + - ``lz4_raw``: LZ4_RAW compression. + - ``zstd(n)``: Zstandard compression with level n. + compression_level: Compression level to set. + dictionary_enabled: Sets if dictionary encoding is enabled. If ``None``, + uses the default parquet writer setting. + dictionary_page_size_limit: Sets best effort maximum dictionary page size, + in bytes. + statistics_enabled: Sets if statistics are enabled for any column Valid + values are ``none``, ``chunk``, and ``page``. If ``None``, uses the + default parquet writer setting. + max_row_group_size: Target maximum number of rows in each row group + (defaults to 1M rows). Writing larger row groups requires more memory + to write, but can get better compression and be faster to read. + created_by: Sets "created by" property. + column_index_truncate_length: Sets column index truncate length. + statistics_truncate_length: Sets statistics truncate length. If ``None``, + uses the default parquet writer setting. + data_page_row_count_limit: Sets best effort maximum number of rows in a data + page. + encoding: Sets default encoding for any column. Valid values are ``plain``, + ``plain_dictionary``, ``rle``, ``bit_packed``, ``delta_binary_packed``, + ``delta_length_byte_array``, ``delta_byte_array``, ``rle_dictionary``, + and ``byte_stream_split``. If ``None``, uses the default parquet writer + setting. + bloom_filter_on_write: Write bloom filters for all columns when creating + parquet files. + bloom_filter_fpp: Sets bloom filter false positive probability. If ``None``, + uses the default parquet writer setting + bloom_filter_ndv: Sets bloom filter number of distinct values. If ``None``, + uses the default parquet writer setting. + allow_single_file_parallelism: Controls whether DataFusion will attempt to + speed up writing parquet files by serializing them in parallel. 
+                Each column in each row group in each output file is serialized in
+                parallel, leveraging a maximum possible core count of
+                ``n_files * n_row_groups * n_columns``.
+            maximum_parallel_row_group_writers: By default the parallel parquet writer
+                is tuned for minimum memory usage in a streaming execution plan. You
+                may see a performance benefit when writing large parquet files by
+                increasing ``maximum_parallel_row_group_writers`` and
+                ``maximum_buffered_record_batches_per_stream`` if your system has idle
+                cores and can tolerate additional memory usage. Boosting these values
+                is likely worthwhile when writing out already in-memory data, such as
+                from a cached data frame.
+            maximum_buffered_record_batches_per_stream: See
+                ``maximum_parallel_row_group_writers``.
+            column_specific_options: Overrides options for specific columns. If a
+                column is not a part of this dictionary, it will use the parameters
+                provided here.
+        """
+        self.data_pagesize_limit = data_pagesize_limit
+        self.write_batch_size = write_batch_size
+        self.writer_version = writer_version
+        self.skip_arrow_metadata = skip_arrow_metadata
+        if compression_level is not None:
+            self.compression = f"{compression}({compression_level})"
+        else:
+            self.compression = compression
+        self.dictionary_enabled = dictionary_enabled
+        self.dictionary_page_size_limit = dictionary_page_size_limit
+        self.statistics_enabled = statistics_enabled
+        self.max_row_group_size = max_row_group_size
+        self.created_by = created_by
+        self.column_index_truncate_length = column_index_truncate_length
+        self.statistics_truncate_length = statistics_truncate_length
+        self.data_page_row_count_limit = data_page_row_count_limit
+        self.encoding = encoding
+        self.bloom_filter_on_write = bloom_filter_on_write
+        self.bloom_filter_fpp = bloom_filter_fpp
+        self.bloom_filter_ndv = bloom_filter_ndv
+        self.allow_single_file_parallelism = allow_single_file_parallelism
+        self.maximum_parallel_row_group_writers = maximum_parallel_row_group_writers
+        self.maximum_buffered_record_batches_per_stream = (
+            maximum_buffered_record_batches_per_stream
+        )
+        self.column_specific_options = column_specific_options
+
+
+class ParquetColumnOptions:
+    """Parquet options for individual columns.
+
+    Contains the available options that can be applied for an individual Parquet
+    column, replacing the global options in ``ParquetWriterOptions``.
+    """
+
+    def __init__(
+        self,
+        encoding: str | None = None,
+        dictionary_enabled: bool | None = None,
+        compression: str | None = None,
+        statistics_enabled: str | None = None,
+        bloom_filter_enabled: bool | None = None,
+        bloom_filter_fpp: float | None = None,
+        bloom_filter_ndv: int | None = None,
+    ) -> None:
+        """Initialize the ParquetColumnOptions.
+
+        Args:
+            encoding: Sets encoding for the column path. Valid values are: ``plain``,
+                ``plain_dictionary``, ``rle``, ``bit_packed``, ``delta_binary_packed``,
+                ``delta_length_byte_array``, ``delta_byte_array``, ``rle_dictionary``,
+                and ``byte_stream_split``. These values are not case-sensitive. If
+                ``None``, uses the default parquet options.
+            dictionary_enabled: Sets if dictionary encoding is enabled for the column
+                path. If ``None``, uses the default parquet options.
+            compression: Sets default parquet compression codec for the column path.
+                Valid values are ``uncompressed``, ``snappy``, ``gzip(level)``, ``lzo``,
+                ``brotli(level)``, ``lz4``, ``zstd(level)``, and ``lz4_raw``. These
+                values are not case-sensitive. If ``None``, uses the default parquet
+                options.
+            statistics_enabled: Sets if statistics are enabled for the column. Valid
+                values are ``none``, ``chunk``, and ``page``. These values are not
+                case-sensitive. If ``None``, uses the default parquet options.
+            bloom_filter_enabled: Sets if bloom filter is enabled for the column path.
+                If ``None``, uses the default parquet options.
+            bloom_filter_fpp: Sets bloom filter false positive probability for the
+                column path. If ``None``, uses the default parquet options.
+            bloom_filter_ndv: Sets bloom filter number of distinct values. If ``None``,
+                uses the default parquet options.
+        """
+        self.encoding = encoding
+        self.dictionary_enabled = dictionary_enabled
+        self.compression = compression
+        self.statistics_enabled = statistics_enabled
+        self.bloom_filter_enabled = bloom_filter_enabled
+        self.bloom_filter_fpp = bloom_filter_fpp
+        self.bloom_filter_ndv = bloom_filter_ndv
+
+
+class DataFrame:
+    """Two dimensional table representation of data.
+
+    DataFrame objects are iterable; iterating over a DataFrame yields
+    :class:`datafusion.RecordBatch` instances lazily.
+
+    See :ref:`user_guide_concepts` in the online documentation for more information.
+    """
+
+    def __init__(self, df: DataFrameInternal) -> None:
+        """This constructor is not to be used by the end user.
+
+        See :py:class:`~datafusion.context.SessionContext` for methods to
+        create a :py:class:`DataFrame`.
+        """
+        self.df = df
+
+    def into_view(self, temporary: bool = False) -> Table:
+        """Convert ``DataFrame`` into a :class:`~datafusion.Table`.
+
+        Examples:
+            >>> from datafusion import SessionContext
+            >>> ctx = SessionContext()
+            >>> df = ctx.sql("SELECT 1 AS value")
+            >>> view = df.into_view()
+            >>> ctx.register_table("values_view", view)
+            >>> result = ctx.sql("SELECT value FROM values_view").collect()
+            >>> result[0].column("value").to_pylist()
+            [1]
+        """
+        from datafusion.catalog import Table as _Table
+
+        return _Table(self.df.into_view(temporary))
+
+    def __getitem__(self, key: str | list[str]) -> DataFrame:
+        """Return a new :py:class:`DataFrame` with the specified column or columns.
+
+        Args:
+            key: Column name or list of column names to select.
+
+        Returns:
+            DataFrame with the specified column or columns.
+        """
+        return DataFrame(self.df.__getitem__(key))
+
+    def __repr__(self) -> str:
+        """Return a string representation of the DataFrame.
+
+        Returns:
+            String representation of the DataFrame.
+        """
+        return self.df.__repr__()
+
+    def _repr_html_(self) -> str:
+        return self.df._repr_html_()
+
+    @staticmethod
+    def default_str_repr(
+        batches: list[pa.RecordBatch],
+        schema: pa.Schema,
+        has_more: bool,
+        table_uuid: str | None = None,
+    ) -> str:
+        """Return the default string representation of a DataFrame.
+
+        This method is used by the default formatter and implemented in Rust for
+        performance reasons.
+        """
+        return DataFrameInternal.default_str_repr(batches, schema, has_more, table_uuid)
+
+    def describe(self) -> DataFrame:
+        """Return the statistics for this DataFrame.
+
+        Only numeric datatypes are summarized at the moment; nulls are returned
+        for non-numeric datatypes.
+
+        The output format is modeled after pandas.
+
+        Returns:
+            A summary DataFrame containing statistics.
+        """
+        return DataFrame(self.df.describe())
+
+    def schema(self) -> pa.Schema:
+        """Return the :py:class:`pyarrow.Schema` of this DataFrame.
+
+        The output schema contains information on the name, data type, and
+        nullability for each column.
+
+        Returns:
+            Schema describing the DataFrame.
+        """
+        return self.df.schema()
+
+    @deprecated(
+        "select_columns() is deprecated. Use :py:meth:`~DataFrame.select` instead"
+    )
+    def select_columns(self, *args: str) -> DataFrame:
+        """Filter the DataFrame by columns.
+
+        Returns:
+            DataFrame only containing the specified columns.
+        """
+        return self.select(*args)
+
+    def select_exprs(self, *args: str) -> DataFrame:
+        """Project arbitrary list of expression strings into a new DataFrame.
+
+        This method will parse string expressions into logical plan expressions.
+        The output DataFrame has one column for each expression.
+
+        Returns:
+            DataFrame containing one column for each expression.
+        """
+        return DataFrame(self.df.select_exprs(*args))
+
+    def select(self, *exprs: Expr | str) -> DataFrame:
+        """Project arbitrary expressions into a new :py:class:`DataFrame`.
+
+        Args:
+            exprs: Either column names or :py:class:`~datafusion.expr.Expr` to select.
+
+        Returns:
+            DataFrame after projection. It has one column for each expression.
+
+        Example usage:
+
+        The following example will return 3 columns from the original dataframe.
+        The first two columns will be the original column ``a`` and ``b`` since the
+        string "a" is assumed to refer to column selection. Also a duplicate of
+        column ``a`` will be returned with the column name ``alternate_a``::
+
+            df = df.select("a", col("b"), col("a").alias("alternate_a"))
+
+        """
+        exprs_internal = expr_list_to_raw_expr_list(exprs)
+        return DataFrame(self.df.select(*exprs_internal))
+
+    def drop(self, *columns: str) -> DataFrame:
+        """Drop an arbitrary number of columns.
+
+        Column names are case-sensitive and require double quotes to be dropped
+        if the original name is not strictly lower case.
+
+        Args:
+            columns: Column names to drop from the dataframe.
+
+        Returns:
+            DataFrame with those columns removed in the projection.
+
+        Example Usage::
+            df.drop('a')  # To drop a lower-cased column 'a'
+            df.drop('"a"')  # To drop an upper-cased column 'A'
+        """
+        return DataFrame(self.df.drop(*columns))
+
+    def filter(self, *predicates: Expr | str) -> DataFrame:
+        """Return a DataFrame for which ``predicate`` evaluates to ``True``.
+
+        Rows for which ``predicate`` evaluates to ``False`` or ``None`` are filtered
+        out. If more than one predicate is provided, these predicates will be
+        combined as a logical AND. Each ``predicate`` can be an
+        :class:`~datafusion.expr.Expr` created using helper functions such as
+        :func:`datafusion.col` or :func:`datafusion.lit`, or a SQL expression string
+        that will be parsed against the DataFrame schema. If more complex logic is
+        required, see the logical operations in :py:mod:`~datafusion.functions`.
+
+        Example::
+
+            from datafusion import col, lit
+            df.filter(col("a") > lit(1))
+            df.filter("a > 1")
+
+        Args:
+            predicates: Predicate expression(s) or SQL strings to filter the DataFrame.
+
+        Returns:
+            DataFrame after filtering.
+        """
+        df = self.df
+        for predicate in predicates:
+            expr = (
+                self.parse_sql_expr(predicate)
+                if isinstance(predicate, str)
+                else predicate
+            )
+            df = df.filter(ensure_expr(expr))
+        return DataFrame(df)
+
+    def parse_sql_expr(self, expr: str) -> Expr:
+        """Creates logical expression from a SQL query text.
+
+        The expression is created and processed against the current schema.
+
+        Example::
+
+            from datafusion import col, lit
+            df.parse_sql_expr("a > 1")
+
+        should produce:
+
+            col("a") > lit(1)
+
+        Args:
+            expr: Expression string to be converted to a datafusion expression.
+
+        Returns:
+            Logical expression.
+ """ + return Expr(self.df.parse_sql_expr(expr)) + + def with_column(self, name: str, expr: Expr | str) -> DataFrame: + """Add an additional column to the DataFrame. + + The ``expr`` must be an :class:`~datafusion.expr.Expr` constructed with + :func:`datafusion.col` or :func:`datafusion.lit`, or a SQL expression + string that will be parsed against the DataFrame schema. + + Example:: + + from datafusion import col, lit + df.with_column("b", col("a") + lit(1)) + + Args: + name: Name of the column to add. + expr: Expression to compute the column. + + Returns: + DataFrame with the new column. + """ + expr = self.parse_sql_expr(expr) if isinstance(expr, str) else expr + + return DataFrame(self.df.with_column(name, ensure_expr(expr))) + + def with_columns( + self, *exprs: Expr | str | Iterable[Expr | str], **named_exprs: Expr | str + ) -> DataFrame: + """Add columns to the DataFrame. + + By passing expressions, iterables of expressions, string SQL expressions, + or named expressions. + All expressions must be :class:`~datafusion.expr.Expr` objects created via + :func:`datafusion.col` or :func:`datafusion.lit`, or SQL expression strings. + To pass named expressions use the form ``name=Expr``. + + Example usage: The following will add 4 columns labeled ``a``, ``b``, ``c``, + and ``d``:: + + from datafusion import col, lit + df = df.with_columns( + col("x").alias("a"), + [lit(1).alias("b"), col("y").alias("c")], + d=lit(3) + ) + + Equivalent example using just SQL strings: + + df = df.with_columns( + "x as a", + ["1 as b", "y as c"], + d="3" + ) + + Args: + exprs: Either a single expression, an iterable of expressions to add or + SQL expression strings. + named_exprs: Named expressions in the form of ``name=expr`` + + Returns: + DataFrame with the new columns added. + """ + expressions = [] + for expr in exprs: + if isinstance(expr, str): + expressions.append(self.parse_sql_expr(expr).expr) + elif isinstance(expr, Iterable) and not isinstance( + expr, Expr | str | bytes | bytearray + ): + expressions.extend( + [ + self.parse_sql_expr(e).expr + if isinstance(e, str) + else ensure_expr(e) + for e in expr + ] + ) + else: + expressions.append(ensure_expr(expr)) + + for alias, expr in named_exprs.items(): + e = self.parse_sql_expr(expr) if isinstance(expr, str) else expr + ensure_expr(e) + expressions.append(e.alias(alias).expr) + + return DataFrame(self.df.with_columns(expressions)) + + def with_column_renamed(self, old_name: str, new_name: str) -> DataFrame: + r"""Rename one column by applying a new projection. + + This is a no-op if the column to be renamed does not exist. + + The method supports case sensitive rename with wrapping column name + into one the following symbols (" or ' or \`). + + Args: + old_name: Old column name. + new_name: New column name. + + Returns: + DataFrame with the column renamed. + """ + return DataFrame(self.df.with_column_renamed(old_name, new_name)) + + def aggregate( + self, + group_by: Sequence[Expr | str] | Expr | str, + aggs: Sequence[Expr] | Expr, + ) -> DataFrame: + """Aggregates the rows of the current DataFrame. + + Args: + group_by: Sequence of expressions or column names to group by. + aggs: Sequence of expressions to aggregate. + + Returns: + DataFrame after aggregation. 
+ """ + group_by_list = ( + list(group_by) + if isinstance(group_by, Sequence) and not isinstance(group_by, Expr | str) + else [group_by] + ) + aggs_list = ( + list(aggs) + if isinstance(aggs, Sequence) and not isinstance(aggs, Expr) + else [aggs] + ) + + group_by_exprs = expr_list_to_raw_expr_list(group_by_list) + aggs_exprs = ensure_expr_list(aggs_list) + return DataFrame(self.df.aggregate(group_by_exprs, aggs_exprs)) + + def sort(self, *exprs: SortKey) -> DataFrame: + """Sort the DataFrame by the specified sorting expressions or column names. + + Note that any expression can be turned into a sort expression by + calling its ``sort`` method. + + Args: + exprs: Sort expressions or column names, applied in order. + + Returns: + DataFrame after sorting. + """ + exprs_raw = sort_list_to_raw_sort_list(exprs) + return DataFrame(self.df.sort(*exprs_raw)) + + def cast(self, mapping: dict[str, pa.DataType[Any]]) -> DataFrame: + """Cast one or more columns to a different data type. + + Args: + mapping: Mapped with column as key and column dtype as value. + + Returns: + DataFrame after casting columns + """ + exprs = [Expr.column(col).cast(dtype) for col, dtype in mapping.items()] + return self.with_columns(exprs) + + def limit(self, count: int, offset: int = 0) -> DataFrame: + """Return a new :py:class:`DataFrame` with a limited number of rows. + + Args: + count: Number of rows to limit the DataFrame to. + offset: Number of rows to skip. + + Returns: + DataFrame after limiting. + """ + return DataFrame(self.df.limit(count, offset)) + + def head(self, n: int = 5) -> DataFrame: + """Return a new :py:class:`DataFrame` with a limited number of rows. + + Args: + n: Number of rows to take from the head of the DataFrame. + + Returns: + DataFrame after limiting. + """ + return DataFrame(self.df.limit(n, 0)) + + def tail(self, n: int = 5) -> DataFrame: + """Return a new :py:class:`DataFrame` with a limited number of rows. + + Be aware this could be potentially expensive since the row size needs to be + determined of the dataframe. This is done by collecting it. + + Args: + n: Number of rows to take from the tail of the DataFrame. + + Returns: + DataFrame after limiting. + """ + return DataFrame(self.df.limit(n, max(0, self.count() - n))) + + def collect(self) -> list[pa.RecordBatch]: + """Execute this :py:class:`DataFrame` and collect results into memory. + + Prior to calling ``collect``, modifying a DataFrame simply updates a plan + (no actual computation is performed). Calling ``collect`` triggers the + computation. + + Returns: + List of :py:class:`pyarrow.RecordBatch` collected from the DataFrame. + """ + return self.df.collect() + + def collect_column(self, column_name: str) -> pa.Array | pa.ChunkedArray: + """Executes this :py:class:`DataFrame` for a single column.""" + return self.df.collect_column(column_name) + + def cache(self) -> DataFrame: + """Cache the DataFrame as a memory table. + + Returns: + Cached DataFrame. + """ + return DataFrame(self.df.cache()) + + def collect_partitioned(self) -> list[list[pa.RecordBatch]]: + """Execute this DataFrame and collect all partitioned results. + + This operation returns :py:class:`pyarrow.RecordBatch` maintaining the input + partitioning. + + Returns: + List of list of :py:class:`RecordBatch` collected from the + DataFrame. + """ + return self.df.collect_partitioned() + + def show(self, num: int = 20) -> None: + """Execute the DataFrame and print the result to the console. + + Args: + num: Number of lines to show. 
+ """ + self.df.show(num) + + def distinct(self) -> DataFrame: + """Return a new :py:class:`DataFrame` with all duplicated rows removed. + + Returns: + DataFrame after removing duplicates. + """ + return DataFrame(self.df.distinct()) + + @overload + def join( + self, + right: DataFrame, + on: str | Sequence[str], + how: Literal["inner", "left", "right", "full", "semi", "anti"] = "inner", + *, + left_on: None = None, + right_on: None = None, + join_keys: None = None, + coalesce_duplicate_keys: bool = True, + ) -> DataFrame: ... + + @overload + def join( + self, + right: DataFrame, + on: None = None, + how: Literal["inner", "left", "right", "full", "semi", "anti"] = "inner", + *, + left_on: str | Sequence[str], + right_on: str | Sequence[str], + join_keys: tuple[list[str], list[str]] | None = None, + coalesce_duplicate_keys: bool = True, + ) -> DataFrame: ... + + @overload + def join( + self, + right: DataFrame, + on: None = None, + how: Literal["inner", "left", "right", "full", "semi", "anti"] = "inner", + *, + join_keys: tuple[list[str], list[str]], + left_on: None = None, + right_on: None = None, + coalesce_duplicate_keys: bool = True, + ) -> DataFrame: ... + + def join( + self, + right: DataFrame, + on: str | Sequence[str] | tuple[list[str], list[str]] | None = None, + how: Literal["inner", "left", "right", "full", "semi", "anti"] = "inner", + *, + left_on: str | Sequence[str] | None = None, + right_on: str | Sequence[str] | None = None, + join_keys: tuple[list[str], list[str]] | None = None, + coalesce_duplicate_keys: bool = True, + ) -> DataFrame: + """Join this :py:class:`DataFrame` with another :py:class:`DataFrame`. + + `on` has to be provided or both `left_on` and `right_on` in conjunction. + + Args: + right: Other DataFrame to join with. + on: Column names to join on in both dataframes. + how: Type of join to perform. Supported types are "inner", "left", + "right", "full", "semi", "anti". + left_on: Join column of the left dataframe. + right_on: Join column of the right dataframe. + coalesce_duplicate_keys: When True, coalesce the columns + from the right DataFrame and left DataFrame + that have identical names in the ``on`` fields. + join_keys: Tuple of two lists of column names to join on. [Deprecated] + + Returns: + DataFrame after join. + """ + if join_keys is not None: + warnings.warn( + "`join_keys` is deprecated, use `on` or `left_on` with `right_on`", + category=DeprecationWarning, + stacklevel=2, + ) + left_on = join_keys[0] + right_on = join_keys[1] + + # This check is to prevent breaking API changes where users prior to + # DF 43.0.0 would pass the join_keys as a positional argument instead + # of a keyword argument. + if ( + isinstance(on, tuple) + and len(on) == 2 # noqa: PLR2004 + and isinstance(on[0], list) + and isinstance(on[1], list) + ): + # We know this is safe because we've checked the types + left_on = on[0] + right_on = on[1] + on = None + + if on is not None: + if left_on is not None or right_on is not None: + error_msg = "`left_on` or `right_on` should not provided with `on`" + raise ValueError(error_msg) + left_on = on + right_on = on + elif left_on is not None or right_on is not None: + if left_on is None or right_on is None: + error_msg = "`left_on` and `right_on` should both be provided." + raise ValueError(error_msg) + else: + error_msg = "either `on` or `left_on` and `right_on` should be provided." 
+ raise ValueError(error_msg) + if isinstance(left_on, str): + left_on = [left_on] + if isinstance(right_on, str): + right_on = [right_on] + + return DataFrame( + self.df.join(right.df, how, left_on, right_on, coalesce_duplicate_keys) + ) + + def join_on( + self, + right: DataFrame, + *on_exprs: Expr, + how: Literal["inner", "left", "right", "full", "semi", "anti"] = "inner", + ) -> DataFrame: + """Join two :py:class:`DataFrame` using the specified expressions. + + Join predicates must be :class:`~datafusion.expr.Expr` objects, typically + built with :func:`datafusion.col`. On expressions are used to support + in-equality predicates. Equality predicates are correctly optimized. + + Example:: + + from datafusion import col + df.join_on(other_df, col("id") == col("other_id")) + + Args: + right: Other DataFrame to join with. + on_exprs: single or multiple (in)-equality predicates. + how: Type of join to perform. Supported types are "inner", "left", + "right", "full", "semi", "anti". + + Returns: + DataFrame after join. + """ + exprs = [ensure_expr(expr) for expr in on_exprs] + return DataFrame(self.df.join_on(right.df, exprs, how)) + + def explain(self, verbose: bool = False, analyze: bool = False) -> None: + """Print an explanation of the DataFrame's plan so far. + + If ``analyze`` is specified, runs the plan and reports metrics. + + Args: + verbose: If ``True``, more details will be included. + analyze: If ``True``, the plan will run and metrics reported. + """ + self.df.explain(verbose, analyze) + + def logical_plan(self) -> LogicalPlan: + """Return the unoptimized ``LogicalPlan``. + + Returns: + Unoptimized logical plan. + """ + return LogicalPlan(self.df.logical_plan()) + + def optimized_logical_plan(self) -> LogicalPlan: + """Return the optimized ``LogicalPlan``. + + Returns: + Optimized logical plan. + """ + return LogicalPlan(self.df.optimized_logical_plan()) + + def execution_plan(self) -> ExecutionPlan: + """Return the execution/physical plan. + + Returns: + Execution plan. + """ + return ExecutionPlan(self.df.execution_plan()) + + def repartition(self, num: int) -> DataFrame: + """Repartition a DataFrame into ``num`` partitions. + + The batches allocation uses a round-robin algorithm. + + Args: + num: Number of partitions to repartition the DataFrame into. + + Returns: + Repartitioned DataFrame. + """ + return DataFrame(self.df.repartition(num)) + + def repartition_by_hash(self, *exprs: Expr | str, num: int) -> DataFrame: + """Repartition a DataFrame using a hash partitioning scheme. + + Args: + exprs: Expressions or a SQL expression string to evaluate + and perform hashing on. + num: Number of partitions to repartition the DataFrame into. + + Returns: + Repartitioned DataFrame. + """ + exprs = [self.parse_sql_expr(e) if isinstance(e, str) else e for e in exprs] + exprs = expr_list_to_raw_expr_list(exprs) + + return DataFrame(self.df.repartition_by_hash(*exprs, num=num)) + + def union(self, other: DataFrame, distinct: bool = False) -> DataFrame: + """Calculate the union of two :py:class:`DataFrame`. + + The two :py:class:`DataFrame` must have exactly the same schema. + + Args: + other: DataFrame to union with. + distinct: If ``True``, duplicate rows will be removed. + + Returns: + DataFrame after union. + """ + return DataFrame(self.df.union(other.df, distinct)) + + def union_distinct(self, other: DataFrame) -> DataFrame: + """Calculate the distinct union of two :py:class:`DataFrame`. + + The two :py:class:`DataFrame` must have exactly the same schema. 
+ Any duplicate rows are discarded. + + Args: + other: DataFrame to union with. + + Returns: + DataFrame after union. + """ + return DataFrame(self.df.union_distinct(other.df)) + + def intersect(self, other: DataFrame) -> DataFrame: + """Calculate the intersection of two :py:class:`DataFrame`. + + The two :py:class:`DataFrame` must have exactly the same schema. + + Args: + other: DataFrame to intersect with. + + Returns: + DataFrame after intersection. + """ + return DataFrame(self.df.intersect(other.df)) + + def except_all(self, other: DataFrame) -> DataFrame: + """Calculate the exception of two :py:class:`DataFrame`. + + The two :py:class:`DataFrame` must have exactly the same schema. + + Args: + other: DataFrame to calculate exception with. + + Returns: + DataFrame after exception. + """ + return DataFrame(self.df.except_all(other.df)) + + def write_csv( + self, + path: str | pathlib.Path, + with_header: bool = False, + write_options: DataFrameWriteOptions | None = None, + ) -> None: + """Execute the :py:class:`DataFrame` and write the results to a CSV file. + + Args: + path: Path of the CSV file to write. + with_header: If true, output the CSV header row. + write_options: Options that impact how the DataFrame is written. + """ + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_csv(str(path), with_header, raw_write_options) + + @overload + def write_parquet( + self, + path: str | pathlib.Path, + compression: str, + compression_level: int | None = None, + write_options: DataFrameWriteOptions | None = None, + ) -> None: ... + + @overload + def write_parquet( + self, + path: str | pathlib.Path, + compression: Compression = Compression.ZSTD, + compression_level: int | None = None, + write_options: DataFrameWriteOptions | None = None, + ) -> None: ... + + @overload + def write_parquet( + self, + path: str | pathlib.Path, + compression: ParquetWriterOptions, + compression_level: None = None, + write_options: DataFrameWriteOptions | None = None, + ) -> None: ... + + def write_parquet( + self, + path: str | pathlib.Path, + compression: str | Compression | ParquetWriterOptions = Compression.ZSTD, + compression_level: int | None = None, + write_options: DataFrameWriteOptions | None = None, + ) -> None: + """Execute the :py:class:`DataFrame` and write the results to a Parquet file. + + Available compression types are: + + - "uncompressed": No compression. + - "snappy": Snappy compression. + - "gzip": Gzip compression. + - "brotli": Brotli compression. + - "lz4": LZ4 compression. + - "lz4_raw": LZ4_RAW compression. + - "zstd": Zstandard compression. + + LZO compression is not yet implemented in arrow-rs and is therefore + excluded. + + Args: + path: Path of the Parquet file to write. + compression: Compression type to use. Default is "ZSTD". + compression_level: Compression level to use. For ZSTD, the + recommended range is 1 to 22, with the default being 4. Higher levels + provide better compression but slower speed. + write_options: Options that impact how the DataFrame is written. 
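+
+        Example (illustrative sketch; the output path is a user choice)::
+
+            df.write_parquet(
+                "output.parquet", compression="zstd", compression_level=5
+            )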
+ """ + if isinstance(compression, ParquetWriterOptions): + if compression_level is not None: + msg = "compression_level should be None when using ParquetWriterOptions" + raise ValueError(msg) + self.write_parquet_with_options(path, compression) + return + + if isinstance(compression, str): + compression = Compression.from_str(compression) + + if ( + compression in {Compression.GZIP, Compression.BROTLI, Compression.ZSTD} + and compression_level is None + ): + compression_level = compression.get_default_level() + + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_parquet( + str(path), + compression.value, + compression_level, + raw_write_options, + ) + + def write_parquet_with_options( + self, + path: str | pathlib.Path, + options: ParquetWriterOptions, + write_options: DataFrameWriteOptions | None = None, + ) -> None: + """Execute the :py:class:`DataFrame` and write the results to a Parquet file. + + Allows advanced writer options to be set with `ParquetWriterOptions`. + + Args: + path: Path of the Parquet file to write. + options: Sets the writer parquet options (see `ParquetWriterOptions`). + write_options: Options that impact how the DataFrame is written. + """ + options_internal = ParquetWriterOptionsInternal( + options.data_pagesize_limit, + options.write_batch_size, + options.writer_version, + options.skip_arrow_metadata, + options.compression, + options.dictionary_enabled, + options.dictionary_page_size_limit, + options.statistics_enabled, + options.max_row_group_size, + options.created_by, + options.column_index_truncate_length, + options.statistics_truncate_length, + options.data_page_row_count_limit, + options.encoding, + options.bloom_filter_on_write, + options.bloom_filter_fpp, + options.bloom_filter_ndv, + options.allow_single_file_parallelism, + options.maximum_parallel_row_group_writers, + options.maximum_buffered_record_batches_per_stream, + ) + + column_specific_options_internal = {} + for column, opts in (options.column_specific_options or {}).items(): + column_specific_options_internal[column] = ParquetColumnOptionsInternal( + bloom_filter_enabled=opts.bloom_filter_enabled, + encoding=opts.encoding, + dictionary_enabled=opts.dictionary_enabled, + compression=opts.compression, + statistics_enabled=opts.statistics_enabled, + bloom_filter_fpp=opts.bloom_filter_fpp, + bloom_filter_ndv=opts.bloom_filter_ndv, + ) + + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_parquet_with_options( + str(path), + options_internal, + column_specific_options_internal, + raw_write_options, + ) + + def write_json( + self, + path: str | pathlib.Path, + write_options: DataFrameWriteOptions | None = None, + ) -> None: + """Execute the :py:class:`DataFrame` and write the results to a JSON file. + + Args: + path: Path of the JSON file to write. + write_options: Options that impact how the DataFrame is written. + """ + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_json(str(path), write_options=raw_write_options) + + def write_table( + self, table_name: str, write_options: DataFrameWriteOptions | None = None + ) -> None: + """Execute the :py:class:`DataFrame` and write the results to a table. + + The table must be registered with the session to perform this operation. + Not all table providers support writing operations. See the individual + implementations for details. 
+ """ + raw_write_options = ( + write_options._raw_write_options if write_options is not None else None + ) + self.df.write_table(table_name, raw_write_options) + + def to_arrow_table(self) -> pa.Table: + """Execute the :py:class:`DataFrame` and convert it into an Arrow Table. + + Returns: + Arrow Table. + """ + return self.df.to_arrow_table() + + def execute_stream(self) -> RecordBatchStream: + """Executes this DataFrame and returns a stream over a single partition. + + Returns: + Record Batch Stream over a single partition. + """ + return RecordBatchStream(self.df.execute_stream()) + + def execute_stream_partitioned(self) -> list[RecordBatchStream]: + """Executes this DataFrame and returns a stream for each partition. + + Returns: + One record batch stream per partition. + """ + streams = self.df.execute_stream_partitioned() + return [RecordBatchStream(rbs) for rbs in streams] + + def to_pandas(self) -> pd.DataFrame: + """Execute the :py:class:`DataFrame` and convert it into a Pandas DataFrame. + + Returns: + Pandas DataFrame. + """ + return self.df.to_pandas() + + def to_pylist(self) -> list[dict[str, Any]]: + """Execute the :py:class:`DataFrame` and convert it into a list of dictionaries. + + Returns: + List of dictionaries. + """ + return self.df.to_pylist() + + def to_pydict(self) -> dict[str, list[Any]]: + """Execute the :py:class:`DataFrame` and convert it into a dictionary of lists. + + Returns: + Dictionary of lists. + """ + return self.df.to_pydict() + + def to_polars(self) -> pl.DataFrame: + """Execute the :py:class:`DataFrame` and convert it into a Polars DataFrame. + + Returns: + Polars DataFrame. + """ + return self.df.to_polars() + + def count(self) -> int: + """Return the total number of rows in this :py:class:`DataFrame`. + + Note that this method will actually run a plan to calculate the + count, which may be slow for large or complicated DataFrames. + + Returns: + Number of rows in the DataFrame. + """ + return self.df.count() + + @deprecated("Use :py:func:`unnest_columns` instead.") + def unnest_column(self, column: str, preserve_nulls: bool = True) -> DataFrame: + """See :py:func:`unnest_columns`.""" + return DataFrame(self.df.unnest_column(column, preserve_nulls=preserve_nulls)) + + def unnest_columns(self, *columns: str, preserve_nulls: bool = True) -> DataFrame: + """Expand columns of arrays into a single row per array element. + + Args: + columns: Column names to perform unnest operation on. + preserve_nulls: If False, rows with null entries will not be + returned. + + Returns: + A DataFrame with the columns expanded. + """ + columns = list(columns) + return DataFrame(self.df.unnest_columns(columns, preserve_nulls=preserve_nulls)) + + def __arrow_c_stream__(self, requested_schema: object | None = None) -> object: + """Export the DataFrame as an Arrow C Stream. + + The DataFrame is executed using DataFusion's streaming APIs and exposed via + Arrow's C Stream interface. Record batches are produced incrementally, so the + full result set is never materialized in memory. + + When ``requested_schema`` is provided, DataFusion applies only simple + projections such as selecting a subset of existing columns or reordering + them. Column renaming, computed expressions, or type coercion are not + supported through this interface. + + Args: + requested_schema: Either a :py:class:`pyarrow.Schema` or an Arrow C + Schema capsule (``PyCapsule``) produced by + ``schema._export_to_c_capsule()``. 
The DataFrame will attempt to
+                align its output with the fields and order specified by this schema.
+
+        Returns:
+            Arrow ``PyCapsule`` object representing an ``ArrowArrayStream``.
+
+            For practical usage patterns, see the Apache Arrow streaming
+            documentation: https://arrow.apache.org/docs/python/ipc.html#streaming.
+
+            For details on DataFusion's Arrow integration and DataFrame streaming,
+            see the user guide (user-guide/io/arrow and user-guide/dataframe/index).
+
+        Notes:
+            The Arrow C Data Interface PyCapsule details are documented by Apache
+            Arrow and can be found at:
+            https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html
+        """
+        # ``DataFrame.__arrow_c_stream__`` in the Rust extension leverages
+        # ``execute_stream_partitioned`` under the hood to stream batches while
+        # preserving the original partition order.
+        return self.df.__arrow_c_stream__(requested_schema)
+
+    def __iter__(self) -> Iterator[RecordBatch]:
+        """Return an iterator over this DataFrame's record batches."""
+        return iter(self.execute_stream())
+
+    def __aiter__(self) -> AsyncIterator[RecordBatch]:
+        """Return an async iterator over this DataFrame's record batches.
+
+        We're using __aiter__ because we support Python < 3.10 where aiter() is not
+        available.
+        """
+        return self.execute_stream().__aiter__()
+
+    def transform(self, func: Callable[..., DataFrame], *args: Any) -> DataFrame:
+        """Apply a function to the current DataFrame which returns another DataFrame.
+
+        This is useful for chaining together multiple functions. For example::
+
+            def add_3(df: DataFrame) -> DataFrame:
+                return df.with_column("modified", lit(3))
+
+            def within_limit(df: DataFrame, limit: int) -> DataFrame:
+                return df.filter(col("a") < lit(limit)).distinct()
+
+            df = df.transform(add_3).transform(within_limit, 4)
+
+        Args:
+            func: A callable function that takes a DataFrame as its first argument.
+            args: Zero or more arguments to pass to `func`.
+
+        Returns:
+            DataFrame: After applying func to the original dataframe.
+        """
+        return func(self, *args)
+
+    def fill_null(self, value: Any, subset: list[str] | None = None) -> DataFrame:
+        """Fill null values in specified columns with a value.
+
+        Args:
+            value: Value to replace nulls with. Will be cast to match column type.
+            subset: Optional list of column names to fill. If None, fills all columns.
+
+        Returns:
+            DataFrame with null values replaced where type casting is possible.
+
+        Examples:
+            >>> from datafusion import SessionContext, col
+            >>> ctx = SessionContext()
+            >>> df = ctx.from_pydict({"a": [1, None, 3], "b": [None, 5, 6]})
+            >>> filled = df.fill_null(0)
+            >>> filled.sort(col("a")).collect()[0].column("a").to_pylist()
+            [0, 1, 3]
+
+        Notes:
+            - Only fills nulls in columns where the value can be cast to the column type
+            - For columns where casting fails, the original column is kept unchanged
+            - For columns not in subset, the original column is kept unchanged
+        """
+        return DataFrame(self.df.fill_null(value, subset))
+
+
+class InsertOp(Enum):
+    """Insert operation mode.
+
+    These modes are used by the table writing feature to define how record
+    batches should be written to a table.
+    """
+
+    APPEND = InsertOpInternal.APPEND
+    """Appends new rows to the existing table without modifying any existing rows."""
+
+    REPLACE = InsertOpInternal.REPLACE
+    """Replace existing rows that collide with the inserted rows.
+
+    Replacement is typically based on a unique key or primary key.
+ """ + + OVERWRITE = InsertOpInternal.OVERWRITE + """Overwrites all existing rows in the table with the new rows.""" + + +class DataFrameWriteOptions: + """Writer options for DataFrame. + + There is no guarantee the table provider supports all writer options. + See the individual implementation and documentation for details. + """ + + def __init__( + self, + insert_operation: InsertOp | None = None, + single_file_output: bool = False, + partition_by: str | Sequence[str] | None = None, + sort_by: Expr | SortExpr | Sequence[Expr] | Sequence[SortExpr] | None = None, + ) -> None: + """Instantiate writer options for DataFrame.""" + if isinstance(partition_by, str): + partition_by = [partition_by] + + sort_by_raw = sort_list_to_raw_sort_list(sort_by) + insert_op = insert_operation.value if insert_operation is not None else None + + self._raw_write_options = DataFrameWriteOptionsInternal( + insert_op, single_file_output, partition_by, sort_by_raw + ) diff --git a/python/datafusion/dataframe_formatter.py b/python/datafusion/dataframe_formatter.py new file mode 100644 index 000000000..b8af45a1b --- /dev/null +++ b/python/datafusion/dataframe_formatter.py @@ -0,0 +1,843 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +"""HTML formatting utilities for DataFusion DataFrames.""" + +from __future__ import annotations + +import warnings +from typing import ( + TYPE_CHECKING, + Any, + Protocol, + runtime_checkable, +) + +from datafusion._internal import DataFrame as DataFrameInternal + +if TYPE_CHECKING: + from collections.abc import Callable + + +def _validate_positive_int(value: Any, param_name: str) -> None: + """Validate that a parameter is a positive integer. + + Args: + value: The value to validate + param_name: Name of the parameter (used in error message) + + Raises: + ValueError: If the value is not a positive integer + """ + if not isinstance(value, int) or value <= 0: + msg = f"{param_name} must be a positive integer" + raise ValueError(msg) + + +def _validate_bool(value: Any, param_name: str) -> None: + """Validate that a parameter is a boolean. + + Args: + value: The value to validate + param_name: Name of the parameter (used in error message) + + Raises: + TypeError: If the value is not a boolean + """ + if not isinstance(value, bool): + msg = f"{param_name} must be a boolean" + raise TypeError(msg) + + +def _validate_formatter_parameters( + max_cell_length: int, + max_width: int, + max_height: int, + max_memory_bytes: int, + min_rows: int, + max_rows: int | None, + repr_rows: int | None, + enable_cell_expansion: bool, + show_truncation_message: bool, + use_shared_styles: bool, + custom_css: str | None, + style_provider: Any, +) -> int: + """Validate all formatter parameters and return resolved max_rows value. 
+ + Args: + max_cell_length: Maximum cell length value to validate + max_width: Maximum width value to validate + max_height: Maximum height value to validate + max_memory_bytes: Maximum memory bytes value to validate + min_rows: Minimum rows to display value to validate + max_rows: Maximum rows value to validate (None means use default) + repr_rows: Deprecated repr_rows value to validate + enable_cell_expansion: Boolean expansion flag to validate + show_truncation_message: Boolean message flag to validate + use_shared_styles: Boolean styles flag to validate + custom_css: Custom CSS string to validate + style_provider: Style provider object to validate + + Returns: + The resolved max_rows value after handling repr_rows deprecation + + Raises: + ValueError: If any numeric parameter is invalid or constraints are violated + TypeError: If any parameter has invalid type + DeprecationWarning: If repr_rows parameter is used + """ + # Validate numeric parameters + _validate_positive_int(max_cell_length, "max_cell_length") + _validate_positive_int(max_width, "max_width") + _validate_positive_int(max_height, "max_height") + _validate_positive_int(max_memory_bytes, "max_memory_bytes") + _validate_positive_int(min_rows, "min_rows") + + # Handle deprecated repr_rows parameter + if repr_rows is not None: + warnings.warn( + "repr_rows parameter is deprecated, use max_rows instead", + DeprecationWarning, + stacklevel=4, + ) + _validate_positive_int(repr_rows, "repr_rows") + if max_rows is not None and repr_rows != max_rows: + msg = "Cannot specify both repr_rows and max_rows; use max_rows only" + raise ValueError(msg) + max_rows = repr_rows + + # Use default if max_rows was not provided + if max_rows is None: + max_rows = 10 + + _validate_positive_int(max_rows, "max_rows") + + # Validate constraint: min_rows <= max_rows + if min_rows > max_rows: + msg = "min_rows must be less than or equal to max_rows" + raise ValueError(msg) + + # Validate boolean parameters + _validate_bool(enable_cell_expansion, "enable_cell_expansion") + _validate_bool(show_truncation_message, "show_truncation_message") + _validate_bool(use_shared_styles, "use_shared_styles") + + # Validate custom_css + if custom_css is not None and not isinstance(custom_css, str): + msg = "custom_css must be None or a string" + raise TypeError(msg) + + # Validate style_provider + if style_provider is not None and not isinstance(style_provider, StyleProvider): + msg = "style_provider must implement the StyleProvider protocol" + raise TypeError(msg) + + return max_rows + + +@runtime_checkable +class CellFormatter(Protocol): + """Protocol for cell value formatters.""" + + def __call__(self, value: Any) -> str: + """Format a cell value to string representation.""" + ... + + +@runtime_checkable +class StyleProvider(Protocol): + """Protocol for HTML style providers.""" + + def get_cell_style(self) -> str: + """Get the CSS style for table cells.""" + ... + + def get_header_style(self) -> str: + """Get the CSS style for header cells.""" + ... + + +class DefaultStyleProvider: + """Default implementation of StyleProvider.""" + + def get_cell_style(self) -> str: + """Get the CSS style for table cells. + + Returns: + CSS style string + """ + return ( + "border: 1px solid black; padding: 8px; text-align: left; " + "white-space: nowrap;" + ) + + def get_header_style(self) -> str: + """Get the CSS style for header cells. 
+ + Returns: + CSS style string + """ + return ( + "border: 1px solid black; padding: 8px; text-align: left; " + "background-color: #f2f2f2; white-space: nowrap; min-width: fit-content; " + "max-width: fit-content;" + ) + + +class DataFrameHtmlFormatter: + """Configurable HTML formatter for DataFusion DataFrames. + + This class handles the HTML rendering of DataFrames for display in + Jupyter notebooks and other rich display contexts. + + This class supports extension through composition. Key extension points: + - Provide a custom StyleProvider for styling cells and headers + - Register custom formatters for specific types + - Provide custom cell builders for specialized cell rendering + + Args: + max_cell_length: Maximum characters to display in a cell before truncation + max_width: Maximum width of the HTML table in pixels + max_height: Maximum height of the HTML table in pixels + max_memory_bytes: Maximum memory in bytes for rendered data (default: 2MB) + min_rows: Minimum number of rows to display (must be <= max_rows) + max_rows: Maximum number of rows to display in repr output + repr_rows: Deprecated alias for max_rows + enable_cell_expansion: Whether to add expand/collapse buttons for long cell + values + custom_css: Additional CSS to include in the HTML output + show_truncation_message: Whether to display a message when data is truncated + style_provider: Custom provider for cell and header styles + use_shared_styles: Whether to load styles and scripts only once per notebook + session + """ + + def __init__( + self, + max_cell_length: int = 25, + max_width: int = 1000, + max_height: int = 300, + max_memory_bytes: int = 2 * 1024 * 1024, # 2 MB + min_rows: int = 10, + max_rows: int | None = None, + repr_rows: int | None = None, + enable_cell_expansion: bool = True, + custom_css: str | None = None, + show_truncation_message: bool = True, + style_provider: StyleProvider | None = None, + use_shared_styles: bool = True, + ) -> None: + """Initialize the HTML formatter. + + Parameters + ---------- + max_cell_length + Maximum length of cell content before truncation. + max_width + Maximum width of the displayed table in pixels. + max_height + Maximum height of the displayed table in pixels. + max_memory_bytes + Maximum memory in bytes for rendered data. Helps prevent performance + issues with large datasets. + min_rows + Minimum number of rows to display even if memory limit is reached. + Must not exceed ``max_rows``. + max_rows + Maximum number of rows to display. Takes precedence over memory limits + when fewer rows are requested. + repr_rows + Deprecated alias for ``max_rows``. Use ``max_rows`` instead. + enable_cell_expansion + Whether to allow cells to expand when clicked. + custom_css + Custom CSS to apply to the HTML table. + show_truncation_message + Whether to show a message indicating that content has been truncated. + style_provider + Provider of CSS styles for the HTML table. If None, DefaultStyleProvider + is used. + use_shared_styles + Whether to use shared styles across multiple tables. This improves + performance when displaying many DataFrames in a single notebook. + + Raises: + ------ + ValueError + If max_cell_length, max_width, max_height, max_memory_bytes, + min_rows or max_rows is not a positive integer, or if min_rows + exceeds max_rows. 
+ TypeError + If enable_cell_expansion, show_truncation_message, or use_shared_styles is + not a boolean, or if custom_css is provided but is not a string, or if + style_provider is provided but does not implement the StyleProvider + protocol. + """ + # Validate all parameters and get resolved max_rows + resolved_max_rows = _validate_formatter_parameters( + max_cell_length, + max_width, + max_height, + max_memory_bytes, + min_rows, + max_rows, + repr_rows, + enable_cell_expansion, + show_truncation_message, + use_shared_styles, + custom_css, + style_provider, + ) + + self.max_cell_length = max_cell_length + self.max_width = max_width + self.max_height = max_height + self.max_memory_bytes = max_memory_bytes + self.min_rows = min_rows + self._max_rows = resolved_max_rows + self.enable_cell_expansion = enable_cell_expansion + self.custom_css = custom_css + self.show_truncation_message = show_truncation_message + self.style_provider = style_provider or DefaultStyleProvider() + self.use_shared_styles = use_shared_styles + # Registry for custom type formatters + self._type_formatters: dict[type, CellFormatter] = {} + # Custom cell builders + self._custom_cell_builder: Callable[[Any, int, int, str], str] | None = None + self._custom_header_builder: Callable[[Any], str] | None = None + + @property + def max_rows(self) -> int: + """Get the maximum number of rows to display. + + Returns: + The maximum number of rows to display in repr output + """ + return self._max_rows + + @max_rows.setter + def max_rows(self, value: int) -> None: + """Set the maximum number of rows to display. + + Args: + value: The maximum number of rows + """ + self._max_rows = value + + @property + def repr_rows(self) -> int: + """Get the maximum number of rows (deprecated name). + + .. deprecated:: + Use :attr:`max_rows` instead. This property is provided for + backward compatibility. + + Returns: + The maximum number of rows to display + """ + return self._max_rows + + @repr_rows.setter + def repr_rows(self, value: int) -> None: + """Set the maximum number of rows using deprecated name. + + .. deprecated:: + Use :attr:`max_rows` setter instead. This property is provided for + backward compatibility. + + Args: + value: The maximum number of rows + """ + warnings.warn( + "repr_rows is deprecated, use max_rows instead", + DeprecationWarning, + stacklevel=2, + ) + self._max_rows = value + + def register_formatter(self, type_class: type, formatter: CellFormatter) -> None: + """Register a custom formatter for a specific data type. + + Args: + type_class: The type to register a formatter for + formatter: Function that takes a value of the given type and returns + a formatted string + """ + self._type_formatters[type_class] = formatter + + def set_custom_cell_builder( + self, builder: Callable[[Any, int, int, str], str] + ) -> None: + """Set a custom cell builder function. + + Args: + builder: Function that takes (value, row, col, table_id) and returns HTML + """ + self._custom_cell_builder = builder + + def set_custom_header_builder(self, builder: Callable[[Any], str]) -> None: + """Set a custom header builder function. + + Args: + builder: Function that takes a field and returns HTML + """ + self._custom_header_builder = builder + + def format_html( + self, + batches: list, + schema: Any, + has_more: bool = False, + table_uuid: str | None = None, + ) -> str: + """Format record batches as HTML. + + This method is used by DataFrame's _repr_html_ implementation and can be + called directly when custom HTML rendering is needed. 
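+
+        Example (illustrative)::
+
+            import pyarrow as pa
+
+            batch = pa.RecordBatch.from_pydict({"a": [1, 2, 3]})
+            html = DataFrameHtmlFormatter().format_html([batch], batch.schema)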
+
+        Args:
+            batches: List of Arrow RecordBatch objects
+            schema: Arrow Schema object
+            has_more: Whether there are more batches not shown
+            table_uuid: Unique ID for the table, used for JavaScript interactions
+
+        Returns:
+            HTML string representation of the data
+
+        Raises:
+            TypeError: If ``schema`` is ``None`` or not iterable
+        """
+        if not batches:
+            return "No data to display"
+
+        # Validate schema
+        if schema is None or not hasattr(schema, "__iter__"):
+            msg = "Schema must be provided"
+            raise TypeError(msg)
+
+        # Generate a unique ID if none provided
+        table_uuid = table_uuid or f"df-{id(batches)}"
+
+        # Build HTML components
+        html = []
+
+        html.extend(self._build_html_header())
+
+        html.extend(self._build_table_container_start())
+
+        # Add table header and body
+        html.extend(self._build_table_header(schema))
+        html.extend(self._build_table_body(batches, table_uuid))
+
+        html.append("</table>")
+        html.append("</div>")
+
+        # Add footer (JavaScript and messages)
+        if self.enable_cell_expansion:
+            html.append(self._get_javascript())
+
+        # Always add truncation message if needed (independent of styles)
+        if has_more and self.show_truncation_message:
+            html.append("<div>Data truncated due to size.</div>")
+
+        return "\n".join(html)
+
+    def format_str(
+        self,
+        batches: list,
+        schema: Any,
+        has_more: bool = False,
+        table_uuid: str | None = None,
+    ) -> str:
+        """Format record batches as a string.
+
+        This method is used by DataFrame's __repr__ implementation and can be
+        called directly when string rendering is needed.
+
+        Args:
+            batches: List of Arrow RecordBatch objects
+            schema: Arrow Schema object
+            has_more: Whether there are more batches not shown
+            table_uuid: Unique ID for the table, used for JavaScript interactions
+
+        Returns:
+            String representation of the data
+
+        Raises:
+            TypeError: If schema is invalid and no batches are provided
+        """
+        return DataFrameInternal.default_str_repr(batches, schema, has_more, table_uuid)
+
+    def _build_html_header(self) -> list[str]:
+        """Build the HTML header with CSS styles."""
+        default_css = self._get_default_css() if self.enable_cell_expansion else ""
+        script = f"""
+        <style>
+        {default_css}
+        </style>
+        """
+        html = [script]
+        if self.custom_css:
+            html.append(f"<style>{self.custom_css}</style>")
+        return html
+
+    def _build_table_container_start(self) -> list[str]:
+        """Build the opening tags for the table container."""
+        html = []
+        html.append(
+            f'<div style="width: 100%; max-width: {self.max_width}px; '
+            f'max-height: {self.max_height}px; overflow: auto; border: 1px solid #ccc;">'
+        )
+        html.append('<table style="border-collapse: collapse; min-width: 100%">')
+        return html
+
+    def _build_table_header(self, schema: Any) -> list[str]:
+        """Build the HTML table header with column names."""
+        html = []
+        html.append("<thead>")
+        html.append("<tr>")
+        for field in schema:
+            if self._custom_header_builder:
+                html.append(self._custom_header_builder(field))
+            else:
+                html.append(
+                    f"<th style='{self.style_provider.get_header_style()}'>"
+                    f"{field.name}</th>"
+                )
+        html.append("</tr>")
+        html.append("</thead>")
+        return html
+
+    def _build_table_body(self, batches: list, table_uuid: str) -> list[str]:
+        """Build the HTML table body with data rows."""
+        html = []
+        html.append("<tbody>")
+
+        row_count = 0
+        for batch in batches:
+            for row_idx in range(batch.num_rows):
+                row_count += 1
+                html.append("<tr>")
+
+                for col_idx, column in enumerate(batch.columns):
+                    # Get the raw value from the column
+                    raw_value = self._get_cell_value(column, row_idx)
+
+                    # Always check for type formatters first to format the value
+                    formatted_value = self._format_cell_value(raw_value)
+
+                    # Then apply either custom cell builder or standard cell formatting
+                    if self._custom_cell_builder:
+                        # Pass both the raw value and formatted value to let the
+                        # builder decide
+                        cell_html = self._custom_cell_builder(
+                            raw_value, row_count, col_idx, table_uuid
+                        )
+                        html.append(cell_html)
+                    else:
+                        # Standard cell formatting with formatted value
+                        if (
+                            len(str(raw_value)) > self.max_cell_length
+                            and self.enable_cell_expansion
+                        ):
+                            cell_html = self._build_expandable_cell(
+                                formatted_value, row_count, col_idx, table_uuid
+                            )
+                        else:
+                            cell_html = self._build_regular_cell(formatted_value)
+                        html.append(cell_html)
+
+                html.append("</tr>")
+
+        html.append("</tbody>")
+        return html
+
+    def _get_cell_value(self, column: Any, row_idx: int) -> Any:
+        """Extract a cell value from a column.
+
+        Args:
+            column: Arrow array
+            row_idx: Row index
+
+        Returns:
+            The raw cell value
+        """
+        try:
+            value = column[row_idx]
+
+            if hasattr(value, "as_py"):
+                return value.as_py()
+        except (AttributeError, TypeError):
+            pass
+        else:
+            return value
+
+    def _format_cell_value(self, value: Any) -> str:
+        """Format a cell value for display.
+
+        Uses registered type formatters if available.
+
+        Args:
+            value: The cell value to format
+
+        Returns:
+            Formatted cell value as string
+        """
+        # Check for custom type formatters
+        for type_cls, formatter in self._type_formatters.items():
+            if isinstance(value, type_cls):
+                return formatter(value)
+
+        # If no formatter matched, return string representation
+        return str(value)
+
+    def _build_expandable_cell(
+        self, formatted_value: str, row_count: int, col_idx: int, table_uuid: str
+    ) -> str:
+        """Build an expandable cell for long content."""
+        short_value = str(formatted_value)[: self.max_cell_length]
+        return (
+            f"<td style='{self.style_provider.get_cell_style()}'>"
+            f"<div class='expandable-container'>"
+            f"<span class='expandable' id='{table_uuid}-min-text-{row_count}-{col_idx}'>"
+            f"{short_value}</span>"
+            f"<span class='full-text' id='{table_uuid}-full-text-{row_count}-{col_idx}'>"
+            f"{formatted_value}</span>"
+            f"<button class='expand-btn' "
+            f"onclick=\"toggleDataFrameCellText('{table_uuid}',{row_count},{col_idx})\">"
+            f"...</button>"
+            f"</div>"
+            f"</td>"
+        )
+
+    def _build_regular_cell(self, formatted_value: str) -> str:
+        """Build a regular table cell."""
+        return (
+            f"<td style='{self.style_provider.get_cell_style()}'>"
+            f"{formatted_value}</td>"
+        )
+
+    def _build_html_footer(self, has_more: bool) -> list[str]:
+        """Build the HTML footer with JavaScript and messages."""
+        html = []
+
+        # Add JavaScript for interactivity only if cell expansion is enabled
+        # and we're not using the shared styles approach
+        if self.enable_cell_expansion and not self.use_shared_styles:
+            html.append(self._get_javascript())
+
+        # Add truncation message if needed
+        if has_more and self.show_truncation_message:
+            html.append("<div>Data truncated due to size.</div>")
+
+        return html
+
+    def _get_default_css(self) -> str:
+        """Get default CSS styles for the HTML table."""
+        return """
+    .expandable-container {
+        display: inline-block;
+        max-width: 200px;
+    }
+    .expandable {
+        white-space: nowrap;
+        overflow: hidden;
+        text-overflow: ellipsis;
+        display: block;
+    }
+    .full-text {
+        display: none;
+        white-space: normal;
+    }
+    .expand-btn {
+        cursor: pointer;
+        color: blue;
+        text-decoration: underline;
+        border: none;
+        background: none;
+        font-size: inherit;
+        display: block;
+        margin-top: 5px;
+    }
+    """
+
+    def _get_javascript(self) -> str:
+        """Get JavaScript code for interactive elements."""
+        return """
+        <script>
+        function toggleDataFrameCellText(tableId, rowId, colId) {
+            var shortText = document.getElementById(
+                tableId + "-min-text-" + rowId + "-" + colId
+            );
+            var fullText = document.getElementById(
+                tableId + "-full-text-" + rowId + "-" + colId
+            );
+            var button = event.target;
+
+            if (fullText.style.display === "none" || fullText.style.display === "") {
+                shortText.style.display = "none";
+                fullText.style.display = "inline";
+                button.textContent = "(less)";
+            } else {
+                shortText.style.display = "inline";
+                fullText.style.display = "none";
+                button.textContent = "...";
+            }
+        }
+        </script>
+        """
+
+
+class FormatterManager:
+    """Manager class for the global DataFrame HTML formatter instance."""
+
+    _default_formatter: DataFrameHtmlFormatter = DataFrameHtmlFormatter()
+
+    @classmethod
+    def set_formatter(cls, formatter: DataFrameHtmlFormatter) -> None:
+        """Set the global DataFrame HTML formatter.
+
+        Args:
+            formatter: The formatter instance to use globally
+        """
+        cls._default_formatter = formatter
+        _refresh_formatter_reference()
+
+    @classmethod
+    def get_formatter(cls) -> DataFrameHtmlFormatter:
+        """Get the current global DataFrame HTML formatter.
+
+        Returns:
+            The global HTML formatter instance
+        """
+        return cls._default_formatter
+
+
+def get_formatter() -> DataFrameHtmlFormatter:
+    """Get the current global DataFrame HTML formatter.
+
+    This function is used by the DataFrame._repr_html_ implementation to access
+    the shared formatter instance. It can also be used directly when custom
+    HTML rendering is needed.
+
+    Returns:
+        The global HTML formatter instance
+
+    Example:
+        >>> from datafusion.html_formatter import get_formatter
+        >>> formatter = get_formatter()
+        >>> formatter.max_cell_length = 50  # Increase cell length
+    """
+    return FormatterManager.get_formatter()
+
+
+def set_formatter(formatter: DataFrameHtmlFormatter) -> None:
+    """Set the global DataFrame HTML formatter.
+
+    Args:
+        formatter: The formatter instance to use globally
+
+    Example:
+        >>> from datafusion.html_formatter import (
+        ...     DataFrameHtmlFormatter,
+        ...     set_formatter,
+        ... )
+        >>> custom_formatter = DataFrameHtmlFormatter(max_cell_length=100)
+        >>> set_formatter(custom_formatter)
+    """
+    FormatterManager.set_formatter(formatter)
+
+
+def configure_formatter(**kwargs: Any) -> None:
+    """Configure the global DataFrame HTML formatter.
+
+    This function creates a new formatter with the provided configuration
+    and sets it as the global formatter for all DataFrames.
+
+    Args:
+        **kwargs: Formatter configuration parameters like max_cell_length,
+            max_width, max_height, enable_cell_expansion, etc.
+
+    Raises:
+        ValueError: If any invalid parameters are provided
+
+    Example:
+        >>> from datafusion.html_formatter import configure_formatter
+        >>> configure_formatter(
+        ...     max_cell_length=50,
+        ...     max_height=500,
+        ...     enable_cell_expansion=True,
+        ...     use_shared_styles=True
+        ... )
+    """
+    # Valid parameters accepted by DataFrameHtmlFormatter
+    valid_params = {
+        "max_cell_length",
+        "max_width",
+        "max_height",
+        "max_memory_bytes",
+        "min_rows",
+        "max_rows",
+        "repr_rows",
+        "enable_cell_expansion",
+        "custom_css",
+        "show_truncation_message",
+        "style_provider",
+        "use_shared_styles",
+    }
+
+    # Check for invalid parameters
+    invalid_params = set(kwargs) - valid_params
+    if invalid_params:
+        msg = (
+            f"Invalid formatter parameters: {', '.join(invalid_params)}. 
" + f"Valid parameters are: {', '.join(valid_params)}" + ) + raise ValueError(msg) + + # Create and set formatter with validated parameters + set_formatter(DataFrameHtmlFormatter(**kwargs)) + + +def reset_formatter() -> None: + """Reset the global DataFrame HTML formatter to default settings. + + This function creates a new formatter with default configuration + and sets it as the global formatter for all DataFrames. + + Example: + >>> from datafusion.html_formatter import reset_formatter + >>> reset_formatter() # Reset formatter to default settings + """ + formatter = DataFrameHtmlFormatter() + set_formatter(formatter) + + +def _refresh_formatter_reference() -> None: + """Refresh formatter reference in any modules using it. + + This helps ensure that changes to the formatter are reflected in existing + DataFrames that might be caching the formatter reference. + """ + # This is a no-op but signals modules to refresh their reference diff --git a/python/datafusion/expr.py b/python/datafusion/expr.py index e914b85d7..5760b8948 100644 --- a/python/datafusion/expr.py +++ b/python/datafusion/expr.py @@ -15,9 +15,1416 @@ # specific language governing permissions and limitations # under the License. +"""This module supports expressions, one of the core concepts in DataFusion. -from ._internal import expr +See :ref:`Expressions` in the online documentation for more details. +""" +# ruff: noqa: PLC0415 -def __getattr__(name): - return getattr(expr, name) +from __future__ import annotations + +from collections.abc import Iterable, Sequence +from typing import TYPE_CHECKING, Any, ClassVar + +try: + from warnings import deprecated # Python 3.13+ +except ImportError: + from typing_extensions import deprecated # Python 3.12 + +import pyarrow as pa + +from ._internal import expr as expr_internal +from ._internal import functions as functions_internal + +if TYPE_CHECKING: + from collections.abc import Sequence + + from datafusion.common import ( # type: ignore[import] + DataTypeMap, + NullTreatment, + RexType, + ) + from datafusion.plan import LogicalPlan + + +# Standard error message for invalid expression types +# Mention both alias forms of column and literal helpers +EXPR_TYPE_ERROR = "Use col()/column() or lit()/literal() to construct expressions" + +# The following are imported from the internal representation. We may choose to +# give these all proper wrappers, or to simply leave as is. These were added +# in order to support passing the `test_imports` unit test. +# Tim Saucer note: It is not clear to me what the use case is for exposing +# these definitions to the end user. 
+ +Alias = expr_internal.Alias +Analyze = expr_internal.Analyze +Aggregate = expr_internal.Aggregate +AggregateFunction = expr_internal.AggregateFunction +Between = expr_internal.Between +BinaryExpr = expr_internal.BinaryExpr +Case = expr_internal.Case +Cast = expr_internal.Cast +Column = expr_internal.Column +CopyTo = expr_internal.CopyTo +CreateCatalog = expr_internal.CreateCatalog +CreateCatalogSchema = expr_internal.CreateCatalogSchema +CreateExternalTable = expr_internal.CreateExternalTable +CreateFunction = expr_internal.CreateFunction +CreateFunctionBody = expr_internal.CreateFunctionBody +CreateIndex = expr_internal.CreateIndex +CreateMemoryTable = expr_internal.CreateMemoryTable +CreateView = expr_internal.CreateView +Deallocate = expr_internal.Deallocate +DescribeTable = expr_internal.DescribeTable +Distinct = expr_internal.Distinct +DmlStatement = expr_internal.DmlStatement +DropCatalogSchema = expr_internal.DropCatalogSchema +DropFunction = expr_internal.DropFunction +DropTable = expr_internal.DropTable +DropView = expr_internal.DropView +EmptyRelation = expr_internal.EmptyRelation +Execute = expr_internal.Execute +Exists = expr_internal.Exists +Explain = expr_internal.Explain +Extension = expr_internal.Extension +FileType = expr_internal.FileType +Filter = expr_internal.Filter +GroupingSet = expr_internal.GroupingSet +Join = expr_internal.Join +ILike = expr_internal.ILike +InList = expr_internal.InList +InSubquery = expr_internal.InSubquery +IsFalse = expr_internal.IsFalse +IsNotTrue = expr_internal.IsNotTrue +IsNull = expr_internal.IsNull +IsTrue = expr_internal.IsTrue +IsUnknown = expr_internal.IsUnknown +IsNotFalse = expr_internal.IsNotFalse +IsNotNull = expr_internal.IsNotNull +IsNotUnknown = expr_internal.IsNotUnknown +JoinConstraint = expr_internal.JoinConstraint +JoinType = expr_internal.JoinType +Like = expr_internal.Like +Limit = expr_internal.Limit +Literal = expr_internal.Literal +Negative = expr_internal.Negative +Not = expr_internal.Not +OperateFunctionArg = expr_internal.OperateFunctionArg +Partitioning = expr_internal.Partitioning +Placeholder = expr_internal.Placeholder +Prepare = expr_internal.Prepare +Projection = expr_internal.Projection +RecursiveQuery = expr_internal.RecursiveQuery +Repartition = expr_internal.Repartition +ScalarSubquery = expr_internal.ScalarSubquery +ScalarVariable = expr_internal.ScalarVariable +SetVariable = expr_internal.SetVariable +SimilarTo = expr_internal.SimilarTo +Sort = expr_internal.Sort +Subquery = expr_internal.Subquery +SubqueryAlias = expr_internal.SubqueryAlias +TableScan = expr_internal.TableScan +TransactionAccessMode = expr_internal.TransactionAccessMode +TransactionConclusion = expr_internal.TransactionConclusion +TransactionEnd = expr_internal.TransactionEnd +TransactionIsolationLevel = expr_internal.TransactionIsolationLevel +TransactionStart = expr_internal.TransactionStart +TryCast = expr_internal.TryCast +Union = expr_internal.Union +Unnest = expr_internal.Unnest +UnnestExpr = expr_internal.UnnestExpr +Values = expr_internal.Values +WindowExpr = expr_internal.WindowExpr + +__all__ = [ + "EXPR_TYPE_ERROR", + "Aggregate", + "AggregateFunction", + "Alias", + "Analyze", + "Between", + "BinaryExpr", + "Case", + "CaseBuilder", + "Cast", + "Column", + "CopyTo", + "CreateCatalog", + "CreateCatalogSchema", + "CreateExternalTable", + "CreateFunction", + "CreateFunctionBody", + "CreateIndex", + "CreateMemoryTable", + "CreateView", + "Deallocate", + "DescribeTable", + "Distinct", + "DmlStatement", + "DropCatalogSchema", + 
"DropFunction", + "DropTable", + "DropView", + "EmptyRelation", + "Execute", + "Exists", + "Explain", + "Expr", + "Extension", + "FileType", + "Filter", + "GroupingSet", + "ILike", + "InList", + "InSubquery", + "IsFalse", + "IsNotFalse", + "IsNotNull", + "IsNotTrue", + "IsNotUnknown", + "IsNull", + "IsTrue", + "IsUnknown", + "Join", + "JoinConstraint", + "JoinType", + "Like", + "Limit", + "Literal", + "Literal", + "Negative", + "Not", + "OperateFunctionArg", + "Partitioning", + "Placeholder", + "Prepare", + "Projection", + "RecursiveQuery", + "Repartition", + "ScalarSubquery", + "ScalarVariable", + "SetVariable", + "SimilarTo", + "Sort", + "SortExpr", + "SortKey", + "Subquery", + "SubqueryAlias", + "TableScan", + "TransactionAccessMode", + "TransactionConclusion", + "TransactionEnd", + "TransactionIsolationLevel", + "TransactionStart", + "TryCast", + "Union", + "Unnest", + "UnnestExpr", + "Values", + "Window", + "WindowExpr", + "WindowFrame", + "WindowFrameBound", + "ensure_expr", + "ensure_expr_list", +] + + +def ensure_expr(value: Expr | Any) -> expr_internal.Expr: + """Return the internal expression from ``Expr`` or raise ``TypeError``. + + This helper rejects plain strings and other non-:class:`Expr` values so + higher level APIs consistently require explicit :func:`~datafusion.col` or + :func:`~datafusion.lit` expressions. + + Args: + value: Candidate expression or other object. + + Returns: + The internal expression representation. + + Raises: + TypeError: If ``value`` is not an instance of :class:`Expr`. + """ + if not isinstance(value, Expr): + raise TypeError(EXPR_TYPE_ERROR) + return value.expr + + +def ensure_expr_list( + exprs: Iterable[Expr | Iterable[Expr]], +) -> list[expr_internal.Expr]: + """Flatten an iterable of expressions, validating each via ``ensure_expr``. + + Args: + exprs: Possibly nested iterable containing expressions. + + Returns: + A flat list of raw expressions. + + Raises: + TypeError: If any item is not an instance of :class:`Expr`. + """ + + def _iter( + items: Iterable[Expr | Iterable[Expr]], + ) -> Iterable[expr_internal.Expr]: + for expr in items: + if isinstance(expr, Iterable) and not isinstance( + expr, Expr | str | bytes | bytearray + ): + # Treat string-like objects as atomic to surface standard errors + yield from _iter(expr) + else: + yield ensure_expr(expr) + + return list(_iter(exprs)) + + +def _to_raw_expr(value: Expr | str) -> expr_internal.Expr: + """Convert a Python expression or column name to its raw variant. + + Args: + value: Candidate expression or column name. + + Returns: + The internal :class:`~datafusion._internal.expr.Expr` representation. + + Raises: + TypeError: If ``value`` is neither an :class:`Expr` nor ``str``. + """ + if isinstance(value, str): + return Expr.column(value).expr + if isinstance(value, Expr): + return value.expr + error = ( + "Expected Expr or column name, found:" + f" {type(value).__name__}. {EXPR_TYPE_ERROR}." 
+ ) + raise TypeError(error) + + +def expr_list_to_raw_expr_list( + expr_list: list[Expr] | Expr | None, +) -> list[expr_internal.Expr] | None: + """Convert a sequence of expressions or column names to raw expressions.""" + if isinstance(expr_list, Expr | str): + expr_list = [expr_list] + if expr_list is None: + return None + return [_to_raw_expr(e) for e in expr_list] + + +def sort_or_default(e: Expr | SortExpr) -> expr_internal.SortExpr: + """Helper function to return a default Sort if an Expr is provided.""" + if isinstance(e, SortExpr): + return e.raw_sort + return SortExpr(e, ascending=True, nulls_first=True).raw_sort + + +def sort_list_to_raw_sort_list( + sort_list: Sequence[SortKey] | SortKey | None, +) -> list[expr_internal.SortExpr] | None: + """Helper function to return an optional sort list to raw variant.""" + if isinstance(sort_list, Expr | SortExpr | str): + sort_list = [sort_list] + if sort_list is None: + return None + raw_sort_list = [] + for item in sort_list: + if isinstance(item, SortExpr): + raw_sort_list.append(sort_or_default(item)) + else: + raw_expr = _to_raw_expr(item) # may raise ``TypeError`` + raw_sort_list.append(sort_or_default(Expr(raw_expr))) + return raw_sort_list + + +class Expr: + """Expression object. + + Expressions are one of the core concepts in DataFusion. See + :ref:`Expressions` in the online documentation for more information. + """ + + def __init__(self, expr: expr_internal.RawExpr) -> None: + """This constructor should not be called by the end user.""" + self.expr = expr + + def to_variant(self) -> Any: + """Convert this expression into a python object if possible.""" + return self.expr.to_variant() + + @deprecated( + "display_name() is deprecated. Use :py:meth:`~Expr.schema_name` instead" + ) + def display_name(self) -> str: + """Returns the name of this expression as it should appear in a schema. + + This name will not include any CAST expressions. + """ + return self.schema_name() + + def schema_name(self) -> str: + """Returns the name of this expression as it should appear in a schema. + + This name will not include any CAST expressions. + """ + return self.expr.schema_name() + + def canonical_name(self) -> str: + """Returns a complete string representation of this expression.""" + return self.expr.canonical_name() + + def variant_name(self) -> str: + """Returns the name of the Expr variant. + + Ex: ``IsNotNull``, ``Literal``, ``BinaryExpr``, etc + """ + return self.expr.variant_name() + + def __richcmp__(self, other: Expr, op: int) -> Expr: + """Comparison operator.""" + return Expr(self.expr.__richcmp__(other.expr, op)) + + def __repr__(self) -> str: + """Generate a string representation of this expression.""" + return self.expr.__repr__() + + def __add__(self, rhs: Any) -> Expr: + """Addition operator. + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__add__(rhs.expr)) + + def __sub__(self, rhs: Any) -> Expr: + """Subtraction operator. + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__sub__(rhs.expr)) + + def __truediv__(self, rhs: Any) -> Expr: + """Division operator. + + Accepts either an expression or any valid PyArrow scalar literal value. 
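+
+        Example (illustrative, with ``col`` and ``lit`` imported from
+        ``datafusion``)::
+
+            ratio = col("a") / lit(2)  # equivalent to col("a") / 2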
+ """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__truediv__(rhs.expr)) + + def __mul__(self, rhs: Any) -> Expr: + """Multiplication operator. + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__mul__(rhs.expr)) + + def __mod__(self, rhs: Any) -> Expr: + """Modulo operator (%). + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__mod__(rhs.expr)) + + def __and__(self, rhs: Expr) -> Expr: + """Logical AND.""" + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__and__(rhs.expr)) + + def __or__(self, rhs: Expr) -> Expr: + """Logical OR.""" + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__or__(rhs.expr)) + + def __invert__(self) -> Expr: + """Binary not (~).""" + return Expr(self.expr.__invert__()) + + def __getitem__(self, key: str | int) -> Expr: + """Retrieve sub-object. + + If ``key`` is a string, returns the subfield of the struct. + If ``key`` is an integer, retrieves the element in the array. Note that the + element index begins at ``0``, unlike + :py:func:`~datafusion.functions.array_element` which begins at ``1``. + If ``key`` is a slice, returns an array that contains a slice of the + original array. Similar to integer indexing, this follows Python convention + where the index begins at ``0`` unlike + :py:func:`~datafusion.functions.array_slice` which begins at ``1``. + """ + if isinstance(key, int): + return Expr( + functions_internal.array_element(self.expr, Expr.literal(key + 1).expr) + ) + if isinstance(key, slice): + if isinstance(key.start, int): + start = Expr.literal(key.start + 1).expr + elif isinstance(key.start, Expr): + start = (key.start + Expr.literal(1)).expr + else: + # Default start at the first element, index 1 + start = Expr.literal(1).expr + + if isinstance(key.stop, int): + stop = Expr.literal(key.stop).expr + else: + stop = key.stop.expr + + if isinstance(key.step, int): + step = Expr.literal(key.step).expr + elif isinstance(key.step, Expr): + step = key.step.expr + else: + step = key.step + + return Expr(functions_internal.array_slice(self.expr, start, stop, step)) + return Expr(self.expr.__getitem__(key)) + + def __eq__(self, rhs: object) -> Expr: + """Equal to. + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__eq__(rhs.expr)) + + def __ne__(self, rhs: object) -> Expr: + """Not equal to. + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__ne__(rhs.expr)) + + def __ge__(self, rhs: Any) -> Expr: + """Greater than or equal to. + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__ge__(rhs.expr)) + + def __gt__(self, rhs: Any) -> Expr: + """Greater than. + + Accepts either an expression or any valid PyArrow scalar literal value. + """ + if not isinstance(rhs, Expr): + rhs = Expr.literal(rhs) + return Expr(self.expr.__gt__(rhs.expr)) + + def __le__(self, rhs: Any) -> Expr: + """Less than or equal to. + + Accepts either an expression or any valid PyArrow scalar literal value. 
+        """
+        if not isinstance(rhs, Expr):
+            rhs = Expr.literal(rhs)
+        return Expr(self.expr.__le__(rhs.expr))
+
+    def __lt__(self, rhs: Any) -> Expr:
+        """Less than.
+
+        Accepts either an expression or any valid PyArrow scalar literal value.
+        """
+        if not isinstance(rhs, Expr):
+            rhs = Expr.literal(rhs)
+        return Expr(self.expr.__lt__(rhs.expr))
+
+    # Commutative operators can safely reuse the forward implementation
+    __radd__ = __add__
+    __rand__ = __and__
+    __rmul__ = __mul__
+    __ror__ = __or__
+
+    def __rsub__(self, lhs: Any) -> Expr:
+        """Reflected subtraction operator (``lhs - self``)."""
+        if not isinstance(lhs, Expr):
+            lhs = Expr.literal(lhs)
+        return Expr(lhs.expr.__sub__(self.expr))
+
+    def __rmod__(self, lhs: Any) -> Expr:
+        """Reflected modulo operator (``lhs % self``)."""
+        if not isinstance(lhs, Expr):
+            lhs = Expr.literal(lhs)
+        return Expr(lhs.expr.__mod__(self.expr))
+
+    def __rtruediv__(self, lhs: Any) -> Expr:
+        """Reflected division operator (``lhs / self``)."""
+        if not isinstance(lhs, Expr):
+            lhs = Expr.literal(lhs)
+        return Expr(lhs.expr.__truediv__(self.expr))
+
+    @staticmethod
+    def literal(value: Any) -> Expr:
+        """Creates a new expression representing a scalar value.
+
+        ``value`` must be a valid PyArrow scalar value or easily castable to one.
+        """
+        if isinstance(value, str):
+            value = pa.scalar(value, type=pa.string_view())
+        return Expr(expr_internal.RawExpr.literal(value))
+
+    @staticmethod
+    def literal_with_metadata(value: Any, metadata: dict[str, str]) -> Expr:
+        """Creates a new expression representing a scalar value with metadata.
+
+        Args:
+            value: A valid PyArrow scalar value or easily castable to one.
+            metadata: Metadata to attach to the expression.
+        """
+        if isinstance(value, str):
+            value = pa.scalar(value, type=pa.string_view())
+
+        return Expr(expr_internal.RawExpr.literal_with_metadata(value, metadata))
+
+    @staticmethod
+    def string_literal(value: str) -> Expr:
+        """Creates a new expression representing a UTF8 literal value.
+
+        It is different from `literal` because it uses ``pa.string()`` instead
+        of ``pa.string_view()``.
+
+        This is needed for cases where DataFusion is expecting a UTF8 instead of
+        UTF8View literal, like in:
+        https://github.com/apache/datafusion/blob/86740bfd3d9831d6b7c1d0e1bf4a21d91598a0ac/datafusion/functions/src/core/arrow_cast.rs#L179
+        """
+        if isinstance(value, str):
+            value = pa.scalar(value, type=pa.string())
+            return Expr(expr_internal.RawExpr.literal(value))
+        return Expr.literal(value)
+
+    @staticmethod
+    def column(value: str) -> Expr:
+        """Creates a new expression representing a column."""
+        return Expr(expr_internal.RawExpr.column(value))
+
+    def alias(self, name: str, metadata: dict[str, str] | None = None) -> Expr:
+        """Assign a name to the expression.
+
+        Args:
+            name: The name to assign to the expression.
+            metadata: Optional metadata to attach to the expression.
+
+        Returns:
+            A new expression with the assigned name.
+        """
+        return Expr(self.expr.alias(name, metadata))
+
+    def sort(self, ascending: bool = True, nulls_first: bool = True) -> SortExpr:
+        """Creates a sort :py:class:`Expr` from an existing :py:class:`Expr`.
+
+        Args:
+            ascending: If true, sort in ascending order.
+            nulls_first: Return null values first.
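+
+        Example (illustrative, with ``col`` imported from ``datafusion``)::
+
+            sort_expr = col("a").sort(ascending=False, nulls_first=False)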
+        """
+        return SortExpr(self, ascending=ascending, nulls_first=nulls_first)
+
+    def is_null(self) -> Expr:
+        """Returns ``True`` if this expression is null."""
+        return Expr(self.expr.is_null())
+
+    def is_not_null(self) -> Expr:
+        """Returns ``True`` if this expression is not null."""
+        return Expr(self.expr.is_not_null())
+
+    def fill_nan(self, value: Any | Expr | None = None) -> Expr:
+        """Fill NaN values with a provided value."""
+        if not isinstance(value, Expr):
+            value = Expr.literal(value)
+        return Expr(functions_internal.nanvl(self.expr, value.expr))
+
+    def fill_null(self, value: Any | Expr | None = None) -> Expr:
+        """Fill NULL values with a provided value."""
+        if not isinstance(value, Expr):
+            value = Expr.literal(value)
+        return Expr(functions_internal.nvl(self.expr, value.expr))
+
+    _to_pyarrow_types: ClassVar[dict[type, pa.DataType]] = {
+        float: pa.float64(),
+        int: pa.int64(),
+        str: pa.string(),
+        bool: pa.bool_(),
+    }
+
+    def cast(self, to: pa.DataType[Any] | type) -> Expr:
+        """Cast to a new data type."""
+        if not isinstance(to, pa.DataType):
+            try:
+                to = self._to_pyarrow_types[to]
+            except KeyError as err:
+                error_msg = "Expected instance of pyarrow.DataType or builtins.type"
+                raise TypeError(error_msg) from err
+
+        return Expr(self.expr.cast(to))
+
+    def between(self, low: Any, high: Any, negated: bool = False) -> Expr:
+        """Returns ``True`` if this expression is between a given range.
+
+        Args:
+            low: lower bound of the range (inclusive).
+            high: higher bound of the range (inclusive).
+            negated: if ``True``, negate the check, returning ``True`` when the
+                value falls outside the range.
+        """
+        if not isinstance(low, Expr):
+            low = Expr.literal(low)
+
+        if not isinstance(high, Expr):
+            high = Expr.literal(high)
+
+        return Expr(self.expr.between(low.expr, high.expr, negated=negated))
+
+    def rex_type(self) -> RexType:
+        """Return the Rex Type of this expression.
+
+        A Rex (Row Expression) specifies a single row of data. That specification
+        could include user defined functions or types. RexType identifies the
+        row as one of the possible valid ``RexType`` values.
+        """
+        return self.expr.rex_type()
+
+    def types(self) -> DataTypeMap:
+        """Return the ``DataTypeMap``.
+
+        Returns:
+            DataTypeMap which represents the PythonType, Arrow DataType, and
+            SqlType Enum which this expression represents.
+        """
+        return self.expr.types()
+
+    def python_value(self) -> Any:
+        """Extracts the Expr value into `Any`.
+
+        This is only valid for literal expressions.
+
+        Returns:
+            Python object representing literal value of the expression.
+        """
+        return self.expr.python_value()
+
+    def rex_call_operands(self) -> list[Expr]:
+        """Return the operands of the expression based on its variant type.
+
+        Row expressions, Rex(s), operate on the concept of operands. Different
+        variants of Expressions, Expr(s), store those operands in different
+        data structures. This function examines the Expr variant and returns
+        the operands to the calling logic.
+        """
+        return [Expr(e) for e in self.expr.rex_call_operands()]
+
+    def rex_call_operator(self) -> str:
+        """Extracts the operator associated with a row expression type call."""
+        return self.expr.rex_call_operator()
+
+    def column_name(self, plan: LogicalPlan) -> str:
+        """Compute the output column name based on the provided logical plan."""
+        return self.expr.column_name(plan._raw_plan)
+
+    def order_by(self, *exprs: Expr | SortExpr) -> ExprFuncBuilder:
+        """Set the ordering for a window or aggregate function.
+
+        This function will create an :py:class:`ExprFuncBuilder` that can be used to
+        set parameters for either window or aggregate functions. If used on any other
+        type of expression, an error will be generated when ``build()`` is called.
+        """
+        return ExprFuncBuilder(self.expr.order_by([sort_or_default(e) for e in exprs]))
+
+    def filter(self, filter: Expr) -> ExprFuncBuilder:
+        """Filter an aggregate function.
+
+        This function will create an :py:class:`ExprFuncBuilder` that can be used to
+        set parameters for either window or aggregate functions. If used on any other
+        type of expression, an error will be generated when ``build()`` is called.
+        """
+        return ExprFuncBuilder(self.expr.filter(filter.expr))
+
+    def distinct(self) -> ExprFuncBuilder:
+        """Only evaluate distinct values for an aggregate function.
+
+        This function will create an :py:class:`ExprFuncBuilder` that can be used to
+        set parameters for either window or aggregate functions. If used on any other
+        type of expression, an error will be generated when ``build()`` is called.
+        """
+        return ExprFuncBuilder(self.expr.distinct())
+
+    def null_treatment(self, null_treatment: NullTreatment) -> ExprFuncBuilder:
+        """Set the treatment for ``null`` values for a window or aggregate function.
+
+        This function will create an :py:class:`ExprFuncBuilder` that can be used to
+        set parameters for either window or aggregate functions. If used on any other
+        type of expression, an error will be generated when ``build()`` is called.
+        """
+        return ExprFuncBuilder(self.expr.null_treatment(null_treatment.value))
+
+    def partition_by(self, *partition_by: Expr) -> ExprFuncBuilder:
+        """Set the partitioning for a window function.
+
+        This function will create an :py:class:`ExprFuncBuilder` that can be used to
+        set parameters for either window or aggregate functions. If used on any other
+        type of expression, an error will be generated when ``build()`` is called.
+        """
+        return ExprFuncBuilder(self.expr.partition_by([e.expr for e in partition_by]))
+
+    def window_frame(self, window_frame: WindowFrame) -> ExprFuncBuilder:
+        """Set the frame for a window function.
+
+        This function will create an :py:class:`ExprFuncBuilder` that can be used to
+        set parameters for either window or aggregate functions. If used on any other
+        type of expression, an error will be generated when ``build()`` is called.
+        """
+        return ExprFuncBuilder(self.expr.window_frame(window_frame.window_frame))
+
+    def over(self, window: Window) -> Expr:
+        """Turn an aggregate function into a window function.
+
+        This function turns any aggregate function into a window function. With the
+        exception of ``partition_by``, how each of the parameters is used is determined
+        by the underlying aggregate function.
+
+        Args:
+            window: Window definition
+        """
+        partition_by_raw = expr_list_to_raw_expr_list(window._partition_by)
+        order_by_raw = window._order_by
+        window_frame_raw = (
+            window._window_frame.window_frame
+            if window._window_frame is not None
+            else None
+        )
+        null_treatment_raw = (
+            window._null_treatment.value if window._null_treatment is not None else None
+        )
+
+        return Expr(
+            self.expr.over(
+                partition_by=partition_by_raw,
+                order_by=order_by_raw,
+                window_frame=window_frame_raw,
+                null_treatment=null_treatment_raw,
+            )
+        )
+
+    def asin(self) -> Expr:
+        """Returns the arc sine or inverse sine of a number."""
+        from . import functions as F
+
+        return F.asin(self)
+
+    def array_pop_back(self) -> Expr:
+        """Returns the array without the last element."""
+        from . 
import functions as F + + return F.array_pop_back(self) + + def reverse(self) -> Expr: + """Reverse the string argument.""" + from . import functions as F + + return F.reverse(self) + + def bit_length(self) -> Expr: + """Returns the number of bits in the string argument.""" + from . import functions as F + + return F.bit_length(self) + + def array_length(self) -> Expr: + """Returns the length of the array.""" + from . import functions as F + + return F.array_length(self) + + def array_ndims(self) -> Expr: + """Returns the number of dimensions of the array.""" + from . import functions as F + + return F.array_ndims(self) + + def to_hex(self) -> Expr: + """Converts an integer to a hexadecimal string.""" + from . import functions as F + + return F.to_hex(self) + + def array_dims(self) -> Expr: + """Returns an array of the array's dimensions.""" + from . import functions as F + + return F.array_dims(self) + + def from_unixtime(self) -> Expr: + """Converts an integer to RFC3339 timestamp format string.""" + from . import functions as F + + return F.from_unixtime(self) + + def array_empty(self) -> Expr: + """Returns a boolean indicating whether the array is empty.""" + from . import functions as F + + return F.array_empty(self) + + def sin(self) -> Expr: + """Returns the sine of the argument.""" + from . import functions as F + + return F.sin(self) + + def log10(self) -> Expr: + """Base 10 logarithm of the argument.""" + from . import functions as F + + return F.log10(self) + + def initcap(self) -> Expr: + """Set the initial letter of each word to capital. + + Converts the first letter of each word in ``string`` to uppercase and the + remaining characters to lowercase. + """ + from . import functions as F + + return F.initcap(self) + + def list_distinct(self) -> Expr: + """Returns distinct values from the array after removing duplicates. + + This is an alias for :py:func:`array_distinct`. + """ + from . import functions as F + + return F.list_distinct(self) + + def iszero(self) -> Expr: + """Returns true if a given number is +0.0 or -0.0 otherwise returns false.""" + from . import functions as F + + return F.iszero(self) + + def array_distinct(self) -> Expr: + """Returns distinct values from the array after removing duplicates.""" + from . import functions as F + + return F.array_distinct(self) + + def arrow_typeof(self) -> Expr: + """Returns the Arrow type of the expression.""" + from . import functions as F + + return F.arrow_typeof(self) + + def length(self) -> Expr: + """The number of characters in the ``string``.""" + from . import functions as F + + return F.length(self) + + def lower(self) -> Expr: + """Converts a string to lowercase.""" + from . import functions as F + + return F.lower(self) + + def acos(self) -> Expr: + """Returns the arc cosine or inverse cosine of a number. + + Returns: + -------- + Expr + A new expression representing the arc cosine of the input expression. + """ + from . import functions as F + + return F.acos(self) + + def ascii(self) -> Expr: + """Returns the numeric code of the first character of the argument.""" + from . import functions as F + + return F.ascii(self) + + def sha384(self) -> Expr: + """Computes the SHA-384 hash of a binary string.""" + from . import functions as F + + return F.sha384(self) + + def isnan(self) -> Expr: + """Returns true if a given number is +NaN or -NaN otherwise returns false.""" + from . import functions as F + + return F.isnan(self) + + def degrees(self) -> Expr: + """Converts the argument from radians to degrees.""" + from . 
import functions as F + + return F.degrees(self) + + def cardinality(self) -> Expr: + """Returns the total number of elements in the array.""" + from . import functions as F + + return F.cardinality(self) + + def sha224(self) -> Expr: + """Computes the SHA-224 hash of a binary string.""" + from . import functions as F + + return F.sha224(self) + + def asinh(self) -> Expr: + """Returns inverse hyperbolic sine.""" + from . import functions as F + + return F.asinh(self) + + def flatten(self) -> Expr: + """Flattens an array of arrays into a single array.""" + from . import functions as F + + return F.flatten(self) + + def exp(self) -> Expr: + """Returns the exponential of the argument.""" + from . import functions as F + + return F.exp(self) + + def abs(self) -> Expr: + """Return the absolute value of a given number. + + Returns: + -------- + Expr + A new expression representing the absolute value of the input expression. + """ + from . import functions as F + + return F.abs(self) + + def btrim(self) -> Expr: + """Removes all characters, spaces by default, from both sides of a string.""" + from . import functions as F + + return F.btrim(self) + + def md5(self) -> Expr: + """Computes an MD5 128-bit checksum for a string expression.""" + from . import functions as F + + return F.md5(self) + + def octet_length(self) -> Expr: + """Returns the number of bytes of a string.""" + from . import functions as F + + return F.octet_length(self) + + def cosh(self) -> Expr: + """Returns the hyperbolic cosine of the argument.""" + from . import functions as F + + return F.cosh(self) + + def radians(self) -> Expr: + """Converts the argument from degrees to radians.""" + from . import functions as F + + return F.radians(self) + + def sqrt(self) -> Expr: + """Returns the square root of the argument.""" + from . import functions as F + + return F.sqrt(self) + + def character_length(self) -> Expr: + """Returns the number of characters in the argument.""" + from . import functions as F + + return F.character_length(self) + + def tanh(self) -> Expr: + """Returns the hyperbolic tangent of the argument.""" + from . import functions as F + + return F.tanh(self) + + def atan(self) -> Expr: + """Returns inverse tangent of a number.""" + from . import functions as F + + return F.atan(self) + + def rtrim(self) -> Expr: + """Removes all characters, spaces by default, from the end of a string.""" + from . import functions as F + + return F.rtrim(self) + + def atanh(self) -> Expr: + """Returns inverse hyperbolic tangent.""" + from . import functions as F + + return F.atanh(self) + + def list_dims(self) -> Expr: + """Returns an array of the array's dimensions. + + This is an alias for :py:func:`array_dims`. + """ + from . import functions as F + + return F.list_dims(self) + + def sha256(self) -> Expr: + """Computes the SHA-256 hash of a binary string.""" + from . import functions as F + + return F.sha256(self) + + def factorial(self) -> Expr: + """Returns the factorial of the argument.""" + from . import functions as F + + return F.factorial(self) + + def acosh(self) -> Expr: + """Returns inverse hyperbolic cosine.""" + from . import functions as F + + return F.acosh(self) + + def floor(self) -> Expr: + """Returns the nearest integer less than or equal to the argument.""" + from . import functions as F + + return F.floor(self) + + def ceil(self) -> Expr: + """Returns the nearest integer greater than or equal to argument.""" + from . 
import functions as F + + return F.ceil(self) + + def list_length(self) -> Expr: + """Returns the length of the array. + + This is an alias for :py:func:`array_length`. + """ + from . import functions as F + + return F.list_length(self) + + def upper(self) -> Expr: + """Converts a string to uppercase.""" + from . import functions as F + + return F.upper(self) + + def chr(self) -> Expr: + """Converts the Unicode code point to a UTF8 character.""" + from . import functions as F + + return F.chr(self) + + def ln(self) -> Expr: + """Returns the natural logarithm (base e) of the argument.""" + from . import functions as F + + return F.ln(self) + + def tan(self) -> Expr: + """Returns the tangent of the argument.""" + from . import functions as F + + return F.tan(self) + + def array_pop_front(self) -> Expr: + """Returns the array without the first element.""" + from . import functions as F + + return F.array_pop_front(self) + + def cbrt(self) -> Expr: + """Returns the cube root of a number.""" + from . import functions as F + + return F.cbrt(self) + + def sha512(self) -> Expr: + """Computes the SHA-512 hash of a binary string.""" + from . import functions as F + + return F.sha512(self) + + def char_length(self) -> Expr: + """The number of characters in the ``string``.""" + from . import functions as F + + return F.char_length(self) + + def list_ndims(self) -> Expr: + """Returns the number of dimensions of the array. + + This is an alias for :py:func:`array_ndims`. + """ + from . import functions as F + + return F.list_ndims(self) + + def trim(self) -> Expr: + """Removes all characters, spaces by default, from both sides of a string.""" + from . import functions as F + + return F.trim(self) + + def cos(self) -> Expr: + """Returns the cosine of the argument.""" + from . import functions as F + + return F.cos(self) + + def sinh(self) -> Expr: + """Returns the hyperbolic sine of the argument.""" + from . import functions as F + + return F.sinh(self) + + def empty(self) -> Expr: + """This is an alias for :py:func:`array_empty`.""" + from . import functions as F + + return F.empty(self) + + def ltrim(self) -> Expr: + """Removes all characters, spaces by default, from the beginning of a string.""" + from . import functions as F + + return F.ltrim(self) + + def signum(self) -> Expr: + """Returns the sign of the argument (-1, 0, +1).""" + from . import functions as F + + return F.signum(self) + + def log2(self) -> Expr: + """Base 2 logarithm of the argument.""" + from . import functions as F + + return F.log2(self) + + def cot(self) -> Expr: + """Returns the cotangent of the argument.""" + from . import functions as F + + return F.cot(self) + + +class ExprFuncBuilder: + def __init__(self, builder: expr_internal.ExprFuncBuilder) -> None: + self.builder = builder + + def order_by(self, *exprs: Expr) -> ExprFuncBuilder: + """Set the ordering for a window or aggregate function. + + Values given in ``exprs`` must be sort expressions. You can convert any other + expression to a sort expression using `.sort()`. 
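+
+        Example (illustrative)::
+
+            import datafusion.functions as f
+            from datafusion import col
+
+            expr = f.first_value(col("a")).order_by(col("b").sort()).build()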
+ """ + return ExprFuncBuilder( + self.builder.order_by([sort_or_default(e) for e in exprs]) + ) + + def filter(self, filter: Expr) -> ExprFuncBuilder: + """Filter values during aggregation.""" + return ExprFuncBuilder(self.builder.filter(filter.expr)) + + def distinct(self) -> ExprFuncBuilder: + """Only evaluate distinct values during aggregation.""" + return ExprFuncBuilder(self.builder.distinct()) + + def null_treatment(self, null_treatment: NullTreatment) -> ExprFuncBuilder: + """Set how nulls are treated for either window or aggregate functions.""" + return ExprFuncBuilder(self.builder.null_treatment(null_treatment.value)) + + def partition_by(self, *partition_by: Expr) -> ExprFuncBuilder: + """Set partitioning for window functions.""" + return ExprFuncBuilder( + self.builder.partition_by([e.expr for e in partition_by]) + ) + + def window_frame(self, window_frame: WindowFrame) -> ExprFuncBuilder: + """Set window frame for window functions.""" + return ExprFuncBuilder(self.builder.window_frame(window_frame.window_frame)) + + def build(self) -> Expr: + """Create an expression from a Function Builder.""" + return Expr(self.builder.build()) + + +class Window: + """Define reusable window parameters.""" + + def __init__( + self, + partition_by: list[Expr] | Expr | None = None, + window_frame: WindowFrame | None = None, + order_by: list[SortExpr | Expr | str] | Expr | SortExpr | str | None = None, + null_treatment: NullTreatment | None = None, + ) -> None: + """Construct a window definition. + + Args: + partition_by: Partitions for window operation + window_frame: Define the start and end bounds of the window frame + order_by: Set ordering + null_treatment: Indicate how nulls are to be treated + """ + self._partition_by = partition_by + self._window_frame = window_frame + self._order_by = sort_list_to_raw_sort_list(order_by) + self._null_treatment = null_treatment + + +class WindowFrame: + """Defines a window frame for performing window operations.""" + + def __init__( + self, units: str, start_bound: Any | None, end_bound: Any | None + ) -> None: + """Construct a window frame using the given parameters. + + Args: + units: Should be one of ``rows``, ``range``, or ``groups``. + start_bound: Sets the preceding bound. Must be >= 0. If none, this + will be set to unbounded. If unit type is ``groups``, this + parameter must be set. + end_bound: Sets the following bound. Must be >= 0. If none, this + will be set to unbounded. If unit type is ``groups``, this + parameter must be set. 
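+
+        Example (illustrative)::
+
+            # rows frame from two rows preceding to unbounded following
+            frame = WindowFrame("rows", 2, None)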
+ """ + if not isinstance(start_bound, pa.Scalar) and start_bound is not None: + start_bound = pa.scalar(start_bound) + if units in ("rows", "groups"): + start_bound = start_bound.cast(pa.uint64()) + if not isinstance(end_bound, pa.Scalar) and end_bound is not None: + end_bound = pa.scalar(end_bound) + if units in ("rows", "groups"): + end_bound = end_bound.cast(pa.uint64()) + self.window_frame = expr_internal.WindowFrame(units, start_bound, end_bound) + + def __repr__(self) -> str: + """Print a string representation of the window frame.""" + return self.window_frame.__repr__() + + def get_frame_units(self) -> str: + """Returns the window frame units for the bounds.""" + return self.window_frame.get_frame_units() + + def get_lower_bound(self) -> WindowFrameBound: + """Returns starting bound.""" + return WindowFrameBound(self.window_frame.get_lower_bound()) + + def get_upper_bound(self) -> WindowFrameBound: + """Returns end bound.""" + return WindowFrameBound(self.window_frame.get_upper_bound()) + + +class WindowFrameBound: + """Defines a single window frame bound. + + :py:class:`WindowFrame` typically requires a start and end bound. + """ + + def __init__(self, frame_bound: expr_internal.WindowFrameBound) -> None: + """Constructs a window frame bound.""" + self.frame_bound = frame_bound + + def get_offset(self) -> int | None: + """Returns the offset of the window frame.""" + return self.frame_bound.get_offset() + + def is_current_row(self) -> bool: + """Returns if the frame bound is current row.""" + return self.frame_bound.is_current_row() + + def is_following(self) -> bool: + """Returns if the frame bound is following.""" + return self.frame_bound.is_following() + + def is_preceding(self) -> bool: + """Returns if the frame bound is preceding.""" + return self.frame_bound.is_preceding() + + def is_unbounded(self) -> bool: + """Returns if the frame bound is unbounded.""" + return self.frame_bound.is_unbounded() + + +class CaseBuilder: + """Builder class for constructing case statements. + + An example usage would be as follows:: + + import datafusion.functions as f + from datafusion import lit, col + df.select( + f.case(col("column_a")) + .when(lit(1), lit("One")) + .when(lit(2), lit("Two")) + .otherwise(lit("Unknown")) + ) + """ + + def __init__(self, case_builder: expr_internal.CaseBuilder) -> None: + """Constructs a case builder. + + This is not typically called by the end user directly. See + :py:func:`datafusion.functions.case` instead. + """ + self.case_builder = case_builder + + def when(self, when_expr: Expr, then_expr: Expr) -> CaseBuilder: + """Add a case to match against.""" + return CaseBuilder(self.case_builder.when(when_expr.expr, then_expr.expr)) + + def otherwise(self, else_expr: Expr) -> Expr: + """Set a default value for the case statement.""" + return Expr(self.case_builder.otherwise(else_expr.expr)) + + def end(self) -> Expr: + """Finish building a case statement. + + Any non-matching cases will end in a `null` value. 
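+
+        Example (illustrative)::
+
+            import datafusion.functions as f
+            from datafusion import col, lit
+
+            expr = f.case(col("a")).when(lit(1), lit("one")).end()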
+ """ + return Expr(self.case_builder.end()) + + +class SortExpr: + """Used to specify sorting on either a DataFrame or function.""" + + def __init__(self, expr: Expr, ascending: bool, nulls_first: bool) -> None: + """This constructor should not be called by the end user.""" + self.raw_sort = expr_internal.SortExpr(expr.expr, ascending, nulls_first) + + def expr(self) -> Expr: + """Return the raw expr backing the SortExpr.""" + return Expr(self.raw_sort.expr()) + + def ascending(self) -> bool: + """Return ascending property.""" + return self.raw_sort.ascending() + + def nulls_first(self) -> bool: + """Return nulls_first property.""" + return self.raw_sort.nulls_first() + + def __repr__(self) -> str: + """Generate a string representation of this expression.""" + return self.raw_sort.__repr__() + + +SortKey = Expr | SortExpr | str diff --git a/python/datafusion/functions.py b/python/datafusion/functions.py index 782ecba22..fd116254b 100644 --- a/python/datafusion/functions.py +++ b/python/datafusion/functions.py @@ -14,10 +14,3078 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. +"""User functions for operating on :py:class:`~datafusion.expr.Expr`.""" +from __future__ import annotations -from ._internal import functions +from typing import TYPE_CHECKING, Any +import pyarrow as pa -def __getattr__(name): - return getattr(functions, name) +from datafusion._internal import functions as f +from datafusion.common import NullTreatment +from datafusion.expr import ( + CaseBuilder, + Expr, + SortExpr, + SortKey, + WindowFrame, + expr_list_to_raw_expr_list, + sort_list_to_raw_sort_list, + sort_or_default, +) + +try: + from warnings import deprecated # Python 3.13+ +except ImportError: + from typing_extensions import deprecated # Python 3.12 + +if TYPE_CHECKING: + from datafusion.context import SessionContext +__all__ = [ + "abs", + "acos", + "acosh", + "alias", + "approx_distinct", + "approx_median", + "approx_percentile_cont", + "approx_percentile_cont_with_weight", + "array", + "array_agg", + "array_append", + "array_cat", + "array_concat", + "array_dims", + "array_distinct", + "array_element", + "array_empty", + "array_except", + "array_extract", + "array_has", + "array_has_all", + "array_has_any", + "array_indexof", + "array_intersect", + "array_join", + "array_length", + "array_ndims", + "array_pop_back", + "array_pop_front", + "array_position", + "array_positions", + "array_prepend", + "array_push_back", + "array_push_front", + "array_remove", + "array_remove_all", + "array_remove_n", + "array_repeat", + "array_replace", + "array_replace_all", + "array_replace_n", + "array_resize", + "array_slice", + "array_sort", + "array_to_string", + "array_union", + "arrow_cast", + "arrow_typeof", + "ascii", + "asin", + "asinh", + "atan", + "atan2", + "atanh", + "avg", + "bit_and", + "bit_length", + "bit_or", + "bit_xor", + "bool_and", + "bool_or", + "btrim", + "cardinality", + "case", + "cbrt", + "ceil", + "char_length", + "character_length", + "chr", + "coalesce", + "col", + "concat", + "concat_ws", + "corr", + "cos", + "cosh", + "cot", + "count", + "count_star", + "covar", + "covar_pop", + "covar_samp", + "cume_dist", + "current_date", + "current_time", + "date_bin", + "date_part", + "date_trunc", + "datepart", + "datetrunc", + "decode", + "degrees", + "dense_rank", + "digest", + "empty", + "encode", + "ends_with", + "exp", + "extract", + "factorial", + "find_in_set", + "first_value", + "flatten", + "floor", + 
"from_unixtime", + "gcd", + "in_list", + "initcap", + "isnan", + "iszero", + "lag", + "last_value", + "lcm", + "lead", + "left", + "length", + "levenshtein", + "list_append", + "list_cat", + "list_concat", + "list_dims", + "list_distinct", + "list_element", + "list_except", + "list_extract", + "list_indexof", + "list_intersect", + "list_join", + "list_length", + "list_ndims", + "list_position", + "list_positions", + "list_prepend", + "list_push_back", + "list_push_front", + "list_remove", + "list_remove_all", + "list_remove_n", + "list_repeat", + "list_replace", + "list_replace_all", + "list_replace_n", + "list_resize", + "list_slice", + "list_sort", + "list_to_string", + "list_union", + "ln", + "log", + "log2", + "log10", + "lower", + "lpad", + "ltrim", + "make_array", + "make_date", + "make_list", + "max", + "md5", + "mean", + "median", + "min", + "named_struct", + "nanvl", + "now", + "nth_value", + "ntile", + "nullif", + "nvl", + "octet_length", + "order_by", + "overlay", + "percent_rank", + "pi", + "pow", + "power", + "radians", + "random", + "range", + "rank", + "regexp_count", + "regexp_instr", + "regexp_like", + "regexp_match", + "regexp_replace", + "regr_avgx", + "regr_avgy", + "regr_count", + "regr_intercept", + "regr_r2", + "regr_slope", + "regr_sxx", + "regr_sxy", + "regr_syy", + "repeat", + "replace", + "reverse", + "right", + "round", + "row_number", + "rpad", + "rtrim", + "sha224", + "sha256", + "sha384", + "sha512", + "signum", + "sin", + "sinh", + "split_part", + "sqrt", + "starts_with", + "stddev", + "stddev_pop", + "stddev_samp", + "string_agg", + "strpos", + "struct", + "substr", + "substr_index", + "substring", + "sum", + "tan", + "tanh", + "to_char", + "to_date", + "to_hex", + "to_local_time", + "to_time", + "to_timestamp", + "to_timestamp_micros", + "to_timestamp_millis", + "to_timestamp_nanos", + "to_timestamp_seconds", + "to_unixtime", + "today", + "translate", + "trim", + "trunc", + "upper", + "uuid", + "var", + "var_pop", + "var_samp", + "var_sample", + "when", + # Window Functions + "window", +] + + +def isnan(expr: Expr) -> Expr: + """Returns true if a given number is +NaN or -NaN otherwise returns false.""" + return Expr(f.isnan(expr.expr)) + + +def nullif(expr1: Expr, expr2: Expr) -> Expr: + """Returns NULL if expr1 equals expr2; otherwise it returns expr1. + + This can be used to perform the inverse operation of the COALESCE expression. + """ + return Expr(f.nullif(expr1.expr, expr2.expr)) + + +def encode(expr: Expr, encoding: Expr) -> Expr: + """Encode the ``input``, using the ``encoding``. encoding can be base64 or hex.""" + return Expr(f.encode(expr.expr, encoding.expr)) + + +def decode(expr: Expr, encoding: Expr) -> Expr: + """Decode the ``input``, using the ``encoding``. encoding can be base64 or hex.""" + return Expr(f.decode(expr.expr, encoding.expr)) + + +def array_to_string(expr: Expr, delimiter: Expr) -> Expr: + """Converts each element to its text representation.""" + return Expr(f.array_to_string(expr.expr, delimiter.expr.cast(pa.string()))) + + +def array_join(expr: Expr, delimiter: Expr) -> Expr: + """Converts each element to its text representation. + + This is an alias for :py:func:`array_to_string`. + """ + return array_to_string(expr, delimiter) + + +def list_to_string(expr: Expr, delimiter: Expr) -> Expr: + """Converts each element to its text representation. + + This is an alias for :py:func:`array_to_string`. 
+ """ + return array_to_string(expr, delimiter) + + +def list_join(expr: Expr, delimiter: Expr) -> Expr: + """Converts each element to its text representation. + + This is an alias for :py:func:`array_to_string`. + """ + return array_to_string(expr, delimiter) + + +def in_list(arg: Expr, values: list[Expr], negated: bool = False) -> Expr: + """Returns whether the argument is contained within the list ``values``.""" + values = [v.expr for v in values] + return Expr(f.in_list(arg.expr, values, negated)) + + +def digest(value: Expr, method: Expr) -> Expr: + """Computes the binary hash of an expression using the specified algorithm. + + Standard algorithms are md5, sha224, sha256, sha384, sha512, blake2s, + blake2b, and blake3. + """ + return Expr(f.digest(value.expr, method.expr)) + + +def concat(*args: Expr) -> Expr: + """Concatenates the text representations of all the arguments. + + NULL arguments are ignored. + """ + args = [arg.expr for arg in args] + return Expr(f.concat(args)) + + +def concat_ws(separator: str, *args: Expr) -> Expr: + """Concatenates the list ``args`` with the separator. + + ``NULL`` arguments are ignored. ``separator`` should not be ``NULL``. + """ + args = [arg.expr for arg in args] + return Expr(f.concat_ws(separator, args)) + + +def order_by(expr: Expr, ascending: bool = True, nulls_first: bool = True) -> SortExpr: + """Creates a new sort expression.""" + return SortExpr(expr, ascending=ascending, nulls_first=nulls_first) + + +def alias(expr: Expr, name: str, metadata: dict[str, str] | None = None) -> Expr: + """Creates an alias expression with an optional metadata dictionary. + + Args: + expr: The expression to alias + name: The alias name + metadata: Optional metadata to attach to the column + + Returns: + An expression with the given alias + """ + return Expr(f.alias(expr.expr, name, metadata)) + + +def col(name: str) -> Expr: + """Creates a column reference expression.""" + return Expr(f.col(name)) + + +def count_star(filter: Expr | None = None) -> Expr: + """Create a COUNT(1) aggregate expression. + + This aggregate function will count all of the rows in the partition. + + If using the builder functions described in ref:`_aggregation` this function ignores + the options ``order_by``, ``distinct``, and ``null_treatment``. + + Args: + filter: If provided, only count rows for which the filter is True + """ + return count(Expr.literal(1), filter=filter) + + +def case(expr: Expr) -> CaseBuilder: + """Create a case expression. + + Create a :py:class:`~datafusion.expr.CaseBuilder` to match cases for the + expression ``expr``. See :py:class:`~datafusion.expr.CaseBuilder` for + detailed usage. + """ + return CaseBuilder(f.case(expr.expr)) + + +def when(when: Expr, then: Expr) -> CaseBuilder: + """Create a case expression that has no base expression. + + Create a :py:class:`~datafusion.expr.CaseBuilder` to match cases for the + expression ``expr``. See :py:class:`~datafusion.expr.CaseBuilder` for + detailed usage. + """ + return CaseBuilder(f.when(when.expr, then.expr)) + + +@deprecated("Prefer to call Expr.over() instead") +def window( + name: str, + args: list[Expr], + partition_by: list[Expr] | Expr | None = None, + order_by: list[SortKey] | SortKey | None = None, + window_frame: WindowFrame | None = None, + filter: Expr | None = None, + distinct: bool = False, + ctx: SessionContext | None = None, +) -> Expr: + """Creates a new Window function expression. + + This interface will soon be deprecated. 
Instead of using this interface, + users should call the window functions directly. For example, to perform a + lag use:: + + df.select(functions.lag(col("a")).partition_by(col("b")).build()) + + The ``order_by`` parameter accepts column names or expressions, e.g.:: + + window("lag", [col("a")], order_by="ts") + """ + args = [a.expr for a in args] + partition_by_raw = expr_list_to_raw_expr_list(partition_by) + order_by_raw = sort_list_to_raw_sort_list(order_by) + window_frame = window_frame.window_frame if window_frame is not None else None + ctx = ctx.ctx if ctx is not None else None + filter_raw = filter.expr if filter is not None else None + return Expr( + f.window( + name, + args, + partition_by=partition_by_raw, + order_by=order_by_raw, + window_frame=window_frame, + ctx=ctx, + filter=filter_raw, + distinct=distinct, + ) + ) + + +# scalar functions +def abs(arg: Expr) -> Expr: + """Return the absolute value of a given number. + + Returns: + -------- + Expr + A new expression representing the absolute value of the input expression. + """ + return Expr(f.abs(arg.expr)) + + +def acos(arg: Expr) -> Expr: + """Returns the arc cosine or inverse cosine of a number. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [1.0]}) + >>> result = df.select(dfn.functions.acos(dfn.col("a")).alias("acos")) + >>> result.collect_column("acos")[0].as_py() + 0.0 + """ + return Expr(f.acos(arg.expr)) + + +def acosh(arg: Expr) -> Expr: + """Returns inverse hyperbolic cosine. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [1.0]}) + >>> result = df.select(dfn.functions.acosh(dfn.col("a")).alias("acosh")) + >>> result.collect_column("acosh")[0].as_py() + 0.0 + """ + return Expr(f.acosh(arg.expr)) + + +def ascii(arg: Expr) -> Expr: + """Returns the numeric code of the first character of the argument.""" + return Expr(f.ascii(arg.expr)) + + +def asin(arg: Expr) -> Expr: + """Returns the arc sine or inverse sine of a number. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0.0]}) + >>> result = df.select(dfn.functions.asin(dfn.col("a")).alias("asin")) + >>> result.collect_column("asin")[0].as_py() + 0.0 + """ + return Expr(f.asin(arg.expr)) + + +def asinh(arg: Expr) -> Expr: + """Returns inverse hyperbolic sine. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0.0]}) + >>> result = df.select(dfn.functions.asinh(dfn.col("a")).alias("asinh")) + >>> result.collect_column("asinh")[0].as_py() + 0.0 + """ + return Expr(f.asinh(arg.expr)) + + +def atan(arg: Expr) -> Expr: + """Returns inverse tangent of a number. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0.0]}) + >>> result = df.select(dfn.functions.atan(dfn.col("a")).alias("atan")) + >>> result.collect_column("atan")[0].as_py() + 0.0 + """ + return Expr(f.atan(arg.expr)) + + +def atanh(arg: Expr) -> Expr: + """Returns inverse hyperbolic tangent. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0.0]}) + >>> result = df.select(dfn.functions.atanh(dfn.col("a")).alias("atanh")) + >>> result.collect_column("atanh")[0].as_py() + 0.0 + """ + return Expr(f.atanh(arg.expr)) + + +def atan2(y: Expr, x: Expr) -> Expr: + """Returns inverse tangent of a division given in the argument. 
+ + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"y": [0.0], "x": [1.0]}) + >>> result = df.select( + ... dfn.functions.atan2(dfn.col("y"), dfn.col("x")).alias("atan2")) + >>> result.collect_column("atan2")[0].as_py() + 0.0 + """ + return Expr(f.atan2(y.expr, x.expr)) + + +def bit_length(arg: Expr) -> Expr: + """Returns the number of bits in the string argument.""" + return Expr(f.bit_length(arg.expr)) + + +def btrim(arg: Expr) -> Expr: + """Removes all characters, spaces by default, from both sides of a string.""" + return Expr(f.btrim(arg.expr)) + + +def cbrt(arg: Expr) -> Expr: + """Returns the cube root of a number.""" + return Expr(f.cbrt(arg.expr)) + + +def ceil(arg: Expr) -> Expr: + """Returns the nearest integer greater than or equal to argument.""" + return Expr(f.ceil(arg.expr)) + + +def character_length(arg: Expr) -> Expr: + """Returns the number of characters in the argument.""" + return Expr(f.character_length(arg.expr)) + + +def length(string: Expr) -> Expr: + """The number of characters in the ``string``.""" + return Expr(f.length(string.expr)) + + +def char_length(string: Expr) -> Expr: + """The number of characters in the ``string``.""" + return Expr(f.char_length(string.expr)) + + +def chr(arg: Expr) -> Expr: + """Converts the Unicode code point to a UTF8 character.""" + return Expr(f.chr(arg.expr)) + + +def coalesce(*args: Expr) -> Expr: + """Returns the value of the first expr in ``args`` which is not NULL.""" + args = [arg.expr for arg in args] + return Expr(f.coalesce(*args)) + + +def cos(arg: Expr) -> Expr: + """Returns the cosine of the argument. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0,-1,1]}) + >>> cos_df = df.select(dfn.functions.cos(dfn.col("a")).alias("cos")) + >>> cos_df.collect_column("cos")[0].as_py() + 1.0 + """ + return Expr(f.cos(arg.expr)) + + +def cosh(arg: Expr) -> Expr: + """Returns the hyperbolic cosine of the argument. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0,-1,1]}) + >>> cosh_df = df.select(dfn.functions.cosh(dfn.col("a")).alias("cosh")) + >>> cosh_df.collect_column("cosh")[0].as_py() + 1.0 + """ + return Expr(f.cosh(arg.expr)) + + +def cot(arg: Expr) -> Expr: + """Returns the cotangent of the argument. + + Examples: + --------- + >>> from math import pi + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [pi / 4]}) + >>> import builtins + >>> result = df.select( + ... dfn.functions.cot(dfn.col("a")).alias("cot") + ... ) + >>> builtins.round( + ... result.collect_column("cot")[0].as_py(), 1 + ... ) + 1.0 + """ + return Expr(f.cot(arg.expr)) + + +def degrees(arg: Expr) -> Expr: + """Converts the argument from radians to degrees. 
+
+    Examples:
+    ---------
+    >>> from math import pi
+    >>> ctx = dfn.SessionContext()
+    >>> df = ctx.from_pydict({"a": [0,pi,2*pi]})
+    >>> deg_df = df.select(dfn.functions.degrees(dfn.col("a")).alias("deg"))
+    >>> deg_df.collect_column("deg")[2].as_py()
+    360.0
+    """
+    return Expr(f.degrees(arg.expr))
+
+
+def ends_with(arg: Expr, suffix: Expr) -> Expr:
+    """Returns true if the ``string`` ends with the ``suffix``, false otherwise."""
+    return Expr(f.ends_with(arg.expr, suffix.expr))
+
+
+def exp(arg: Expr) -> Expr:
+    """Returns the exponential of the argument."""
+    return Expr(f.exp(arg.expr))
+
+
+def factorial(arg: Expr) -> Expr:
+    """Returns the factorial of the argument."""
+    return Expr(f.factorial(arg.expr))
+
+
+def find_in_set(string: Expr, string_list: Expr) -> Expr:
+    """Find a string in a list of strings.
+
+    Returns a value in the range of 1 to N if the string is in the string list
+    ``string_list`` consisting of N substrings.
+
+    The string list is a string composed of substrings separated by ``,`` characters.
+    """
+    return Expr(f.find_in_set(string.expr, string_list.expr))
+
+
+def floor(arg: Expr) -> Expr:
+    """Returns the nearest integer less than or equal to the argument."""
+    return Expr(f.floor(arg.expr))
+
+
+def gcd(x: Expr, y: Expr) -> Expr:
+    """Returns the greatest common divisor."""
+    return Expr(f.gcd(x.expr, y.expr))
+
+
+def initcap(string: Expr) -> Expr:
+    """Set the initial letter of each word to capital.
+
+    Converts the first letter of each word in ``string`` to uppercase and the remaining
+    characters to lowercase.
+    """
+    return Expr(f.initcap(string.expr))
+
+
+def instr(string: Expr, substring: Expr) -> Expr:
+    """Finds the position from where the ``substring`` matches the ``string``.
+
+    This is an alias for :py:func:`strpos`.
+    """
+    return strpos(string, substring)
+
+
+def iszero(arg: Expr) -> Expr:
+    """Returns true if a given number is +0.0 or -0.0 otherwise returns false."""
+    return Expr(f.iszero(arg.expr))
+
+
+def lcm(x: Expr, y: Expr) -> Expr:
+    """Returns the least common multiple."""
+    return Expr(f.lcm(x.expr, y.expr))
+
+
+def left(string: Expr, n: Expr) -> Expr:
+    """Returns the first ``n`` characters in the ``string``."""
+    return Expr(f.left(string.expr, n.expr))
+
+
+def levenshtein(string1: Expr, string2: Expr) -> Expr:
+    """Returns the Levenshtein distance between the two given strings."""
+    return Expr(f.levenshtein(string1.expr, string2.expr))
+
+
+def ln(arg: Expr) -> Expr:
+    """Returns the natural logarithm (base e) of the argument."""
+    return Expr(f.ln(arg.expr))
+
+
+def log(base: Expr, num: Expr) -> Expr:
+    """Returns the logarithm of a number for a particular ``base``."""
+    return Expr(f.log(base.expr, num.expr))
+
+
+def log10(arg: Expr) -> Expr:
+    """Base 10 logarithm of the argument."""
+    return Expr(f.log10(arg.expr))
+
+
+def log2(arg: Expr) -> Expr:
+    """Base 2 logarithm of the argument."""
+    return Expr(f.log2(arg.expr))
+
+
+def lower(arg: Expr) -> Expr:
+    """Converts a string to lowercase."""
+    return Expr(f.lower(arg.expr))
+
+
+def lpad(string: Expr, count: Expr, characters: Expr | None = None) -> Expr:
+    """Add left padding to a string.
+
+    Extends the string to length ``count`` by prepending ``characters`` (a
+    space by default). If the string is already longer than ``count`` then it is
+    truncated (on the right).
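+
+    A minimal sketch (assumes a DataFrame ``df`` with a string column ``a``)::
+
+        df.select(lpad(col("a"), lit(5), lit("0")))  # pad to 5 characters with "0"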
+ """ + characters = characters if characters is not None else Expr.literal(" ") + return Expr(f.lpad(string.expr, count.expr, characters.expr)) + + +def ltrim(arg: Expr) -> Expr: + """Removes all characters, spaces by default, from the beginning of a string.""" + return Expr(f.ltrim(arg.expr)) + + +def md5(arg: Expr) -> Expr: + """Computes an MD5 128-bit checksum for a string expression.""" + return Expr(f.md5(arg.expr)) + + +def nanvl(x: Expr, y: Expr) -> Expr: + """Returns ``x`` if ``x`` is not ``NaN``. Otherwise returns ``y``.""" + return Expr(f.nanvl(x.expr, y.expr)) + + +def nvl(x: Expr, y: Expr) -> Expr: + """Returns ``x`` if ``x`` is not ``NULL``. Otherwise returns ``y``.""" + return Expr(f.nvl(x.expr, y.expr)) + + +def octet_length(arg: Expr) -> Expr: + """Returns the number of bytes of a string.""" + return Expr(f.octet_length(arg.expr)) + + +def overlay( + string: Expr, substring: Expr, start: Expr, length: Expr | None = None +) -> Expr: + """Replace a substring with a new substring. + + Replace the substring of string that starts at the ``start``'th character and + extends for ``length`` characters with new substring. + """ + if length is None: + return Expr(f.overlay(string.expr, substring.expr, start.expr)) + return Expr(f.overlay(string.expr, substring.expr, start.expr, length.expr)) + + +def pi() -> Expr: + """Returns an approximate value of π.""" + return Expr(f.pi()) + + +def position(string: Expr, substring: Expr) -> Expr: + """Finds the position from where the ``substring`` matches the ``string``. + + This is an alias for :py:func:`strpos`. + """ + return strpos(string, substring) + + +def power(base: Expr, exponent: Expr) -> Expr: + """Returns ``base`` raised to the power of ``exponent``.""" + return Expr(f.power(base.expr, exponent.expr)) + + +def pow(base: Expr, exponent: Expr) -> Expr: + """Returns ``base`` raised to the power of ``exponent``. + + This is an alias of :py:func:`power`. + """ + return power(base, exponent) + + +def radians(arg: Expr) -> Expr: + """Converts the argument from degrees to radians. + + Examples: + --------- + >>> from math import pi + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [180.0]}) + >>> import builtins + >>> result = df.select( + ... dfn.functions.radians(dfn.col("a")).alias("rad") + ... ) + >>> builtins.round( + ... result.collect_column("rad")[0].as_py(), 6 + ... ) + 3.141593 + """ + return Expr(f.radians(arg.expr)) + + +def regexp_like(string: Expr, regex: Expr, flags: Expr | None = None) -> Expr: + """Find if any regular expression (regex) matches exist. + + Tests a string using a regular expression returning true if at least one match, + false otherwise. + """ + if flags is not None: + flags = flags.expr + return Expr(f.regexp_like(string.expr, regex.expr, flags)) + + +def regexp_match(string: Expr, regex: Expr, flags: Expr | None = None) -> Expr: + """Perform regular expression (regex) matching. + + Returns an array with each element containing the leftmost-first match of the + corresponding index in ``regex`` to string in ``string``. + """ + if flags is not None: + flags = flags.expr + return Expr(f.regexp_match(string.expr, regex.expr, flags)) + + +def regexp_replace( + string: Expr, pattern: Expr, replacement: Expr, flags: Expr | None = None +) -> Expr: + """Replaces substring(s) matching a PCRE-like regular expression. 
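+
+    A minimal sketch (assumes a DataFrame ``df`` with a string column ``a``; the
+    ``g`` flag replaces every match rather than only the first)::
+
+        df.select(regexp_replace(col("a"), lit(r"\d+"), lit("#"), flags=lit("g")))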
+
+    The full list of supported features and syntax can be found at
+    <https://docs.rs/regex/latest/regex/#syntax>.
+
+    Supported flags with the addition of 'g' can be found at
+    <https://docs.rs/regex/latest/regex/#grouping-and-flags>.
+    """
+    if flags is not None:
+        flags = flags.expr
+    return Expr(f.regexp_replace(string.expr, pattern.expr, replacement.expr, flags))
+
+
+def regexp_count(
+    string: Expr, pattern: Expr, start: Expr | None = None, flags: Expr | None = None
+) -> Expr:
+    """Returns the number of matches in a string.
+
+    The optional ``start`` position (the first position is 1) sets where to
+    begin searching for the regular expression.
+    """
+    if flags is not None:
+        flags = flags.expr
+    start = start.expr if start is not None else start
+    return Expr(f.regexp_count(string.expr, pattern.expr, start, flags))
+
+
+def regexp_instr(
+    values: Expr,
+    regex: Expr,
+    start: Expr | None = None,
+    n: Expr | None = None,
+    flags: Expr | None = None,
+    sub_expr: Expr | None = None,
+) -> Expr:
+    """Returns the position of a regular expression match in a string.
+
+    Searches ``values`` for the ``n``-th occurrence of ``regex``, starting at position
+    ``start`` (the first position is 1), and returns the starting position of that
+    match. Use ``flags`` to control regex behavior and ``sub_expr`` to
+    return the position of a specific capture group instead of the entire match.
+    """
+    start = start.expr if start is not None else None
+    n = n.expr if n is not None else None
+    flags = flags.expr if flags is not None else None
+    sub_expr = sub_expr.expr if sub_expr is not None else None
+
+    return Expr(
+        f.regexp_instr(
+            values.expr,
+            regex.expr,
+            start,
+            n,
+            flags,
+            sub_expr,
+        )
+    )
+
+
+def repeat(string: Expr, n: Expr) -> Expr:
+    """Repeats the ``string`` ``n`` times."""
+    return Expr(f.repeat(string.expr, n.expr))
+
+
+def replace(string: Expr, from_val: Expr, to_val: Expr) -> Expr:
+    """Replaces all occurrences of ``from_val`` with ``to_val`` in the ``string``."""
+    return Expr(f.replace(string.expr, from_val.expr, to_val.expr))
+
+
+def reverse(arg: Expr) -> Expr:
+    """Reverse the string argument."""
+    return Expr(f.reverse(arg.expr))
+
+
+def right(string: Expr, n: Expr) -> Expr:
+    """Returns the last ``n`` characters in the ``string``."""
+    return Expr(f.right(string.expr, n.expr))
+
+
+def round(value: Expr, decimal_places: Expr | None = None) -> Expr:
+    """Round the argument to the nearest integer.
+
+    If the optional ``decimal_places`` is specified, round to the nearest number of
+    decimal places. You can specify a negative number of decimal places. For example
+    ``round(lit(125.2345), lit(-2))`` would yield a value of ``100.0``.
+    """
+    if decimal_places is None:
+        decimal_places = Expr.literal(0)
+    return Expr(f.round(value.expr, decimal_places.expr))
+
+
+def rpad(string: Expr, count: Expr, characters: Expr | None = None) -> Expr:
+    """Add right padding to a string.
+
+    Extends the string to length ``count`` by appending ``characters`` (a space
+    by default). If the string is already longer than ``count`` then it is truncated.
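+
+    A minimal sketch (assumes a DataFrame ``df`` with a string column ``a``)::
+
+        df.select(rpad(col("a"), lit(8), lit(".")))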
+ """ + characters = characters if characters is not None else Expr.literal(" ") + return Expr(f.rpad(string.expr, count.expr, characters.expr)) + + +def rtrim(arg: Expr) -> Expr: + """Removes all characters, spaces by default, from the end of a string.""" + return Expr(f.rtrim(arg.expr)) + + +def sha224(arg: Expr) -> Expr: + """Computes the SHA-224 hash of a binary string.""" + return Expr(f.sha224(arg.expr)) + + +def sha256(arg: Expr) -> Expr: + """Computes the SHA-256 hash of a binary string.""" + return Expr(f.sha256(arg.expr)) + + +def sha384(arg: Expr) -> Expr: + """Computes the SHA-384 hash of a binary string.""" + return Expr(f.sha384(arg.expr)) + + +def sha512(arg: Expr) -> Expr: + """Computes the SHA-512 hash of a binary string.""" + return Expr(f.sha512(arg.expr)) + + +def signum(arg: Expr) -> Expr: + """Returns the sign of the argument (-1, 0, +1).""" + return Expr(f.signum(arg.expr)) + + +def sin(arg: Expr) -> Expr: + """Returns the sine of the argument. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0.0]}) + >>> result = df.select(dfn.functions.sin(dfn.col("a")).alias("sin")) + >>> result.collect_column("sin")[0].as_py() + 0.0 + """ + return Expr(f.sin(arg.expr)) + + +def sinh(arg: Expr) -> Expr: + """Returns the hyperbolic sine of the argument. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0.0]}) + >>> result = df.select(dfn.functions.sinh(dfn.col("a")).alias("sinh")) + >>> result.collect_column("sinh")[0].as_py() + 0.0 + """ + return Expr(f.sinh(arg.expr)) + + +def split_part(string: Expr, delimiter: Expr, index: Expr) -> Expr: + """Split a string and return one part. + + Splits a string based on a delimiter and picks out the desired field based + on the index. + """ + return Expr(f.split_part(string.expr, delimiter.expr, index.expr)) + + +def sqrt(arg: Expr) -> Expr: + """Returns the square root of the argument.""" + return Expr(f.sqrt(arg.expr)) + + +def starts_with(string: Expr, prefix: Expr) -> Expr: + """Returns true if string starts with prefix.""" + return Expr(f.starts_with(string.expr, prefix.expr)) + + +def strpos(string: Expr, substring: Expr) -> Expr: + """Finds the position from where the ``substring`` matches the ``string``.""" + return Expr(f.strpos(string.expr, substring.expr)) + + +def substr(string: Expr, position: Expr) -> Expr: + """Substring from the ``position`` to the end.""" + return Expr(f.substr(string.expr, position.expr)) + + +def substr_index(string: Expr, delimiter: Expr, count: Expr) -> Expr: + """Returns an indexed substring. + + The return will be the ``string`` from before ``count`` occurrences of + ``delimiter``. + """ + return Expr(f.substr_index(string.expr, delimiter.expr, count.expr)) + + +def substring(string: Expr, position: Expr, length: Expr) -> Expr: + """Substring from the ``position`` with ``length`` characters.""" + return Expr(f.substring(string.expr, position.expr, length.expr)) + + +def tan(arg: Expr) -> Expr: + """Returns the tangent of the argument. + + Examples: + --------- + >>> ctx = dfn.SessionContext() + >>> df = ctx.from_pydict({"a": [0.0]}) + >>> result = df.select(dfn.functions.tan(dfn.col("a")).alias("tan")) + >>> result.collect_column("tan")[0].as_py() + 0.0 + """ + return Expr(f.tan(arg.expr)) + + +def tanh(arg: Expr) -> Expr: + """Returns the hyperbolic tangent of the argument. 
+
+    Examples:
+    ---------
+    >>> ctx = dfn.SessionContext()
+    >>> df = ctx.from_pydict({"a": [0.0]})
+    >>> result = df.select(dfn.functions.tanh(dfn.col("a")).alias("tanh"))
+    >>> result.collect_column("tanh")[0].as_py()
+    0.0
+    """
+    return Expr(f.tanh(arg.expr))
+
+
+def to_hex(arg: Expr) -> Expr:
+    """Converts an integer to a hexadecimal string."""
+    return Expr(f.to_hex(arg.expr))
+
+
+def now() -> Expr:
+    """Returns the current timestamp in nanoseconds.
+
+    This will use the same value for all instances of now() in the same statement.
+    """
+    return Expr(f.now())
+
+
+def to_char(arg: Expr, formatter: Expr) -> Expr:
+    """Returns a string representation of a date, time, timestamp or duration.
+
+    For usage of ``formatter`` see the Rust chrono crate's ``strftime`` documentation.
+
+    [Documentation here.](https://docs.rs/chrono/latest/chrono/format/strftime/index.html)
+    """
+    return Expr(f.to_char(arg.expr, formatter.expr))
+
+
+def _unwrap_exprs(args: tuple[Expr, ...]) -> list:
+    return [arg.expr for arg in args]
+
+
+def to_date(arg: Expr, *formatters: Expr) -> Expr:
+    """Converts a value to a date (YYYY-MM-DD).
+
+    Supports strings, numeric and timestamp types as input.
+    Integers and doubles are interpreted as days since the unix epoch.
+    Strings are parsed as YYYY-MM-DD (e.g. '2023-07-20')
+    if ``formatters`` are not provided.
+
+    For usage of ``formatters`` see the Rust chrono crate's ``strftime`` documentation.
+
+    [Documentation here.](https://docs.rs/chrono/latest/chrono/format/strftime/index.html)
+    """
+    return Expr(f.to_date(arg.expr, *_unwrap_exprs(formatters)))
+
+
+def to_local_time(*args: Expr) -> Expr:
+    """Converts a timestamp with a timezone to a timestamp without a timezone.
+
+    This function handles daylight saving time changes.
+    """
+    return Expr(f.to_local_time(*_unwrap_exprs(args)))
+
+
+def to_time(arg: Expr, *formatters: Expr) -> Expr:
+    """Converts a value to a time. Supports strings and timestamps as input.
+
+    If ``formatters`` is not provided, strings are parsed as HH:MM:SS, HH:MM or
+    HH:MM:SS.nnnnnnnnn.
+
+    For usage of ``formatters`` see the Rust chrono crate's ``strftime`` documentation.
+
+    [Documentation here.](https://docs.rs/chrono/latest/chrono/format/strftime/index.html)
+    """
+    return Expr(f.to_time(arg.expr, *_unwrap_exprs(formatters)))
+
+
+def to_timestamp(arg: Expr, *formatters: Expr) -> Expr:
+    """Converts a string and optional formats to a ``Timestamp`` in nanoseconds.
+
+    For usage of ``formatters`` see the Rust chrono crate's ``strftime`` documentation.
+
+    [Documentation here.](https://docs.rs/chrono/latest/chrono/format/strftime/index.html)
+    """
+    return Expr(f.to_timestamp(arg.expr, *_unwrap_exprs(formatters)))
+
+
+def to_timestamp_millis(arg: Expr, *formatters: Expr) -> Expr:
+    """Converts a string and optional formats to a ``Timestamp`` in milliseconds.
+
+    See :py:func:`to_timestamp` for a description on how to use formatters.
+    """
+    return Expr(f.to_timestamp_millis(arg.expr, *_unwrap_exprs(formatters)))
+
+
+def to_timestamp_micros(arg: Expr, *formatters: Expr) -> Expr:
+    """Converts a string and optional formats to a ``Timestamp`` in microseconds.
+
+    See :py:func:`to_timestamp` for a description on how to use formatters.
+    """
+    return Expr(f.to_timestamp_micros(arg.expr, *_unwrap_exprs(formatters)))
+
+
+def to_timestamp_nanos(arg: Expr, *formatters: Expr) -> Expr:
+    """Converts a string and optional formats to a ``Timestamp`` in nanoseconds.
+
+    See :py:func:`to_timestamp` for a description on how to use formatters.
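+
+    A minimal sketch (assumes a DataFrame ``df`` with a string column ``ts``)::
+
+        df.select(to_timestamp_nanos(col("ts"), lit("%Y-%m-%d %H:%M:%S")))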
+ """ + return Expr(f.to_timestamp_nanos(arg.expr, *_unwrap_exprs(formatters))) + + +def to_timestamp_seconds(arg: Expr, *formatters: Expr) -> Expr: + """Converts a string and optional formats to a ``Timestamp`` in seconds. + + See :py:func:`to_timestamp` for a description on how to use formatters. + """ + return Expr(f.to_timestamp_seconds(arg.expr, *_unwrap_exprs(formatters))) + + +def to_unixtime(string: Expr, *format_arguments: Expr) -> Expr: + """Converts a string and optional formats to a Unixtime.""" + return Expr(f.to_unixtime(string.expr, *_unwrap_exprs(format_arguments))) + + +def current_date() -> Expr: + """Returns current UTC date as a Date32 value.""" + return Expr(f.current_date()) + + +today = current_date + + +def current_time() -> Expr: + """Returns current UTC time as a Time64 value.""" + return Expr(f.current_time()) + + +def datepart(part: Expr, date: Expr) -> Expr: + """Return a specified part of a date. + + This is an alias for :py:func:`date_part`. + """ + return date_part(part, date) + + +def date_part(part: Expr, date: Expr) -> Expr: + """Extracts a subfield from the date.""" + return Expr(f.date_part(part.expr, date.expr)) + + +def extract(part: Expr, date: Expr) -> Expr: + """Extracts a subfield from the date. + + This is an alias for :py:func:`date_part`. + """ + return date_part(part, date) + + +def date_trunc(part: Expr, date: Expr) -> Expr: + """Truncates the date to a specified level of precision.""" + return Expr(f.date_trunc(part.expr, date.expr)) + + +def datetrunc(part: Expr, date: Expr) -> Expr: + """Truncates the date to a specified level of precision. + + This is an alias for :py:func:`date_trunc`. + """ + return date_trunc(part, date) + + +def date_bin(stride: Expr, source: Expr, origin: Expr) -> Expr: + """Coerces an arbitrary timestamp to the start of the nearest specified interval.""" + return Expr(f.date_bin(stride.expr, source.expr, origin.expr)) + + +def make_date(year: Expr, month: Expr, day: Expr) -> Expr: + """Make a date from year, month and day component parts.""" + return Expr(f.make_date(year.expr, month.expr, day.expr)) + + +def translate(string: Expr, from_val: Expr, to_val: Expr) -> Expr: + """Replaces the characters in ``from_val`` with the counterpart in ``to_val``.""" + return Expr(f.translate(string.expr, from_val.expr, to_val.expr)) + + +def trim(arg: Expr) -> Expr: + """Removes all characters, spaces by default, from both sides of a string.""" + return Expr(f.trim(arg.expr)) + + +def trunc(num: Expr, precision: Expr | None = None) -> Expr: + """Truncate the number toward zero with optional precision.""" + if precision is not None: + return Expr(f.trunc(num.expr, precision.expr)) + return Expr(f.trunc(num.expr)) + + +def upper(arg: Expr) -> Expr: + """Converts a string to uppercase.""" + return Expr(f.upper(arg.expr)) + + +def make_array(*args: Expr) -> Expr: + """Returns an array using the specified input expressions.""" + args = [arg.expr for arg in args] + return Expr(f.make_array(args)) + + +def make_list(*args: Expr) -> Expr: + """Returns an array using the specified input expressions. + + This is an alias for :py:func:`make_array`. + """ + return make_array(*args) + + +def array(*args: Expr) -> Expr: + """Returns an array using the specified input expressions. + + This is an alias for :py:func:`make_array`. 
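+
+    A minimal sketch (assumes a DataFrame ``df`` with columns ``a`` and ``b``)::
+
+        df.select(array(col("a"), col("b"), lit(0)))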
+ """ + return make_array(*args) + + +def range(start: Expr, stop: Expr, step: Expr) -> Expr: + """Create a list of values in the range between start and stop.""" + return Expr(f.range(start.expr, stop.expr, step.expr)) + + +def uuid() -> Expr: + """Returns uuid v4 as a string value.""" + return Expr(f.uuid()) + + +def struct(*args: Expr) -> Expr: + """Returns a struct with the given arguments.""" + args = [arg.expr for arg in args] + return Expr(f.struct(*args)) + + +def named_struct(name_pairs: list[tuple[str, Expr]]) -> Expr: + """Returns a struct with the given names and arguments pairs.""" + name_pair_exprs = [ + [Expr.literal(pa.scalar(pair[0], type=pa.string())), pair[1]] + for pair in name_pairs + ] + + # flatten + name_pairs = [x.expr for xs in name_pair_exprs for x in xs] + return Expr(f.named_struct(*name_pairs)) + + +def from_unixtime(arg: Expr) -> Expr: + """Converts an integer to RFC3339 timestamp format string.""" + return Expr(f.from_unixtime(arg.expr)) + + +def arrow_typeof(arg: Expr) -> Expr: + """Returns the Arrow type of the expression.""" + return Expr(f.arrow_typeof(arg.expr)) + + +def arrow_cast(expr: Expr, data_type: Expr) -> Expr: + """Casts an expression to a specified data type.""" + return Expr(f.arrow_cast(expr.expr, data_type.expr)) + + +def random() -> Expr: + """Returns a random value in the range ``0.0 <= x < 1.0``.""" + return Expr(f.random()) + + +def array_append(array: Expr, element: Expr) -> Expr: + """Appends an element to the end of an array.""" + return Expr(f.array_append(array.expr, element.expr)) + + +def array_push_back(array: Expr, element: Expr) -> Expr: + """Appends an element to the end of an array. + + This is an alias for :py:func:`array_append`. + """ + return array_append(array, element) + + +def list_append(array: Expr, element: Expr) -> Expr: + """Appends an element to the end of an array. + + This is an alias for :py:func:`array_append`. + """ + return array_append(array, element) + + +def list_push_back(array: Expr, element: Expr) -> Expr: + """Appends an element to the end of an array. + + This is an alias for :py:func:`array_append`. + """ + return array_append(array, element) + + +def array_concat(*args: Expr) -> Expr: + """Concatenates the input arrays.""" + args = [arg.expr for arg in args] + return Expr(f.array_concat(args)) + + +def array_cat(*args: Expr) -> Expr: + """Concatenates the input arrays. + + This is an alias for :py:func:`array_concat`. + """ + return array_concat(*args) + + +def array_dims(array: Expr) -> Expr: + """Returns an array of the array's dimensions.""" + return Expr(f.array_dims(array.expr)) + + +def array_distinct(array: Expr) -> Expr: + """Returns distinct values from the array after removing duplicates.""" + return Expr(f.array_distinct(array.expr)) + + +def list_cat(*args: Expr) -> Expr: + """Concatenates the input arrays. + + This is an alias for :py:func:`array_concat`, :py:func:`array_cat`. + """ + return array_concat(*args) + + +def list_concat(*args: Expr) -> Expr: + """Concatenates the input arrays. + + This is an alias for :py:func:`array_concat`, :py:func:`array_cat`. + """ + return array_concat(*args) + + +def list_distinct(array: Expr) -> Expr: + """Returns distinct values from the array after removing duplicates. + + This is an alias for :py:func:`array_distinct`. + """ + return array_distinct(array) + + +def list_dims(array: Expr) -> Expr: + """Returns an array of the array's dimensions. + + This is an alias for :py:func:`array_dims`. 
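+
+    A minimal sketch (assumes a list-of-lists column ``a``; ``[[1, 2], [3, 4]]``
+    yields ``[2, 2]``)::
+
+        df.select(list_dims(col("a")))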
+ """ + return array_dims(array) + + +def array_element(array: Expr, n: Expr) -> Expr: + """Extracts the element with the index n from the array.""" + return Expr(f.array_element(array.expr, n.expr)) + + +def array_empty(array: Expr) -> Expr: + """Returns a boolean indicating whether the array is empty.""" + return Expr(f.array_empty(array.expr)) + + +def array_extract(array: Expr, n: Expr) -> Expr: + """Extracts the element with the index n from the array. + + This is an alias for :py:func:`array_element`. + """ + return array_element(array, n) + + +def list_element(array: Expr, n: Expr) -> Expr: + """Extracts the element with the index n from the array. + + This is an alias for :py:func:`array_element`. + """ + return array_element(array, n) + + +def list_extract(array: Expr, n: Expr) -> Expr: + """Extracts the element with the index n from the array. + + This is an alias for :py:func:`array_element`. + """ + return array_element(array, n) + + +def array_length(array: Expr) -> Expr: + """Returns the length of the array.""" + return Expr(f.array_length(array.expr)) + + +def list_length(array: Expr) -> Expr: + """Returns the length of the array. + + This is an alias for :py:func:`array_length`. + """ + return array_length(array) + + +def array_has(first_array: Expr, second_array: Expr) -> Expr: + """Returns true if the element appears in the first array, otherwise false.""" + return Expr(f.array_has(first_array.expr, second_array.expr)) + + +def array_has_all(first_array: Expr, second_array: Expr) -> Expr: + """Determines if there is complete overlap ``second_array`` in ``first_array``. + + Returns true if each element of the second array appears in the first array. + Otherwise, it returns false. + """ + return Expr(f.array_has_all(first_array.expr, second_array.expr)) + + +def array_has_any(first_array: Expr, second_array: Expr) -> Expr: + """Determine if there is an overlap between ``first_array`` and ``second_array``. + + Returns true if at least one element of the second array appears in the first + array. Otherwise, it returns false. + """ + return Expr(f.array_has_any(first_array.expr, second_array.expr)) + + +def array_position(array: Expr, element: Expr, index: int | None = 1) -> Expr: + """Return the position of the first occurrence of ``element`` in ``array``.""" + return Expr(f.array_position(array.expr, element.expr, index)) + + +def array_indexof(array: Expr, element: Expr, index: int | None = 1) -> Expr: + """Return the position of the first occurrence of ``element`` in ``array``. + + This is an alias for :py:func:`array_position`. + """ + return array_position(array, element, index) + + +def list_position(array: Expr, element: Expr, index: int | None = 1) -> Expr: + """Return the position of the first occurrence of ``element`` in ``array``. + + This is an alias for :py:func:`array_position`. + """ + return array_position(array, element, index) + + +def list_indexof(array: Expr, element: Expr, index: int | None = 1) -> Expr: + """Return the position of the first occurrence of ``element`` in ``array``. + + This is an alias for :py:func:`array_position`. + """ + return array_position(array, element, index) + + +def array_positions(array: Expr, element: Expr) -> Expr: + """Searches for an element in the array and returns all occurrences.""" + return Expr(f.array_positions(array.expr, element.expr)) + + +def list_positions(array: Expr, element: Expr) -> Expr: + """Searches for an element in the array and returns all occurrences. 
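+
+    A minimal sketch (assumes a list column ``a``; returns the 1-based indices at
+    which ``1`` occurs)::
+
+        df.select(list_positions(col("a"), lit(1)))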
+ + This is an alias for :py:func:`array_positions`. + """ + return array_positions(array, element) + + +def array_ndims(array: Expr) -> Expr: + """Returns the number of dimensions of the array.""" + return Expr(f.array_ndims(array.expr)) + + +def list_ndims(array: Expr) -> Expr: + """Returns the number of dimensions of the array. + + This is an alias for :py:func:`array_ndims`. + """ + return array_ndims(array) + + +def array_prepend(element: Expr, array: Expr) -> Expr: + """Prepends an element to the beginning of an array.""" + return Expr(f.array_prepend(element.expr, array.expr)) + + +def array_push_front(element: Expr, array: Expr) -> Expr: + """Prepends an element to the beginning of an array. + + This is an alias for :py:func:`array_prepend`. + """ + return array_prepend(element, array) + + +def list_prepend(element: Expr, array: Expr) -> Expr: + """Prepends an element to the beginning of an array. + + This is an alias for :py:func:`array_prepend`. + """ + return array_prepend(element, array) + + +def list_push_front(element: Expr, array: Expr) -> Expr: + """Prepends an element to the beginning of an array. + + This is an alias for :py:func:`array_prepend`. + """ + return array_prepend(element, array) + + +def array_pop_back(array: Expr) -> Expr: + """Returns the array without the last element.""" + return Expr(f.array_pop_back(array.expr)) + + +def array_pop_front(array: Expr) -> Expr: + """Returns the array without the first element.""" + return Expr(f.array_pop_front(array.expr)) + + +def array_remove(array: Expr, element: Expr) -> Expr: + """Removes the first element from the array equal to the given value.""" + return Expr(f.array_remove(array.expr, element.expr)) + + +def list_remove(array: Expr, element: Expr) -> Expr: + """Removes the first element from the array equal to the given value. + + This is an alias for :py:func:`array_remove`. + """ + return array_remove(array, element) + + +def array_remove_n(array: Expr, element: Expr, max: Expr) -> Expr: + """Removes the first ``max`` elements from the array equal to the given value.""" + return Expr(f.array_remove_n(array.expr, element.expr, max.expr)) + + +def list_remove_n(array: Expr, element: Expr, max: Expr) -> Expr: + """Removes the first ``max`` elements from the array equal to the given value. + + This is an alias for :py:func:`array_remove_n`. + """ + return array_remove_n(array, element, max) + + +def array_remove_all(array: Expr, element: Expr) -> Expr: + """Removes all elements from the array equal to the given value.""" + return Expr(f.array_remove_all(array.expr, element.expr)) + + +def list_remove_all(array: Expr, element: Expr) -> Expr: + """Removes all elements from the array equal to the given value. + + This is an alias for :py:func:`array_remove_all`. + """ + return array_remove_all(array, element) + + +def array_repeat(element: Expr, count: Expr) -> Expr: + """Returns an array containing ``element`` ``count`` times.""" + return Expr(f.array_repeat(element.expr, count.expr)) + + +def list_repeat(element: Expr, count: Expr) -> Expr: + """Returns an array containing ``element`` ``count`` times. + + This is an alias for :py:func:`array_repeat`. 
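+
+    A minimal sketch (``lit("x")`` repeated three times yields ``["x", "x", "x"]``)::
+
+        df.select(list_repeat(lit("x"), lit(3)))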
+    """
+    return array_repeat(element, count)
+
+
+def array_replace(array: Expr, from_val: Expr, to_val: Expr) -> Expr:
+    """Replaces the first occurrence of ``from_val`` with ``to_val``."""
+    return Expr(f.array_replace(array.expr, from_val.expr, to_val.expr))
+
+
+def list_replace(array: Expr, from_val: Expr, to_val: Expr) -> Expr:
+    """Replaces the first occurrence of ``from_val`` with ``to_val``.
+
+    This is an alias for :py:func:`array_replace`.
+    """
+    return array_replace(array, from_val, to_val)
+
+
+def array_replace_n(array: Expr, from_val: Expr, to_val: Expr, max: Expr) -> Expr:
+    """Replace the first ``max`` occurrences of ``from_val`` with ``to_val``.
+
+    Replaces the first ``max`` occurrences of the specified element with another
+    specified element.
+    """
+    return Expr(f.array_replace_n(array.expr, from_val.expr, to_val.expr, max.expr))
+
+
+def list_replace_n(array: Expr, from_val: Expr, to_val: Expr, max: Expr) -> Expr:
+    """Replace the first ``max`` occurrences of ``from_val`` with ``to_val``.
+
+    Replaces the first ``max`` occurrences of the specified element with another
+    specified element.
+
+    This is an alias for :py:func:`array_replace_n`.
+    """
+    return array_replace_n(array, from_val, to_val, max)
+
+
+def array_replace_all(array: Expr, from_val: Expr, to_val: Expr) -> Expr:
+    """Replaces all occurrences of ``from_val`` with ``to_val``."""
+    return Expr(f.array_replace_all(array.expr, from_val.expr, to_val.expr))
+
+
+def list_replace_all(array: Expr, from_val: Expr, to_val: Expr) -> Expr:
+    """Replaces all occurrences of ``from_val`` with ``to_val``.
+
+    This is an alias for :py:func:`array_replace_all`.
+    """
+    return array_replace_all(array, from_val, to_val)
+
+
+def array_sort(array: Expr, descending: bool = False, null_first: bool = False) -> Expr:
+    """Sort an array.
+
+    Args:
+        array: The input array to sort.
+        descending: If True, sorts in descending order.
+        null_first: If True, nulls will be returned at the beginning of the array.
+    """
+    desc = "DESC" if descending else "ASC"
+    nulls_first = "NULLS FIRST" if null_first else "NULLS LAST"
+    return Expr(
+        f.array_sort(
+            array.expr,
+            Expr.literal(pa.scalar(desc, type=pa.string())).expr,
+            Expr.literal(pa.scalar(nulls_first, type=pa.string())).expr,
+        )
+    )
+
+
+def list_sort(array: Expr, descending: bool = False, null_first: bool = False) -> Expr:
+    """This is an alias for :py:func:`array_sort`."""
+    return array_sort(array, descending=descending, null_first=null_first)
+
+
+def array_slice(
+    array: Expr, begin: Expr, end: Expr, stride: Expr | None = None
+) -> Expr:
+    """Returns a slice of the array."""
+    if stride is not None:
+        stride = stride.expr
+    return Expr(f.array_slice(array.expr, begin.expr, end.expr, stride))
+
+
+def list_slice(array: Expr, begin: Expr, end: Expr, stride: Expr | None = None) -> Expr:
+    """Returns a slice of the array.
+
+    This is an alias for :py:func:`array_slice`.
+    """
+    return array_slice(array, begin, end, stride)
+
+
+def array_intersect(array1: Expr, array2: Expr) -> Expr:
+    """Returns the intersection of ``array1`` and ``array2``."""
+    return Expr(f.array_intersect(array1.expr, array2.expr))
+
+
+def list_intersect(array1: Expr, array2: Expr) -> Expr:
+    """Returns the intersection of ``array1`` and ``array2``.
+
+    This is an alias for :py:func:`array_intersect`.
+    """
+    return array_intersect(array1, array2)
+
+
+def array_union(array1: Expr, array2: Expr) -> Expr:
+    """Returns an array of the elements in the union of array1 and array2.
+
+    Duplicate rows will not be returned.
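+
+    A minimal sketch (assumes list columns ``a`` and ``b``)::
+
+        df.select(array_union(col("a"), col("b")))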
+ """ + return Expr(f.array_union(array1.expr, array2.expr)) + + +def list_union(array1: Expr, array2: Expr) -> Expr: + """Returns an array of the elements in the union of array1 and array2. + + Duplicate rows will not be returned. + + This is an alias for :py:func:`array_union`. + """ + return array_union(array1, array2) + + +def array_except(array1: Expr, array2: Expr) -> Expr: + """Returns the elements that appear in ``array1`` but not in ``array2``.""" + return Expr(f.array_except(array1.expr, array2.expr)) + + +def list_except(array1: Expr, array2: Expr) -> Expr: + """Returns the elements that appear in ``array1`` but not in the ``array2``. + + This is an alias for :py:func:`array_except`. + """ + return array_except(array1, array2) + + +def array_resize(array: Expr, size: Expr, value: Expr) -> Expr: + """Returns an array with the specified size filled. + + If ``size`` is greater than the ``array`` length, the additional entries will + be filled with the given ``value``. + """ + return Expr(f.array_resize(array.expr, size.expr, value.expr)) + + +def list_resize(array: Expr, size: Expr, value: Expr) -> Expr: + """Returns an array with the specified size filled. + + If ``size`` is greater than the ``array`` length, the additional entries will be + filled with the given ``value``. This is an alias for :py:func:`array_resize`. + """ + return array_resize(array, size, value) + + +def flatten(array: Expr) -> Expr: + """Flattens an array of arrays into a single array.""" + return Expr(f.flatten(array.expr)) + + +def cardinality(array: Expr) -> Expr: + """Returns the total number of elements in the array.""" + return Expr(f.cardinality(array.expr)) + + +def empty(array: Expr) -> Expr: + """This is an alias for :py:func:`array_empty`.""" + return array_empty(array) + + +# aggregate functions +def approx_distinct( + expression: Expr, + filter: Expr | None = None, +) -> Expr: + """Returns the approximate number of distinct values. + + This aggregate function is similar to :py:func:`count` with distinct set, but it + will approximate the number of distinct entries. It may return significantly faster + than :py:func:`count` for some DataFrames. + + If using the builder functions described in ref:`_aggregation` this function ignores + the options ``order_by``, ``null_treatment``, and ``distinct``. + + Args: + expression: Values to check for distinct entries + filter: If provided, only compute against rows for which the filter is True + """ + filter_raw = filter.expr if filter is not None else None + + return Expr(f.approx_distinct(expression.expr, filter=filter_raw)) + + +def approx_median(expression: Expr, filter: Expr | None = None) -> Expr: + """Returns the approximate median value. + + This aggregate function is similar to :py:func:`median`, but it will only + approximate the median. It may return significantly faster for some DataFrames. + + If using the builder functions described in ref:`_aggregation` this function ignores + the options ``order_by`` and ``null_treatment``, and ``distinct``. 
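+
+    A minimal sketch (assumes a DataFrame ``df`` with a numeric column ``a``)::
+
+        df.aggregate([], [approx_median(col("a"))])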
+
+    Args:
+        expression: Values to find the median for
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.approx_median(expression.expr, filter=filter_raw))
+
+
+def approx_percentile_cont(
+    sort_expression: Expr | SortExpr,
+    percentile: float,
+    num_centroids: int | None = None,
+    filter: Expr | None = None,
+) -> Expr:
+    """Returns the value that is approximately at a given percentile of ``sort_expression``.
+
+    This aggregate function assumes the input values form a continuous distribution.
+    Suppose you have a DataFrame which consists of 100 different test scores. If you
+    called this function with a percentile of 0.9, it would return the value of the
+    test score that is above 90% of the other test scores. The returned value may be
+    between two of the values.
+
+    This function uses the [t-digest](https://arxiv.org/abs/1902.04023) algorithm to
+    compute the percentile. You can limit the number of bins used in this algorithm by
+    setting the ``num_centroids`` parameter.
+
+    If using the builder functions described in ref:`_aggregation` this function ignores
+    the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        sort_expression: Values for which to find the approximate percentile
+        percentile: This must be between 0.0 and 1.0, inclusive
+        num_centroids: Max bin size for the t-digest algorithm
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    sort_expr_raw = sort_or_default(sort_expression)
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(
+        f.approx_percentile_cont(
+            sort_expr_raw, percentile, num_centroids=num_centroids, filter=filter_raw
+        )
+    )
+
+
+def approx_percentile_cont_with_weight(
+    sort_expression: Expr | SortExpr,
+    weight: Expr,
+    percentile: float,
+    num_centroids: int | None = None,
+    filter: Expr | None = None,
+) -> Expr:
+    """Returns the value of the weighted approximate percentile.
+
+    This aggregate function is similar to :py:func:`approx_percentile_cont` except that
+    it uses the associated weights.
+
+    If using the builder functions described in ref:`_aggregation` this function ignores
+    the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        sort_expression: Values for which to find the approximate percentile
+        weight: Relative weight for each of the values in ``expression``
+        percentile: This must be between 0.0 and 1.0, inclusive
+        num_centroids: Max bin size for the t-digest algorithm
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    sort_expr_raw = sort_or_default(sort_expression)
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(
+        f.approx_percentile_cont_with_weight(
+            sort_expr_raw,
+            weight.expr,
+            percentile,
+            num_centroids=num_centroids,
+            filter=filter_raw,
+        )
+    )
+
+
+def array_agg(
+    expression: Expr,
+    distinct: bool = False,
+    filter: Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Aggregate values into an array.
+
+    Currently ``distinct`` and ``order_by`` cannot be used together. As a workaround,
+    consider :py:func:`array_sort` after aggregation.
+    [Issue Tracker](https://github.com/apache/datafusion/issues/12371)
+
+    If using the builder functions described in ref:`_aggregation` this function ignores
+    the option ``null_treatment``.
+
+    Args:
+        expression: Values to combine into an array
+        distinct: If True, a single entry for each distinct value will be in the result
+        filter: If provided, only compute against rows for which the filter is True
+        order_by: Order the resultant array values. Accepts column names or expressions.
+
+            For example::
+
+                df.aggregate([], array_agg(col("a"), order_by="b"))
+    """
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(
+        f.array_agg(
+            expression.expr, distinct=distinct, filter=filter_raw, order_by=order_by_raw
+        )
+    )
+
+
+def avg(
+    expression: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Returns the average value.
+
+    This aggregate function expects a numeric expression and will return a float.
+
+    If using the builder functions described in ref:`_aggregation` this function ignores
+    the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: The values to compute the average of
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.avg(expression.expr, filter=filter_raw))
+
+
+def corr(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr:
+    """Returns the correlation coefficient between ``value_y`` and ``value_x``.
+
+    This aggregate function expects both values to be numeric and will return a float.
+
+    If using the builder functions described in ref:`_aggregation` this function ignores
+    the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        value_y: The dependent variable for correlation
+        value_x: The independent variable for correlation
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.corr(value_y.expr, value_x.expr, filter=filter_raw))
+
+
+def count(
+    expressions: Expr | list[Expr] | None = None,
+    distinct: bool = False,
+    filter: Expr | None = None,
+) -> Expr:
+    """Returns the number of rows that match the given arguments.
+
+    This aggregate function will count the non-null rows provided in the expression.
+
+    If using the builder functions described in ref:`_aggregation` this function ignores
+    the options ``order_by`` and ``null_treatment``.
+
+    Args:
+        expressions: Values to count. If not provided, all rows are counted.
+        distinct: If True, a single entry for each distinct value will be in the result
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    if expressions is None:
+        args = [Expr.literal(1).expr]
+    elif isinstance(expressions, list):
+        args = [arg.expr for arg in expressions]
+    else:
+        args = [expressions.expr]
+
+    return Expr(f.count(*args, distinct=distinct, filter=filter_raw))
+
+
+def covar_pop(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the population covariance.
+
+    This aggregate function expects both values to be numeric and will return a float.
+
+    If using the builder functions described in ref:`_aggregation` this function ignores
+    the options ``order_by``, ``null_treatment``, and ``distinct``.
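+
+    A minimal sketch (assumes numeric columns ``y`` and ``x``)::
+
+        df.aggregate([], [covar_pop(col("y"), col("x"))])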
+
+    Args:
+        value_y: The dependent variable for covariance
+        value_x: The independent variable for covariance
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.covar_pop(value_y.expr, value_x.expr, filter=filter_raw))
+
+
+def covar_samp(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the sample covariance.
+
+    This aggregate function expects both values to be numeric and will return a float.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        value_y: The dependent variable for covariance
+        value_x: The independent variable for covariance
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.covar_samp(value_y.expr, value_x.expr, filter=filter_raw))
+
+
+def covar(value_y: Expr, value_x: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the sample covariance.
+
+    This is an alias for :py:func:`covar_samp`.
+    """
+    return covar_samp(value_y, value_x, filter)
+
+
+def max(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Aggregate function that returns the maximum value of the argument.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: The value to find the maximum of
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.max(expression.expr, filter=filter_raw))
+
+
+def mean(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Returns the average (mean) value of the argument.
+
+    This is an alias for :py:func:`avg`.
+    """
+    return avg(expression, filter)
+
+
+def median(
+    expression: Expr, distinct: bool = False, filter: Expr | None = None
+) -> Expr:
+    """Computes the median of a set of numbers.
+
+    This aggregate function returns the median value of the expression over the
+    input rows.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by`` and ``null_treatment``.
+
+    Args:
+        expression: The value to compute the median of
+        distinct: If True, a single entry for each distinct value will be in the result
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.median(expression.expr, distinct=distinct, filter=filter_raw))
+
+
+def min(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Aggregate function that returns the minimum value of the argument.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: The value to find the minimum of
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.min(expression.expr, filter=filter_raw))
+
+
+def sum(
+    expression: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the sum of a set of numbers.
+
+    This aggregate function expects a numeric expression.
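+
+    For example, summing a hypothetical column ``a``::
+
+        df.aggregate([], sum(col("a")))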
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: Values to sum
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.sum(expression.expr, filter=filter_raw))
+
+
+def stddev(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the standard deviation of the argument.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: The values to compute the standard deviation of
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.stddev(expression.expr, filter=filter_raw))
+
+
+def stddev_pop(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the population standard deviation of the argument.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: The values to compute the population standard deviation of
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.stddev_pop(expression.expr, filter=filter_raw))
+
+
+def stddev_samp(arg: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the sample standard deviation of the argument.
+
+    This is an alias for :py:func:`stddev`.
+    """
+    return stddev(arg, filter=filter)
+
+
+def var(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the sample variance of the argument.
+
+    This is an alias for :py:func:`var_samp`.
+    """
+    return var_samp(expression, filter)
+
+
+def var_pop(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the population variance of the argument.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: The variable to compute the variance for
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.var_pop(expression.expr, filter=filter_raw))
+
+
+def var_samp(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the sample variance of the argument.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: The variable to compute the variance for
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.var_sample(expression.expr, filter=filter_raw))
+
+
+def var_sample(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the sample variance of the argument.
+
+    This is an alias for :py:func:`var_samp`.
+    """
+    return var_samp(expression, filter)
+
+
+def regr_avgx(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the average of the independent variable ``x``.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
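+
+    A usage sketch (the column names are illustrative only)::
+
+        df.aggregate([], regr_avgx(col("y"), col("x")))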
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_avgx(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_avgy(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the average of the dependent variable ``y``.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_avgy(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_count(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Counts the number of rows in which both expressions are not null.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_count(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_intercept(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the intercept from the linear regression.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_intercept(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_r2(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the R-squared value from linear regression.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_r2(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_slope(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the slope from linear regression.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_slope(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_sxx(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the sum of squares of the independent variable ``x``.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_sxx(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_sxy(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the sum of products of pairs of numbers.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_sxy(y.expr, x.expr, filter=filter_raw))
+
+
+def regr_syy(
+    y: Expr,
+    x: Expr,
+    filter: Expr | None = None,
+) -> Expr:
+    """Computes the sum of squares of the dependent variable ``y``.
+
+    This is a linear regression aggregate function. Only non-null pairs of the inputs
+    are evaluated.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        y: The linear regression dependent variable
+        x: The linear regression independent variable
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(f.regr_syy(y.expr, x.expr, filter=filter_raw))
+
+
+def first_value(
+    expression: Expr,
+    filter: Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+    null_treatment: NullTreatment = NullTreatment.RESPECT_NULLS,
+) -> Expr:
+    """Returns the first value in a group of values.
+
+    This aggregate function will return the first value in the partition.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the option ``distinct``.
+
+    Args:
+        expression: Values to evaluate and return the first of
+        filter: If provided, only compute against rows for which the filter is True
+        order_by: Set the ordering of the expression to evaluate. Accepts
+            column names or expressions.
+        null_treatment: Assign whether to respect or ignore null values.
+
+    For example::
+
+        df.aggregate([], first_value(col("a"), order_by="ts"))
+    """
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(
+        f.first_value(
+            expression.expr,
+            filter=filter_raw,
+            order_by=order_by_raw,
+            null_treatment=null_treatment.value,
+        )
+    )
+
+
+def last_value(
+    expression: Expr,
+    filter: Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+    null_treatment: NullTreatment = NullTreatment.RESPECT_NULLS,
+) -> Expr:
+    """Returns the last value in a group of values.
+
+    This aggregate function will return the last value in the partition.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the option ``distinct``.
+
+    Args:
+        expression: Values to evaluate and return the last of
+        filter: If provided, only compute against rows for which the filter is True
+        order_by: Set the ordering of the expression to evaluate. Accepts
+            column names or expressions.
+        null_treatment: Assign whether to respect or ignore null values.
+
+    For example::
+
+        df.aggregate([], last_value(col("a"), order_by="ts"))
+    """
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(
+        f.last_value(
+            expression.expr,
+            filter=filter_raw,
+            order_by=order_by_raw,
+            null_treatment=null_treatment.value,
+        )
+    )
+
+
+def nth_value(
+    expression: Expr,
+    n: int,
+    filter: Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+    null_treatment: NullTreatment = NullTreatment.RESPECT_NULLS,
+) -> Expr:
+    """Returns the n-th value in a group of values.
+
+    This aggregate function will return the n-th value in the partition.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the option ``distinct``.
+
+    Args:
+        expression: Values to evaluate and return the n-th of
+        n: Index of value to return. Starts at 1.
+        filter: If provided, only compute against rows for which the filter is True
+        order_by: Set the ordering of the expression to evaluate. Accepts
+            column names or expressions.
+        null_treatment: Assign whether to respect or ignore null values.
+
+    For example::
+
+        df.aggregate([], nth_value(col("a"), 2, order_by="ts"))
+    """
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(
+        f.nth_value(
+            expression.expr,
+            n,
+            filter=filter_raw,
+            order_by=order_by_raw,
+            null_treatment=null_treatment.value,
+        )
+    )
+
+
+def bit_and(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the bitwise AND of the argument.
+
+    This aggregate function will bitwise compare every value in the input partition.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: Argument to perform bitwise calculation on
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.bit_and(expression.expr, filter=filter_raw))
+
+
+def bit_or(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the bitwise OR of the argument.
+
+    This aggregate function will bitwise compare every value in the input partition.
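+
+    A usage sketch, assuming an integer column ``flags`` (the name is illustrative
+    only)::
+
+        df.aggregate([], bit_or(col("flags")))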
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: Argument to perform bitwise calculation on
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.bit_or(expression.expr, filter=filter_raw))
+
+
+def bit_xor(
+    expression: Expr, distinct: bool = False, filter: Expr | None = None
+) -> Expr:
+    """Computes the bitwise XOR of the argument.
+
+    This aggregate function will bitwise compare every value in the input partition.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by`` and ``null_treatment``.
+
+    Args:
+        expression: Argument to perform bitwise calculation on
+        distinct: If True, evaluate each unique value of expression only once
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.bit_xor(expression.expr, distinct=distinct, filter=filter_raw))
+
+
+def bool_and(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the boolean AND of the argument.
+
+    This aggregate function will compare every value in the input partition. These are
+    expected to be boolean values.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: Argument to perform calculation on
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.bool_and(expression.expr, filter=filter_raw))
+
+
+def bool_or(expression: Expr, filter: Expr | None = None) -> Expr:
+    """Computes the boolean OR of the argument.
+
+    This aggregate function will compare every value in the input partition. These are
+    expected to be boolean values.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``order_by``, ``null_treatment``, and ``distinct``.
+
+    Args:
+        expression: Argument to perform calculation on
+        filter: If provided, only compute against rows for which the filter is True
+    """
+    filter_raw = filter.expr if filter is not None else None
+    return Expr(f.bool_or(expression.expr, filter=filter_raw))
+
+
+def lead(
+    arg: Expr,
+    shift_offset: int = 1,
+    default_value: Any | None = None,
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a lead window function.
+
+    Lead operation will return the argument that is in the next shift_offset-th row in
+    the partition. For example ``lead(col("b"), shift_offset=3, default_value=5)`` will
+    return the 3rd following value in column ``b``. At the end of the partition, where
+    no further values can be returned, it will return the default value of 5.
+
+    Here is an example of both the ``lead`` and :py:func:`datafusion.functions.lag`
+    functions on a simple DataFrame::
+
+        +--------+------+-----+
+        | points | lead | lag |
+        +--------+------+-----+
+        | 100    | 100  |     |
+        | 100    | 50   | 100 |
+        | 50     | 25   | 100 |
+        | 25     |      | 50  |
+        +--------+------+-----+
+
+    To set window function parameters, use the window builder approach described in
+    the :ref:`window_functions` online documentation.
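+
+    For example, a sketch combining an offset with a default value (the column
+    names are illustrative only)::
+
+        lead(col("b"), shift_offset=2, default_value=0, order_by="ts")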
+
+    Args:
+        arg: Value to return
+        shift_offset: Number of rows following the current row.
+        default_value: Value to return if the shift_offset row does not exist.
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        lead(col("b"), order_by="ts")
+    """
+    if not isinstance(default_value, pa.Scalar) and default_value is not None:
+        default_value = pa.scalar(default_value)
+
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.lead(
+            arg.expr,
+            shift_offset,
+            default_value,
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def lag(
+    arg: Expr,
+    shift_offset: int = 1,
+    default_value: Any | None = None,
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a lag window function.
+
+    Lag operation will return the argument that is in the previous shift_offset-th row
+    in the partition. For example ``lag(col("b"), shift_offset=3, default_value=5)``
+    will return the 3rd previous value in column ``b``. At the beginning of the
+    partition, where no values can be returned, it will return the default value of 5.
+
+    Here is an example of both the ``lag`` and :py:func:`datafusion.functions.lead`
+    functions on a simple DataFrame::
+
+        +--------+------+-----+
+        | points | lead | lag |
+        +--------+------+-----+
+        | 100    | 100  |     |
+        | 100    | 50   | 100 |
+        | 50     | 25   | 100 |
+        | 25     |      | 50  |
+        +--------+------+-----+
+
+    Args:
+        arg: Value to return
+        shift_offset: Number of rows before the current row.
+        default_value: Value to return if the shift_offset row does not exist.
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        lag(col("b"), order_by="ts")
+    """
+    if not isinstance(default_value, pa.Scalar) and default_value is not None:
+        default_value = pa.scalar(default_value)
+
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.lag(
+            arg.expr,
+            shift_offset,
+            default_value,
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def row_number(
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a row number window function.
+
+    Returns the row number of the current row within its window partition.
+
+    Here is an example of ``row_number`` on a simple DataFrame::
+
+        +--------+------------+
+        | points | row number |
+        +--------+------------+
+        | 100    | 1          |
+        | 100    | 2          |
+        | 50     | 3          |
+        | 25     | 4          |
+        +--------+------------+
+
+    Args:
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        row_number(order_by="points")
+    """
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.row_number(
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def rank(
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a rank window function.
+
+    Returns the rank based upon the window order. Consecutive equal values will
+    receive the same rank, but the next different value will receive a rank equal to
+    the number of rows that precede it plus one. This is similar to Olympic medals:
+    if two people tie for gold, the next place is bronze and there is no silver
+    medal. You should set ``order_by`` to produce meaningful results. Here is an
+    example of a dataframe with a window ordered by descending ``points`` and the
+    associated rank::
+
+        +--------+------+
+        | points | rank |
+        +--------+------+
+        | 100    | 1    |
+        | 100    | 1    |
+        | 50     | 3    |
+        | 25     | 4    |
+        +--------+------+
+
+    Args:
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        rank(order_by="points")
+    """
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.rank(
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def dense_rank(
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a dense_rank window function.
+
+    This window function is similar to :py:func:`rank` except that the returned values
+    will be consecutive. Here is an example of a dataframe with a window ordered by
+    descending ``points`` and the associated dense rank::
+
+        +--------+------------+
+        | points | dense_rank |
+        +--------+------------+
+        | 100    | 1          |
+        | 100    | 1          |
+        | 50     | 2          |
+        | 25     | 3          |
+        +--------+------------+
+
+    Args:
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        dense_rank(order_by="points")
+    """
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.dense_rank(
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def percent_rank(
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a percent_rank window function.
+
+    This window function is similar to :py:func:`rank` except that the returned values
+    are the percentage from 0.0 to 1.0 from first to last. Here is an example of a
+    dataframe with a window ordered by descending ``points`` and the associated percent
+    rank::
+
+        +--------+--------------+
+        | points | percent_rank |
+        +--------+--------------+
+        | 100    | 0.0          |
+        | 100    | 0.0          |
+        | 50     | 0.666667     |
+        | 25     | 1.0          |
+        +--------+--------------+
+
+    Args:
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        percent_rank(order_by="points")
+    """
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.percent_rank(
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def cume_dist(
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a cumulative distribution window function.
+
+    This window function is similar to :py:func:`rank` except that the returned values
+    are the ratio of the row number to the total number of rows. Here is an example of
+    a dataframe with a window ordered by descending ``points`` and the associated
+    cumulative distribution::
+
+        +--------+-----------+
+        | points | cume_dist |
+        +--------+-----------+
+        | 100    | 0.5       |
+        | 100    | 0.5       |
+        | 50     | 0.75      |
+        | 25     | 1.0       |
+        +--------+-----------+
+
+    Args:
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        cume_dist(order_by="points")
+    """
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.cume_dist(
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def ntile(
+    groups: int,
+    partition_by: list[Expr] | Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Create a n-tile window function.
+
+    This window function orders the window frame into a given number of groups based on
+    the ordering criteria. It then returns which group the current row is assigned to.
+    Here is an example of a dataframe with a window ordered by descending ``points``
+    and the associated n-tile function::
+
+        +--------+-------+
+        | points | ntile |
+        +--------+-------+
+        | 120    | 1     |
+        | 100    | 1     |
+        | 80     | 2     |
+        | 60     | 2     |
+        | 40     | 3     |
+        | 20     | 3     |
+        +--------+-------+
+
+    Args:
+        groups: Number of groups for the n-tile to be divided into.
+        partition_by: Expressions to partition the window frame on.
+        order_by: Set ordering within the window frame. Accepts
+            column names or expressions.
+
+    For example::
+
+        ntile(3, order_by="points")
+    """
+    partition_by_raw = expr_list_to_raw_expr_list(partition_by)
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+
+    return Expr(
+        f.ntile(
+            Expr.literal(groups).expr,
+            partition_by=partition_by_raw,
+            order_by=order_by_raw,
+        )
+    )
+
+
+def string_agg(
+    expression: Expr,
+    delimiter: str,
+    filter: Expr | None = None,
+    order_by: list[SortKey] | SortKey | None = None,
+) -> Expr:
+    """Concatenates the input strings.
+
+    This aggregate function will concatenate input strings, ignoring null values and
+    separating them with the specified delimiter. Non-string values will be converted
+    to their string equivalents.
+
+    If using the builder functions described in :ref:`aggregation`, this function
+    ignores the options ``distinct`` and ``null_treatment``.
+
+    Args:
+        expression: Values to concatenate
+        delimiter: Text to place between each value of expression
+        filter: If provided, only compute against rows for which the filter is True
+        order_by: Set the ordering of the expression to evaluate. Accepts
+            column names or expressions.
+
+    For example::
+
+        df.aggregate([], string_agg(col("a"), ",", order_by="b"))
+    """
+    order_by_raw = sort_list_to_raw_sort_list(order_by)
+    filter_raw = filter.expr if filter is not None else None
+
+    return Expr(
+        f.string_agg(
+            expression.expr,
+            delimiter,
+            filter=filter_raw,
+            order_by=order_by_raw,
+        )
+    )
diff --git a/python/datafusion/html_formatter.py b/python/datafusion/html_formatter.py
new file mode 100644
index 000000000..65eb1f042
--- /dev/null
+++ b/python/datafusion/html_formatter.py
@@ -0,0 +1,29 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Deprecated module for dataframe formatting."""
+
+import warnings
+
+from datafusion.dataframe_formatter import *  # noqa: F403
+
+warnings.warn(
+    "The module 'html_formatter' is deprecated and will be removed in the next "
+    "release. Please use 'dataframe_formatter' instead.",
+    DeprecationWarning,
+    stacklevel=3,
+)
diff --git a/python/datafusion/input/__init__.py b/python/datafusion/input/__init__.py
index 27e39b8ca..f0c1f42b4 100644
--- a/python/datafusion/input/__init__.py
+++ b/python/datafusion/input/__init__.py
@@ -15,8 +15,13 @@
 # specific language governing permissions and limitations
 # under the License.
 
+"""This package provides input sources.
+
+The primary class used within DataFusion is ``LocationInputPlugin``.
+"""
+
 from .location import LocationInputPlugin
 
 __all__ = [
-    LocationInputPlugin,
+    "LocationInputPlugin",
 ]
diff --git a/python/datafusion/input/base.py b/python/datafusion/input/base.py
index efcaf7697..f67dde2a1 100644
--- a/python/datafusion/input/base.py
+++ b/python/datafusion/input/base.py
@@ -15,6 +15,11 @@
 # specific language governing permissions and limitations
 # under the License.
 
+"""This module provides ``BaseInputSource``.
+
+A user can extend this to provide a custom input source.
+"""
+
 from abc import ABC, abstractmethod
 from typing import Any
 
@@ -22,18 +27,20 @@
 
 
 class BaseInputSource(ABC):
-    """
-    If a consuming library would like to provider their own InputSource
-    this is the class they should extend to write their own. Once
-    completed the Plugin InputSource can be registered with the
+    """Base Input Source class.
+
+    If a consuming library would like to provide their own InputSource, this is
+    the class they should extend to write their own.
+
+    Once completed, the Plugin InputSource can be registered with the
     SessionContext to ensure that it will be used in order to obtain the
     SqlTable information from the custom datasource.
     """
 
     @abstractmethod
-    def is_correct_input(self, input_item: Any, table_name: str, **kwargs) -> bool:
-        pass
+    def is_correct_input(self, input_item: Any, table_name: str, **kwargs: Any) -> bool:
+        """Returns `True` if the input is valid."""
 
     @abstractmethod
-    def build_table(self, input_item: Any, table_name: str, **kwarg) -> SqlTable:
-        pass
+    def build_table(self, input_item: Any, table_name: str, **kwargs: Any) -> SqlTable:  # type: ignore[invalid-type-form]
+        """Create a table from the input source."""
diff --git a/python/datafusion/input/location.py b/python/datafusion/input/location.py
index 16e632d1b..b804ac18b 100644
--- a/python/datafusion/input/location.py
+++ b/python/datafusion/input/location.py
@@ -15,8 +15,9 @@
 # specific language governing permissions and limitations
 # under the License.
-import os
-import glob
+"""The default input source for DataFusion."""
+
+from pathlib import Path
 from typing import Any
 
 from datafusion.common import DataTypeMap, SqlTable
@@ -24,62 +25,65 @@
 
 
 class LocationInputPlugin(BaseInputSource):
-    """
-    Input Plugin for everything, which can be read
-    in from a file (on disk, remote etc.)
+    """Input Plugin for everything.
+
+    This can be read in from a file (on disk, remote etc.).
     """
 
-    def is_correct_input(self, input_item: Any, table_name: str, **kwargs):
+    def is_correct_input(self, input_item: Any, table_name: str, **kwargs: Any) -> bool:  # noqa: ARG002
+        """Returns `True` if the input is valid."""
         return isinstance(input_item, str)
 
     def build_table(
         self,
-        input_file: str,
+        input_item: str,
         table_name: str,
-        **kwargs,
-    ) -> SqlTable:
-        _, extension = os.path.splitext(input_file)
-        format = extension.lstrip(".").lower()
+        **kwargs: Any,  # noqa: ARG002
+    ) -> SqlTable:  # type: ignore[invalid-type-form]
+        """Create a table from the input source."""
+        extension = Path(input_item).suffix
+        file_format = extension.lstrip(".").lower()
         num_rows = 0  # Total number of rows in the file. Used for statistics
         columns = []
-        if format == "parquet":
+        if file_format == "parquet":
             import pyarrow.parquet as pq
 
             # Read the Parquet metadata
-            metadata = pq.read_metadata(input_file)
+            metadata = pq.read_metadata(input_item)
             num_rows = metadata.num_rows
             # Iterate through the schema and build the SqlTable
-            for col in metadata.schema:
-                columns.append(
-                    (
-                        col.name,
-                        DataTypeMap.from_parquet_type_str(col.physical_type),
-                    )
+            columns = [
+                (
+                    col.name,
+                    DataTypeMap.from_parquet_type_str(col.physical_type),
                 )
+                for col in metadata.schema
+            ]
 
-        elif format == "csv":
+        elif file_format == "csv":
             import csv
 
             # Consume header row and count number of rows for statistics.
             # TODO: Possibly makes sense to have the eager number of rows
             # calculated as a configuration since you must read the entire file
-            # to get that information. However, this should only be occuring
+            # to get that information. However, this should only be occurring
             # at table creation time and therefore shouldn't
             # slow down query performance.
-            with open(input_file, "r") as file:
+            with Path(input_item).open() as file:
                 reader = csv.reader(file)
-                header_row = next(reader)
-                print(header_row)
+                _header_row = next(reader)
                 for _ in reader:
                     num_rows += 1
-            # TODO: Need to actually consume this row into resonable columns
-            raise RuntimeError("TODO: Currently unable to support CSV input files.")
+            # TODO: Need to actually consume this row into reasonable columns
+            msg = "TODO: Currently unable to support CSV input files."
+            raise RuntimeError(msg)
         else:
-            raise RuntimeError(
-                f"Input of format: `{format}` is currently not supported.\
-                Only Parquet and CSV."
-            )
+            msg = f"Input of format: `{file_format}` is currently not supported.\
+                Only Parquet and CSV."
+            raise RuntimeError(msg)
 
         # Input could possibly be multiple files. Create a list if so
-        input_files = glob.glob(input_file)
+        input_path = Path(input_item)
+        input_files = [str(p) for p in input_path.parent.glob(input_path.name)]
         return SqlTable(table_name, columns, num_rows, input_files)
diff --git a/python/datafusion/io.py b/python/datafusion/io.py
new file mode 100644
index 000000000..4f9c3c516
--- /dev/null
+++ b/python/datafusion/io.py
@@ -0,0 +1,197 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""IO read functions using global context."""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+from datafusion.context import SessionContext
+
+if TYPE_CHECKING:
+    import pathlib
+
+    import pyarrow as pa
+
+    from datafusion.dataframe import DataFrame
+    from datafusion.expr import Expr
+
+    from .options import CsvReadOptions
+
+
+def read_parquet(
+    path: str | pathlib.Path,
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+    parquet_pruning: bool = True,
+    file_extension: str = ".parquet",
+    skip_metadata: bool = True,
+    schema: pa.Schema | None = None,
+    file_sort_order: list[list[Expr]] | None = None,
+) -> DataFrame:
+    """Read a Parquet source into a :py:class:`~datafusion.dataframe.DataFrame`.
+
+    This function will use the global context. Any functions or tables registered
+    with another context may not be accessible when used with a DataFrame created
+    using this function.
+
+    Args:
+        path: Path to the Parquet file.
+        table_partition_cols: Partition columns.
+        parquet_pruning: Whether the parquet reader should use the predicate
+            to prune row groups.
+        file_extension: File extension; only files with this extension are
+            selected for data input.
+        skip_metadata: Whether the parquet reader should skip any metadata
+            that may be in the file schema. This can help avoid schema
+            conflicts due to metadata.
+        schema: An optional schema representing the parquet files. If None,
+            the parquet reader will try to infer it based on data in the
+            file.
+        file_sort_order: Sort order for the file.
+
+    Returns:
+        DataFrame representation of the read Parquet files
+    """
+    if table_partition_cols is None:
+        table_partition_cols = []
+    return SessionContext.global_ctx().read_parquet(
+        str(path),
+        table_partition_cols,
+        parquet_pruning,
+        file_extension,
+        skip_metadata,
+        schema,
+        file_sort_order,
+    )
+
+
+def read_json(
+    path: str | pathlib.Path,
+    schema: pa.Schema | None = None,
+    schema_infer_max_records: int = 1000,
+    file_extension: str = ".json",
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+    file_compression_type: str | None = None,
+) -> DataFrame:
+    """Read a line-delimited JSON data source.
+
+    This function will use the global context. Any functions or tables registered
+    with another context may not be accessible when used with a DataFrame created
+    using this function.
+
+    Args:
+        path: Path to the JSON file.
+        schema: The data source schema.
+        schema_infer_max_records: Maximum number of rows to read from JSON
+            files for schema inference if needed.
+        file_extension: File extension; only files with this extension are
+            selected for data input.
+        table_partition_cols: Partition columns.
+        file_compression_type: File compression type.
+
+    Returns:
+        DataFrame representation of the read JSON files.
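+
+    Example, as a sketch (the file name and extension are hypothetical)::
+
+        df = read_json("records.jsonl", file_extension=".jsonl")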
+    """
+    if table_partition_cols is None:
+        table_partition_cols = []
+    return SessionContext.global_ctx().read_json(
+        str(path),
+        schema,
+        schema_infer_max_records,
+        file_extension,
+        table_partition_cols,
+        file_compression_type,
+    )
+
+
+def read_csv(
+    path: str | pathlib.Path | list[str] | list[pathlib.Path],
+    schema: pa.Schema | None = None,
+    has_header: bool = True,
+    delimiter: str = ",",
+    schema_infer_max_records: int = 1000,
+    file_extension: str = ".csv",
+    table_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+    file_compression_type: str | None = None,
+    options: CsvReadOptions | None = None,
+) -> DataFrame:
+    """Read a CSV data source.
+
+    This function will use the global context. Any functions or tables registered
+    with another context may not be accessible when used with a DataFrame created
+    using this function.
+
+    Args:
+        path: Path to the CSV file
+        schema: An optional schema representing the CSV files. If None, the
+            CSV reader will try to infer it based on data in file.
+        has_header: Whether the CSV file has a header. If schema inference
+            is run on a file with no headers, default column names are
+            created.
+        delimiter: An optional column delimiter.
+        schema_infer_max_records: Maximum number of rows to read from CSV
+            files for schema inference if needed.
+        file_extension: File extension; only files with this extension are
+            selected for data input.
+        table_partition_cols: Partition columns.
+        file_compression_type: File compression type.
+        options: Set advanced options for CSV reading. This cannot be
+            combined with any of the other options in this method.
+
+    Returns:
+        DataFrame representation of the read CSV files
+    """
+    return SessionContext.global_ctx().read_csv(
+        path,
+        schema,
+        has_header,
+        delimiter,
+        schema_infer_max_records,
+        file_extension,
+        table_partition_cols,
+        file_compression_type,
+        options,
+    )
+
+
+def read_avro(
+    path: str | pathlib.Path,
+    schema: pa.Schema | None = None,
+    file_partition_cols: list[tuple[str, str | pa.DataType]] | None = None,
+    file_extension: str = ".avro",
+) -> DataFrame:
+    """Create a :py:class:`DataFrame` for reading an Avro data source.
+
+    This function will use the global context. Any functions or tables registered
+    with another context may not be accessible when used with a DataFrame created
+    using this function.
+
+    Args:
+        path: Path to the Avro file.
+        schema: The data source schema.
+        file_partition_cols: Partition columns.
+        file_extension: File extension to select.
+
+    Returns:
+        DataFrame representation of the read Avro file
+    """
+    if file_partition_cols is None:
+        file_partition_cols = []
+    return SessionContext.global_ctx().read_avro(
+        str(path), schema, file_partition_cols, file_extension
+    )
diff --git a/python/datafusion/object_store.py b/python/datafusion/object_store.py
index 70ecbd2bb..6298526f5 100644
--- a/python/datafusion/object_store.py
+++ b/python/datafusion/object_store.py
@@ -14,10 +14,14 @@
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
 # under the License.
- +"""Object store functionality.""" from ._internal import object_store +AmazonS3 = object_store.AmazonS3 +GoogleCloud = object_store.GoogleCloud +LocalFileSystem = object_store.LocalFileSystem +MicrosoftAzure = object_store.MicrosoftAzure +Http = object_store.Http -def __getattr__(name): - return getattr(object_store, name) +__all__ = ["AmazonS3", "GoogleCloud", "Http", "LocalFileSystem", "MicrosoftAzure"] diff --git a/python/datafusion/options.py b/python/datafusion/options.py new file mode 100644 index 000000000..ec19f37d0 --- /dev/null +++ b/python/datafusion/options.py @@ -0,0 +1,284 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""Options for reading various file formats.""" + +from __future__ import annotations + +import warnings +from typing import TYPE_CHECKING + +import pyarrow as pa + +from datafusion.expr import sort_list_to_raw_sort_list + +if TYPE_CHECKING: + from datafusion.expr import SortExpr + +from ._internal import options + +__all__ = ["CsvReadOptions"] + +DEFAULT_MAX_INFER_SCHEMA = 1000 + + +class CsvReadOptions: + """Options for reading CSV files. + + This class provides a builder pattern for configuring CSV reading options. + All methods starting with ``with_`` return ``self`` to allow method chaining. + """ + + def __init__( + self, + *, + has_header: bool = True, + delimiter: str = ",", + quote: str = '"', + terminator: str | None = None, + escape: str | None = None, + comment: str | None = None, + newlines_in_values: bool = False, + schema: pa.Schema | None = None, + schema_infer_max_records: int = DEFAULT_MAX_INFER_SCHEMA, + file_extension: str = ".csv", + table_partition_cols: list[tuple[str, pa.DataType]] | None = None, + file_compression_type: str = "", + file_sort_order: list[list[SortExpr]] | None = None, + null_regex: str | None = None, + truncated_rows: bool = False, + ) -> None: + """Initialize CsvReadOptions. + + Args: + has_header: Does the CSV file have a header row? If schema inference + is run on a file with no headers, default column names are created. + delimiter: Column delimiter character. Must be a single ASCII character. + quote: Quote character for fields containing delimiters or newlines. + Must be a single ASCII character. + terminator: Optional line terminator character. If ``None``, uses CRLF. + Must be a single ASCII character. + escape: Optional escape character for quotes. Must be a single ASCII + character. + comment: If specified, lines beginning with this character are ignored. + Must be a single ASCII character. + newlines_in_values: Whether newlines in quoted values are supported. + Parsing newlines in quoted values may be affected by execution + behavior such as parallel file scanning. 
Setting this to ``True`` + ensures that newlines in values are parsed successfully, which may + reduce performance. + schema: Optional PyArrow schema representing the CSV files. If ``None``, + the CSV reader will try to infer it based on data in the file. + schema_infer_max_records: Maximum number of rows to read from CSV files + for schema inference if needed. + file_extension: File extension; only files with this extension are + selected for data input. + table_partition_cols: Partition columns as a list of tuples of + (column_name, data_type). + file_compression_type: File compression type. Supported values are + ``"gzip"``, ``"bz2"``, ``"xz"``, ``"zstd"``, or empty string for + uncompressed. + file_sort_order: Optional sort order of the files as a list of sort + expressions per file. + null_regex: Optional regex pattern to match null values in the CSV. + truncated_rows: Whether to allow truncated rows when parsing. By default + this is ``False`` and will error if the CSV rows have different + lengths. When set to ``True``, it will allow records with less than + the expected number of columns and fill the missing columns with + nulls. If the record's schema is not nullable, it will still return + an error. + """ + validate_single_character("delimiter", delimiter) + validate_single_character("quote", quote) + validate_single_character("terminator", terminator) + validate_single_character("escape", escape) + validate_single_character("comment", comment) + + self.has_header = has_header + self.delimiter = delimiter + self.quote = quote + self.terminator = terminator + self.escape = escape + self.comment = comment + self.newlines_in_values = newlines_in_values + self.schema = schema + self.schema_infer_max_records = schema_infer_max_records + self.file_extension = file_extension + self.table_partition_cols = table_partition_cols or [] + self.file_compression_type = file_compression_type + self.file_sort_order = file_sort_order or [] + self.null_regex = null_regex + self.truncated_rows = truncated_rows + + def with_has_header(self, has_header: bool) -> CsvReadOptions: + """Configure whether the CSV has a header row.""" + self.has_header = has_header + return self + + def with_delimiter(self, delimiter: str) -> CsvReadOptions: + """Configure the column delimiter.""" + self.delimiter = delimiter + return self + + def with_quote(self, quote: str) -> CsvReadOptions: + """Configure the quote character.""" + self.quote = quote + return self + + def with_terminator(self, terminator: str | None) -> CsvReadOptions: + """Configure the line terminator character.""" + self.terminator = terminator + return self + + def with_escape(self, escape: str | None) -> CsvReadOptions: + """Configure the escape character.""" + self.escape = escape + return self + + def with_comment(self, comment: str | None) -> CsvReadOptions: + """Configure the comment character.""" + self.comment = comment + return self + + def with_newlines_in_values(self, newlines_in_values: bool) -> CsvReadOptions: + """Configure whether newlines in values are supported.""" + self.newlines_in_values = newlines_in_values + return self + + def with_schema(self, schema: pa.Schema | None) -> CsvReadOptions: + """Configure the schema.""" + self.schema = schema + return self + + def with_schema_infer_max_records( + self, schema_infer_max_records: int + ) -> CsvReadOptions: + """Configure maximum records for schema inference.""" + self.schema_infer_max_records = schema_infer_max_records + return self + + def with_file_extension(self, file_extension: 
str) -> CsvReadOptions: + """Configure the file extension filter.""" + self.file_extension = file_extension + return self + + def with_table_partition_cols( + self, table_partition_cols: list[tuple[str, pa.DataType]] + ) -> CsvReadOptions: + """Configure table partition columns.""" + self.table_partition_cols = table_partition_cols + return self + + def with_file_compression_type(self, file_compression_type: str) -> CsvReadOptions: + """Configure file compression type.""" + self.file_compression_type = file_compression_type + return self + + def with_file_sort_order( + self, file_sort_order: list[list[SortExpr]] + ) -> CsvReadOptions: + """Configure file sort order.""" + self.file_sort_order = file_sort_order + return self + + def with_null_regex(self, null_regex: str | None) -> CsvReadOptions: + """Configure null value regex pattern.""" + self.null_regex = null_regex + return self + + def with_truncated_rows(self, truncated_rows: bool) -> CsvReadOptions: + """Configure whether to allow truncated rows.""" + self.truncated_rows = truncated_rows + return self + + def to_inner(self) -> options.CsvReadOptions: + """Convert this object into the underlying Rust structure. + + This is intended for internal use only. + """ + file_sort_order = ( + [] + if self.file_sort_order is None + else [ + sort_list_to_raw_sort_list(sort_list) + for sort_list in self.file_sort_order + ] + ) + + return options.CsvReadOptions( + has_header=self.has_header, + delimiter=ord(self.delimiter[0]) if self.delimiter else ord(","), + quote=ord(self.quote[0]) if self.quote else ord('"'), + terminator=ord(self.terminator[0]) if self.terminator else None, + escape=ord(self.escape[0]) if self.escape else None, + comment=ord(self.comment[0]) if self.comment else None, + newlines_in_values=self.newlines_in_values, + schema=self.schema, + schema_infer_max_records=self.schema_infer_max_records, + file_extension=self.file_extension, + table_partition_cols=_convert_table_partition_cols( + self.table_partition_cols + ), + file_compression_type=self.file_compression_type or "", + file_sort_order=file_sort_order, + null_regex=self.null_regex, + truncated_rows=self.truncated_rows, + ) + + +def validate_single_character(name: str, value: str | None) -> None: + if value is not None and len(value) != 1: + message = f"{name} must be a single character" + raise ValueError(message) + + +def _convert_table_partition_cols( + table_partition_cols: list[tuple[str, str | pa.DataType]], +) -> list[tuple[str, pa.DataType]]: + warn = False + converted_table_partition_cols = [] + + for col, data_type in table_partition_cols: + if isinstance(data_type, str): + warn = True + if data_type == "string": + converted_data_type = pa.string() + elif data_type == "int": + converted_data_type = pa.int32() + else: + message = ( + f"Unsupported literal data type '{data_type}' for partition " + "column. 
Supported types are 'string' and 'int'" + ) + raise ValueError(message) + else: + converted_data_type = data_type + + converted_table_partition_cols.append((col, converted_data_type)) + + if warn: + message = ( + "using literals for table_partition_cols data types is deprecated," + "use pyarrow types instead" + ) + warnings.warn( + message, + category=DeprecationWarning, + stacklevel=2, + ) + + return converted_table_partition_cols diff --git a/python/datafusion/plan.py b/python/datafusion/plan.py new file mode 100644 index 000000000..fb54fd624 --- /dev/null +++ b/python/datafusion/plan.py @@ -0,0 +1,153 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +"""This module supports physical and logical plans in DataFusion.""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any + +import datafusion._internal as df_internal + +if TYPE_CHECKING: + from datafusion.context import SessionContext + +__all__ = [ + "ExecutionPlan", + "LogicalPlan", +] + + +class LogicalPlan: + """Logical Plan. + + A `LogicalPlan` is a node in a tree of relational operators (such as + Projection or Filter). + + Represents transforming an input relation (table) to an output relation + (table) with a potentially different schema. Plans form a dataflow tree + where data flows from leaves up to the root to produce the query result. + + A `LogicalPlan` can be created by the SQL query planner, the DataFrame API, + or programmatically (for example custom query languages). + """ + + def __init__(self, plan: df_internal.LogicalPlan) -> None: + """This constructor should not be called by the end user.""" + self._raw_plan = plan + + def to_variant(self) -> Any: + """Convert the logical plan into its specific variant.""" + return self._raw_plan.to_variant() + + def inputs(self) -> list[LogicalPlan]: + """Returns the list of inputs to the logical plan.""" + return [LogicalPlan(p) for p in self._raw_plan.inputs()] + + def __repr__(self) -> str: + """Generate a printable representation of the plan.""" + return self._raw_plan.__repr__() + + def display(self) -> str: + """Print the logical plan.""" + return self._raw_plan.display() + + def display_indent(self) -> str: + """Print an indented form of the logical plan.""" + return self._raw_plan.display_indent() + + def display_indent_schema(self) -> str: + """Print an indented form of the schema for the logical plan.""" + return self._raw_plan.display_indent_schema() + + def display_graphviz(self) -> str: + """Print the graph visualization of the logical plan. + + Returns a `format`able structure that produces lines meant for graphical display + using the `DOT` language. 
diff --git a/requirements.in b/python/datafusion/py.typed
similarity index 86%
rename from requirements.in
rename to python/datafusion/py.typed
index b2a1a48df..d216be4dd 100644
--- a/requirements.in
+++ b/python/datafusion/py.typed
@@ -13,13 +13,4 @@
 # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 # KIND, either express or implied. See the License for the
 # specific language governing permissions and limitations
-# under the License.
-
-maturin>=1.5.1
-mypy
-numpy
-pyarrow>=11.0.0
-pytest
-ruff
-toml
-importlib_metadata; python_version < "3.8"
+# under the License.
\ No newline at end of file
diff --git a/python/datafusion/record_batch.py b/python/datafusion/record_batch.py
new file mode 100644
index 000000000..c24cde0ac
--- /dev/null
+++ b/python/datafusion/record_batch.py
@@ -0,0 +1,101 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""This module provides the classes for handling record batches.
+
+These are typically the result of dataframe
+:py:func:`datafusion.dataframe.execute_stream` operations.
+"""
+
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    import pyarrow as pa
+    import typing_extensions
+
+    import datafusion._internal as df_internal
+
+
+class RecordBatch:
+    """This class is essentially a wrapper for :py:class:`pa.RecordBatch`."""
+
+    def __init__(self, record_batch: df_internal.RecordBatch) -> None:
+        """This constructor is generally not called by the end user.
+
+        See the :py:class:`RecordBatchStream` iterator for generating this class.
+        """
+        self.record_batch = record_batch
+
+    def to_pyarrow(self) -> pa.RecordBatch:
+        """Convert to :py:class:`pa.RecordBatch`."""
+        return self.record_batch.to_pyarrow()
+
+    def __arrow_c_array__(
+        self, requested_schema: object | None = None
+    ) -> tuple[object, object]:
+        """Export the record batch via the Arrow C Data Interface.
+
+        This allows zero-copy interchange with libraries that support the
+        `Arrow PyCapsule interface
+        <https://arrow.apache.org/docs/format/CDataInterface/PyCapsuleInterface.html>`_.
+
+        Args:
+            requested_schema: Attempt to provide the record batch using this
+                schema. Only straightforward projections such as column
+                selection or reordering are applied.
+
+        Returns:
+            Two Arrow PyCapsule objects representing the ``ArrowArray`` and
+            ``ArrowSchema``.
+        """
+        return self.record_batch.__arrow_c_array__(requested_schema)
+
+
+class RecordBatchStream:
+    """This class represents a stream of record batches.
+
+    These are typically the result of a
+    :py:func:`~datafusion.dataframe.DataFrame.execute_stream` operation.
+    """
+
+    def __init__(self, record_batch_stream: df_internal.RecordBatchStream) -> None:
+        """This constructor is typically not called by the end user."""
+        self.rbs = record_batch_stream
+
+    def next(self) -> RecordBatch:
+        """See :py:func:`__next__` for the iterator function."""
+        return next(self)
+
+    async def __anext__(self) -> RecordBatch:
+        """Return the next :py:class:`RecordBatch` in the stream asynchronously."""
+        next_batch = await self.rbs.__anext__()
+        return RecordBatch(next_batch)
+
+    def __next__(self) -> RecordBatch:
+        """Return the next :py:class:`RecordBatch` in the stream."""
+        next_batch = next(self.rbs)
+        return RecordBatch(next_batch)
+
+    def __aiter__(self) -> typing_extensions.Self:
+        """Return an asynchronous iterator over record batches."""
+        return self
+
+    def __iter__(self) -> typing_extensions.Self:
+        """Return an iterator over record batches."""
+        return self
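Because `RecordBatchStream` implements both the synchronous and asynchronous iterator protocols, the same stream type works in either style. A minimal sketch (not part of the patch; the table name `t` and the data are illustrative):

```python
import asyncio

from datafusion import SessionContext

ctx = SessionContext()
ctx.from_pydict({"a": [1, 2, 3]}, name="t")

# Synchronous: the stream is a plain iterator of RecordBatch wrappers.
for batch in ctx.table("t").execute_stream():
    print(batch.to_pyarrow().num_rows)

# Asynchronous: the same class also implements __aiter__/__anext__.
async def consume() -> None:
    async for batch in ctx.table("t").execute_stream():
        print(batch.to_pyarrow().num_rows)

asyncio.run(consume())
```

And since `RecordBatch` exports `__arrow_c_array__`, a batch can also be handed zero-copy to any library that understands the Arrow PyCapsule interface.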
diff --git a/python/datafusion/substrait.py b/python/datafusion/substrait.py
index eff809a0c..3115238fa 100644
--- a/python/datafusion/substrait.py
+++ b/python/datafusion/substrait.py
@@ -15,9 +15,199 @@
 # specific language governing permissions and limitations
 # under the License.
 
+"""This module provides support for using substrait with datafusion.
+
-from ._internal import substrait
+For additional information about substrait, see https://substrait.io/.
+"""
+
-def __getattr__(name):
-    return getattr(substrait, name)
+from __future__ import annotations
+
+from typing import TYPE_CHECKING
+
+try:
+    from warnings import deprecated  # Python 3.13+
+except ImportError:
+    from typing_extensions import deprecated  # Python 3.12
+
+from datafusion.plan import LogicalPlan
+
+from ._internal import substrait as substrait_internal
+
+if TYPE_CHECKING:
+    import pathlib
+
+    from datafusion.context import SessionContext
+
+__all__ = [
+    "Consumer",
+    "Plan",
+    "Producer",
+    "Serde",
+]
+
+
+class Plan:
+    """A class representing an encodable substrait plan."""
+
+    def __init__(self, plan: substrait_internal.Plan) -> None:
+        """Create a substrait plan.
+
+        The user should not have to call this constructor directly. Rather, it
+        should be created via the :py:class:`Serde` or :py:class:`Producer` classes
+        in this module.
+        """
+        self.plan_internal = plan
+
+    def encode(self) -> bytes:
+        """Encode the plan to bytes.
+
+        Returns:
+            Encoded plan.
+        """
+        return self.plan_internal.encode()
+
+    def to_json(self) -> str:
+        """Get the JSON representation of the Substrait plan.
+
+        Returns:
+            A JSON representation of the Substrait plan.
+        """
+        return self.plan_internal.to_json()
+
+    @staticmethod
+    def from_json(json: str) -> Plan:
+        """Parse a plan from a JSON string representation.
+
+        Args:
+            json: JSON representation of a Substrait plan.
+
+        Returns:
+            Plan object representing the Substrait plan.
+        """
+        return Plan(substrait_internal.Plan.from_json(json))
+
+
+@deprecated("Use `Plan` instead.")
+class plan(Plan):  # noqa: N801
+    """See `Plan`."""
+
+
+class Serde:
+    """Provides the ``Substrait`` serialization and deserialization."""
+
+    @staticmethod
+    def serialize(sql: str, ctx: SessionContext, path: str | pathlib.Path) -> None:
+        """Serialize a SQL query to a Substrait plan and write it to a file.
+
+        Args:
+            sql: SQL query to serialize.
+            ctx: SessionContext to use.
+            path: Path to write the Substrait plan to.
+        """
+        return substrait_internal.Serde.serialize(sql, ctx.ctx, str(path))
+
+    @staticmethod
+    def serialize_to_plan(sql: str, ctx: SessionContext) -> Plan:
+        """Serialize a SQL query to a Substrait plan.
+
+        Args:
+            sql: SQL query to serialize.
+            ctx: SessionContext to use.
+
+        Returns:
+            Substrait plan.
+        """
+        return Plan(substrait_internal.Serde.serialize_to_plan(sql, ctx.ctx))
+
+    @staticmethod
+    def serialize_bytes(sql: str, ctx: SessionContext) -> bytes:
+        """Serialize a SQL query to a Substrait plan as bytes.
+
+        Args:
+            sql: SQL query to serialize.
+            ctx: SessionContext to use.
+
+        Returns:
+            Substrait plan as bytes.
+        """
+        return substrait_internal.Serde.serialize_bytes(sql, ctx.ctx)
+
+    @staticmethod
+    def deserialize(path: str | pathlib.Path) -> Plan:
+        """Deserialize a Substrait plan from a file.
+
+        Args:
+            path: Path to read the Substrait plan from.
+
+        Returns:
+            Substrait plan.
+        """
+        return Plan(substrait_internal.Serde.deserialize(str(path)))
+
+    @staticmethod
+    def deserialize_bytes(proto_bytes: bytes) -> Plan:
+        """Deserialize a Substrait plan from bytes.
+
+        Args:
+            proto_bytes: Bytes to read the Substrait plan from.
+
+        Returns:
+            Substrait plan.
+        """
+        return Plan(substrait_internal.Serde.deserialize_bytes(proto_bytes))
+
+
+@deprecated("Use `Serde` instead.")
+class serde(Serde):  # noqa: N801
+    """See `Serde`."""
+
+
+class Producer:
+    """Generates substrait plans from a logical plan."""
+
+    @staticmethod
+    def to_substrait_plan(logical_plan: LogicalPlan, ctx: SessionContext) -> Plan:
+        """Convert a DataFusion LogicalPlan to a Substrait plan.
+
+        Args:
+            logical_plan: LogicalPlan to convert.
+            ctx: SessionContext to use.
+
+        Returns:
+            Substrait plan.
+        """
+        return Plan(
+            substrait_internal.Producer.to_substrait_plan(
+                logical_plan._raw_plan, ctx.ctx
+            )
+        )
+
+
+@deprecated("Use `Producer` instead.")
+class producer(Producer):  # noqa: N801
+    """See `Producer`."""
+
+
+class Consumer:
+    """Generates a logical plan from a substrait plan."""
+
+    @staticmethod
+    def from_substrait_plan(ctx: SessionContext, plan: Plan) -> LogicalPlan:
+        """Convert a Substrait plan to a DataFusion LogicalPlan.
+
+        Args:
+            ctx: SessionContext to use.
+            plan: Substrait plan to convert.
+
+        Returns:
+            LogicalPlan.
+        """
+        return LogicalPlan(
+            substrait_internal.Consumer.from_substrait_plan(ctx.ctx, plan.plan_internal)
+        )
+
+
+@deprecated("Use `Consumer` instead.")
+class consumer(Consumer):  # noqa: N801
+    """See `Consumer`."""
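The classes above combine into a full serialize/deserialize round trip. A sketch using only APIs defined in this module (not part of the patch; the table name `t` and the data are illustrative):

```python
import pyarrow as pa
from datafusion import SessionContext
from datafusion import substrait as ss

ctx = SessionContext()
batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], names=["a"])
ctx.register_record_batches("t", [[batch]])

# SQL -> Substrait plan, encoded to bytes for transport or storage.
substrait_plan = ss.Serde.serialize_to_plan("SELECT a FROM t", ctx)
encoded = substrait_plan.encode()

# bytes -> Substrait plan -> DataFusion LogicalPlan.
logical = ss.Consumer.from_substrait_plan(ctx, ss.Serde.deserialize_bytes(encoded))

# LogicalPlan -> Substrait plan again, via the producer.
round_tripped = ss.Producer.to_substrait_plan(logical, ctx)
```

The lowercase aliases (`plan`, `serde`, `producer`, `consumer`) keep old imports working but now emit deprecation warnings through the `deprecated` decorator.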
diff --git a/python/datafusion/tests/test_aggregation.py b/python/datafusion/tests/test_aggregation.py
deleted file mode 100644
index 99a470b6b..000000000
--- a/python/datafusion/tests/test_aggregation.py
+++ /dev/null
@@ -1,143 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
- -import numpy as np -import pyarrow as pa -import pytest - -from datafusion import SessionContext, column, lit -from datafusion import functions as f - - -@pytest.fixture -def df(): - ctx = SessionContext() - - # create a RecordBatch and a new DataFrame from it - batch = pa.RecordBatch.from_arrays( - [ - pa.array([1, 2, 3]), - pa.array([4, 4, 6]), - pa.array([9, 8, 5]), - pa.array([True, True, False]), - ], - names=["a", "b", "c", "d"], - ) - return ctx.create_dataframe([[batch]]) - - -def test_built_in_aggregation(df): - col_a = column("a") - col_b = column("b") - col_c = column("c") - - agg_df = df.aggregate( - [], - [ - f.approx_distinct(col_b), - f.approx_median(col_b), - f.approx_percentile_cont(col_b, lit(0.5)), - f.approx_percentile_cont_with_weight(col_b, lit(0.6), lit(0.5)), - f.array_agg(col_b), - f.avg(col_a), - f.corr(col_a, col_b), - f.count(col_a), - f.covar(col_a, col_b), - f.covar_pop(col_a, col_c), - f.covar_samp(col_b, col_c), - # f.grouping(col_a), # No physical plan implemented yet - f.max(col_a), - f.mean(col_b), - f.median(col_b), - f.min(col_a), - f.sum(col_b), - f.stddev(col_a), - f.stddev_pop(col_b), - f.stddev_samp(col_c), - f.var(col_a), - f.var_pop(col_b), - f.var_samp(col_c), - ], - ) - result = agg_df.collect()[0] - values_a, values_b, values_c, values_d = df.collect()[0] - - assert result.column(0) == pa.array([2], type=pa.uint64()) - assert result.column(1) == pa.array([4]) - assert result.column(2) == pa.array([4]) - assert result.column(3) == pa.array([6]) - assert result.column(4) == pa.array([[4, 4, 6]]) - np.testing.assert_array_almost_equal(result.column(5), np.average(values_a)) - np.testing.assert_array_almost_equal( - result.column(6), np.corrcoef(values_a, values_b)[0][1] - ) - assert result.column(7) == pa.array([len(values_a)]) - # Sample (co)variance -> ddof=1 - # Population (co)variance -> ddof=0 - np.testing.assert_array_almost_equal( - result.column(8), np.cov(values_a, values_b, ddof=1)[0][1] - ) - np.testing.assert_array_almost_equal( - result.column(9), np.cov(values_a, values_c, ddof=0)[0][1] - ) - np.testing.assert_array_almost_equal( - result.column(10), np.cov(values_b, values_c, ddof=1)[0][1] - ) - np.testing.assert_array_almost_equal(result.column(11), np.max(values_a)) - np.testing.assert_array_almost_equal(result.column(12), np.mean(values_b)) - np.testing.assert_array_almost_equal(result.column(13), np.median(values_b)) - np.testing.assert_array_almost_equal(result.column(14), np.min(values_a)) - np.testing.assert_array_almost_equal( - result.column(15), np.sum(values_b.to_pylist()) - ) - np.testing.assert_array_almost_equal(result.column(16), np.std(values_a, ddof=1)) - np.testing.assert_array_almost_equal(result.column(17), np.std(values_b, ddof=0)) - np.testing.assert_array_almost_equal(result.column(18), np.std(values_c, ddof=1)) - np.testing.assert_array_almost_equal(result.column(19), np.var(values_a, ddof=1)) - np.testing.assert_array_almost_equal(result.column(20), np.var(values_b, ddof=0)) - np.testing.assert_array_almost_equal(result.column(21), np.var(values_c, ddof=1)) - - -def test_bit_add_or_xor(df): - df = df.aggregate( - [], - [ - f.bit_and(column("a")), - f.bit_or(column("b")), - f.bit_xor(column("c")), - ], - ) - - result = df.collect() - result = result[0] - assert result.column(0) == pa.array([0]) - assert result.column(1) == pa.array([6]) - assert result.column(2) == pa.array([4]) - - -def test_bool_and_or(df): - df = df.aggregate( - [], - [ - f.bool_and(column("d")), - f.bool_or(column("d")), - ], - ) - 
result = df.collect() - result = result[0] - assert result.column(0) == pa.array([False]) - assert result.column(1) == pa.array([True]) diff --git a/python/datafusion/tests/test_context.py b/python/datafusion/tests/test_context.py deleted file mode 100644 index abc324db8..000000000 --- a/python/datafusion/tests/test_context.py +++ /dev/null @@ -1,512 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. -import gzip -import os -import datetime as dt - -import pyarrow as pa -import pyarrow.dataset as ds -import pytest - -from datafusion import ( - DataFrame, - RuntimeConfig, - SessionConfig, - SessionContext, - SQLOptions, - column, - literal, -) - - -def test_create_context_no_args(): - SessionContext() - - -def test_create_context_with_all_valid_args(): - runtime = RuntimeConfig().with_disk_manager_os().with_fair_spill_pool(10000000) - config = ( - SessionConfig() - .with_create_default_catalog_and_schema(True) - .with_default_catalog_and_schema("foo", "bar") - .with_target_partitions(1) - .with_information_schema(True) - .with_repartition_joins(False) - .with_repartition_aggregations(False) - .with_repartition_windows(False) - .with_parquet_pruning(False) - ) - - ctx = SessionContext(config, runtime) - - # verify that at least some of the arguments worked - ctx.catalog("foo").database("bar") - with pytest.raises(KeyError): - ctx.catalog("datafusion") - - -def test_register_record_batches(ctx): - # create a RecordBatch and register it as memtable - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - - ctx.register_record_batches("t", [[batch]]) - - assert ctx.tables() == {"t"} - - result = ctx.sql("SELECT a+b, a-b FROM t").collect() - - assert result[0].column(0) == pa.array([5, 7, 9]) - assert result[0].column(1) == pa.array([-3, -3, -3]) - - -def test_create_dataframe_registers_unique_table_name(ctx): - # create a RecordBatch and register it as memtable - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - - df = ctx.create_dataframe([[batch]]) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert len(tables[0]) == 33 - assert tables[0].startswith("c") - # ensure that the rest of the table name contains - # only hexadecimal numbers - for c in tables[0][1:]: - assert c in "0123456789abcdef" - - -def test_create_dataframe_registers_with_defined_table_name(ctx): - # create a RecordBatch and register it as memtable - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - - df = ctx.create_dataframe([[batch]], name="tbl") - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert tables[0] == "tbl" - - -def test_from_arrow_table(ctx): - # create a 
PyArrow table - data = {"a": [1, 2, 3], "b": [4, 5, 6]} - table = pa.Table.from_pydict(data) - - # convert to DataFrame - df = ctx.from_arrow_table(table) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert type(df) == DataFrame - assert set(df.schema().names) == {"a", "b"} - assert df.collect()[0].num_rows == 3 - - -def test_from_arrow_table_with_name(ctx): - # create a PyArrow table - data = {"a": [1, 2, 3], "b": [4, 5, 6]} - table = pa.Table.from_pydict(data) - - # convert to DataFrame with optional name - df = ctx.from_arrow_table(table, name="tbl") - tables = list(ctx.tables()) - - assert df - assert tables[0] == "tbl" - - -def test_from_arrow_table_empty(ctx): - data = {"a": [], "b": []} - schema = pa.schema([("a", pa.int32()), ("b", pa.string())]) - table = pa.Table.from_pydict(data, schema=schema) - - # convert to DataFrame - df = ctx.from_arrow_table(table) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert isinstance(df, DataFrame) - assert set(df.schema().names) == {"a", "b"} - assert len(df.collect()) == 0 - - -def test_from_arrow_table_empty_no_schema(ctx): - data = {"a": [], "b": []} - table = pa.Table.from_pydict(data) - - # convert to DataFrame - df = ctx.from_arrow_table(table) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert isinstance(df, DataFrame) - assert set(df.schema().names) == {"a", "b"} - assert len(df.collect()) == 0 - - -def test_from_pylist(ctx): - # create a dataframe from Python list - data = [ - {"a": 1, "b": 4}, - {"a": 2, "b": 5}, - {"a": 3, "b": 6}, - ] - - df = ctx.from_pylist(data) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert type(df) == DataFrame - assert set(df.schema().names) == {"a", "b"} - assert df.collect()[0].num_rows == 3 - - -def test_from_pydict(ctx): - # create a dataframe from Python dictionary - data = {"a": [1, 2, 3], "b": [4, 5, 6]} - - df = ctx.from_pydict(data) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert type(df) == DataFrame - assert set(df.schema().names) == {"a", "b"} - assert df.collect()[0].num_rows == 3 - - -def test_from_pandas(ctx): - # create a dataframe from pandas dataframe - pd = pytest.importorskip("pandas") - data = {"a": [1, 2, 3], "b": [4, 5, 6]} - pandas_df = pd.DataFrame(data) - - df = ctx.from_pandas(pandas_df) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert type(df) == DataFrame - assert set(df.schema().names) == {"a", "b"} - assert df.collect()[0].num_rows == 3 - - -def test_from_polars(ctx): - # create a dataframe from Polars dataframe - pd = pytest.importorskip("polars") - data = {"a": [1, 2, 3], "b": [4, 5, 6]} - polars_df = pd.DataFrame(data) - - df = ctx.from_polars(polars_df) - tables = list(ctx.tables()) - - assert df - assert len(tables) == 1 - assert type(df) == DataFrame - assert set(df.schema().names) == {"a", "b"} - assert df.collect()[0].num_rows == 3 - - -def test_register_table(ctx, database): - default = ctx.catalog() - public = default.database("public") - assert public.names() == {"csv", "csv1", "csv2"} - table = public.table("csv") - - ctx.register_table("csv3", table) - assert public.names() == {"csv", "csv1", "csv2", "csv3"} - - -def test_read_table(ctx, database): - default = ctx.catalog() - public = default.database("public") - assert public.names() == {"csv", "csv1", "csv2"} - - table = public.table("csv") - table_df = ctx.read_table(table) - table_df.show() - - -def test_deregister_table(ctx, database): - 
default = ctx.catalog() - public = default.database("public") - assert public.names() == {"csv", "csv1", "csv2"} - - ctx.deregister_table("csv") - assert public.names() == {"csv1", "csv2"} - - -def test_register_dataset(ctx): - # create a RecordBatch and register it as a pyarrow.dataset.Dataset - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - dataset = ds.dataset([batch]) - ctx.register_dataset("t", dataset) - - assert ctx.tables() == {"t"} - - result = ctx.sql("SELECT a+b, a-b FROM t").collect() - - assert result[0].column(0) == pa.array([5, 7, 9]) - assert result[0].column(1) == pa.array([-3, -3, -3]) - - -def test_dataset_filter(ctx, capfd): - # create a RecordBatch and register it as a pyarrow.dataset.Dataset - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - dataset = ds.dataset([batch]) - ctx.register_dataset("t", dataset) - - assert ctx.tables() == {"t"} - df = ctx.sql("SELECT a+b, a-b FROM t WHERE a BETWEEN 2 and 3 AND b > 5") - - # Make sure the filter was pushed down in Physical Plan - df.explain() - captured = capfd.readouterr() - assert "filter_expr=(((a >= 2) and (a <= 3)) and (b > 5))" in captured.out - - result = df.collect() - - assert result[0].column(0) == pa.array([9]) - assert result[0].column(1) == pa.array([-3]) - - -def test_pyarrow_predicate_pushdown_is_null(ctx, capfd): - """Ensure that pyarrow filter gets pushed down for `IsNull`""" - # create a RecordBatch and register it as a pyarrow.dataset.Dataset - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6]), pa.array([7, None, 9])], - names=["a", "b", "c"], - ) - dataset = ds.dataset([batch]) - ctx.register_dataset("t", dataset) - # Make sure the filter was pushed down in Physical Plan - df = ctx.sql("SELECT a FROM t WHERE c is NULL") - df.explain() - captured = capfd.readouterr() - assert "filter_expr=is_null(c, {nan_is_null=false})" in captured.out - - result = df.collect() - assert result[0].column(0) == pa.array([2]) - - -def test_pyarrow_predicate_pushdown_timestamp(ctx, tmpdir, capfd): - """Ensure that pyarrow filter gets pushed down for timestamp""" - # Ref: https://github.com/apache/datafusion-python/issues/703 - - # create pyarrow dataset with no actual files - col_type = pa.timestamp("ns", "+00:00") - nyd_2000 = pa.scalar(dt.datetime(2000, 1, 1, tzinfo=dt.timezone.utc), col_type) - pa_dataset_fs = pa.fs.SubTreeFileSystem(str(tmpdir), pa.fs.LocalFileSystem()) - pa_dataset_format = pa.dataset.ParquetFileFormat() - pa_dataset_partition = pa.dataset.field("a") <= nyd_2000 - fragments = [ - # NOTE: we never actually make this file. - # Working predicate pushdown means it never gets accessed - pa_dataset_format.make_fragment( - "1.parquet", - filesystem=pa_dataset_fs, - partition_expression=pa_dataset_partition, - ) - ] - pa_dataset = pa.dataset.FileSystemDataset( - fragments, - pa.schema([pa.field("a", col_type)]), - pa_dataset_format, - pa_dataset_fs, - ) - - ctx.register_dataset("t", pa_dataset) - - # the partition for our only fragment is for a < 2000-01-01. 
- # so querying for a > 2024-01-01 should not touch any files - df = ctx.sql("SELECT * FROM t WHERE a > '2024-01-01T00:00:00+00:00'") - assert df.collect() == [] - - -def test_dataset_filter_nested_data(ctx): - # create Arrow StructArrays to test nested data types - data = pa.StructArray.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - batch = pa.RecordBatch.from_arrays( - [data], - names=["nested_data"], - ) - dataset = ds.dataset([batch]) - ctx.register_dataset("t", dataset) - - assert ctx.tables() == {"t"} - - df = ctx.table("t") - - # This filter will not be pushed down to DatasetExec since it - # isn't supported - df = df.filter(column("nested_data")["b"] > literal(5)).select( - column("nested_data")["a"] + column("nested_data")["b"], - column("nested_data")["a"] - column("nested_data")["b"], - ) - - result = df.collect() - - assert result[0].column(0) == pa.array([9]) - assert result[0].column(1) == pa.array([-3]) - - -def test_table_exist(ctx): - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - dataset = ds.dataset([batch]) - ctx.register_dataset("t", dataset) - - assert ctx.table_exist("t") is True - - -def test_read_json(ctx): - path = os.path.dirname(os.path.abspath(__file__)) - - # Default - test_data_path = os.path.join(path, "data_test_context", "data.json") - df = ctx.read_json(test_data_path) - result = df.collect() - - assert result[0].column(0) == pa.array(["a", "b", "c"]) - assert result[0].column(1) == pa.array([1, 2, 3]) - - # Schema - schema = pa.schema( - [ - pa.field("A", pa.string(), nullable=True), - ] - ) - df = ctx.read_json(test_data_path, schema=schema) - result = df.collect() - - assert result[0].column(0) == pa.array(["a", "b", "c"]) - assert result[0].schema == schema - - # File extension - test_data_path = os.path.join(path, "data_test_context", "data.json") - df = ctx.read_json(test_data_path, file_extension=".json") - result = df.collect() - - assert result[0].column(0) == pa.array(["a", "b", "c"]) - assert result[0].column(1) == pa.array([1, 2, 3]) - - -def test_read_json_compressed(ctx, tmp_path): - path = os.path.dirname(os.path.abspath(__file__)) - test_data_path = os.path.join(path, "data_test_context", "data.json") - - # File compression type - gzip_path = tmp_path / "data.json.gz" - - with open(test_data_path, "rb") as csv_file: - with gzip.open(gzip_path, "wb") as gzipped_file: - gzipped_file.writelines(csv_file) - - df = ctx.read_json(gzip_path, file_extension=".gz", file_compression_type="gz") - result = df.collect() - - assert result[0].column(0) == pa.array(["a", "b", "c"]) - assert result[0].column(1) == pa.array([1, 2, 3]) - - -def test_read_csv(ctx): - csv_df = ctx.read_csv(path="testing/data/csv/aggregate_test_100.csv") - csv_df.select(column("c1")).show() - - -def test_read_csv_compressed(ctx, tmp_path): - test_data_path = "testing/data/csv/aggregate_test_100.csv" - - # File compression type - gzip_path = tmp_path / "aggregate_test_100.csv.gz" - - with open(test_data_path, "rb") as csv_file: - with gzip.open(gzip_path, "wb") as gzipped_file: - gzipped_file.writelines(csv_file) - - csv_df = ctx.read_csv(gzip_path, file_extension=".gz", file_compression_type="gz") - csv_df.select(column("c1")).show() - - -def test_read_parquet(ctx): - csv_df = ctx.read_parquet(path="parquet/data/alltypes_plain.parquet") - csv_df.show() - - -def test_read_avro(ctx): - csv_df = ctx.read_avro(path="testing/data/avro/alltypes_plain.avro") - csv_df.show() - - -def 
test_create_sql_options(): - SQLOptions() - - -def test_sql_with_options_no_ddl(ctx): - sql = "CREATE TABLE IF NOT EXISTS valuetable AS VALUES(1,'HELLO'),(12,'DATAFUSION')" - ctx.sql(sql) - options = SQLOptions().with_allow_ddl(False) - with pytest.raises(Exception, match="DDL"): - ctx.sql_with_options(sql, options=options) - - -def test_sql_with_options_no_dml(ctx): - table_name = "t" - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - dataset = ds.dataset([batch]) - ctx.register_dataset(table_name, dataset) - sql = f'INSERT INTO "{table_name}" VALUES (1, 2), (2, 3);' - ctx.sql(sql) - options = SQLOptions().with_allow_dml(False) - with pytest.raises(Exception, match="DML"): - ctx.sql_with_options(sql, options=options) - - -def test_sql_with_options_no_statements(ctx): - sql = "SET time zone = 1;" - ctx.sql(sql) - options = SQLOptions().with_allow_statements(False) - with pytest.raises(Exception, match="SetVariable"): - ctx.sql_with_options(sql, options=options) diff --git a/python/datafusion/tests/test_dataframe.py b/python/datafusion/tests/test_dataframe.py deleted file mode 100644 index 2f6a818ea..000000000 --- a/python/datafusion/tests/test_dataframe.py +++ /dev/null @@ -1,797 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-import os - -import pyarrow as pa -import pyarrow.parquet as pq -import pytest - -from datafusion import functions as f -from datafusion import ( - DataFrame, - SessionContext, - WindowFrame, - column, - literal, - udf, -) - - -@pytest.fixture -def ctx(): - return SessionContext() - - -@pytest.fixture -def df(): - ctx = SessionContext() - - # create a RecordBatch and a new DataFrame from it - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6]), pa.array([8, 5, 8])], - names=["a", "b", "c"], - ) - - return ctx.create_dataframe([[batch]]) - - -@pytest.fixture -def struct_df(): - ctx = SessionContext() - - # create a RecordBatch and a new DataFrame from it - batch = pa.RecordBatch.from_arrays( - [pa.array([{"c": 1}, {"c": 2}, {"c": 3}]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - - return ctx.create_dataframe([[batch]]) - - -@pytest.fixture -def nested_df(): - ctx = SessionContext() - - # create a RecordBatch and a new DataFrame from it - # Intentionally make each array of different length - batch = pa.RecordBatch.from_arrays( - [pa.array([[1], [2, 3], [4, 5, 6], None]), pa.array([7, 8, 9, 10])], - names=["a", "b"], - ) - - return ctx.create_dataframe([[batch]]) - - -@pytest.fixture -def aggregate_df(): - ctx = SessionContext() - ctx.register_csv("test", "testing/data/csv/aggregate_test_100.csv") - return ctx.sql("select c1, sum(c2) from test group by c1") - - -def test_select(df): - df = df.select( - column("a") + column("b"), - column("a") - column("b"), - ) - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert result.column(0) == pa.array([5, 7, 9]) - assert result.column(1) == pa.array([-3, -3, -3]) - - -def test_select_columns(df): - df = df.select_columns("b", "a") - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert result.column(0) == pa.array([4, 5, 6]) - assert result.column(1) == pa.array([1, 2, 3]) - - -def test_filter(df): - df = df.filter(column("a") > literal(2)).select( - column("a") + column("b"), - column("a") - column("b"), - ) - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert result.column(0) == pa.array([9]) - assert result.column(1) == pa.array([-3]) - - -def test_sort(df): - df = df.sort(column("b").sort(ascending=False)) - - table = pa.Table.from_batches(df.collect()) - expected = {"a": [3, 2, 1], "b": [6, 5, 4], "c": [8, 5, 8]} - - assert table.to_pydict() == expected - - -def test_limit(df): - df = df.limit(1) - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert len(result.column(0)) == 1 - assert len(result.column(1)) == 1 - - -def test_limit_with_offset(df): - # only 3 rows, but limit past the end to ensure that offset is working - df = df.limit(5, offset=2) - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert len(result.column(0)) == 1 - assert len(result.column(1)) == 1 - - -def test_with_column(df): - df = df.with_column("c", column("a") + column("b")) - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert result.schema.field(0).name == "a" - assert result.schema.field(1).name == "b" - assert result.schema.field(2).name == "c" - - assert result.column(0) == pa.array([1, 2, 3]) - assert result.column(1) == pa.array([4, 5, 6]) - assert result.column(2) == pa.array([5, 7, 9]) - - -def test_with_column_renamed(df): - df = df.with_column("c", column("a") + column("b")).with_column_renamed("c", "sum") - - 
result = df.collect()[0] - - assert result.schema.field(0).name == "a" - assert result.schema.field(1).name == "b" - assert result.schema.field(2).name == "sum" - - -def test_unnest(nested_df): - nested_df = nested_df.unnest_column("a") - - # execute and collect the first (and only) batch - result = nested_df.collect()[0] - - assert result.column(0) == pa.array([1, 2, 3, 4, 5, 6, None]) - assert result.column(1) == pa.array([7, 8, 8, 9, 9, 9, 10]) - - -def test_unnest_without_nulls(nested_df): - nested_df = nested_df.unnest_column("a", preserve_nulls=False) - - # execute and collect the first (and only) batch - result = nested_df.collect()[0] - - assert result.column(0) == pa.array([1, 2, 3, 4, 5, 6]) - assert result.column(1) == pa.array([7, 8, 8, 9, 9, 9]) - - -def test_udf(df): - # is_null is a pa function over arrays - is_null = udf( - lambda x: x.is_null(), - [pa.int64()], - pa.bool_(), - volatility="immutable", - ) - - df = df.select(is_null(column("a"))) - result = df.collect()[0].column(0) - - assert result == pa.array([False, False, False]) - - -def test_join(): - ctx = SessionContext() - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - df = ctx.create_dataframe([[batch]], "l") - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2]), pa.array([8, 10])], - names=["a", "c"], - ) - df1 = ctx.create_dataframe([[batch]], "r") - - df = df.join(df1, join_keys=(["a"], ["a"]), how="inner") - df.show() - df = df.sort(column("l.a").sort(ascending=True)) - table = pa.Table.from_batches(df.collect()) - - expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} - assert table.to_pydict() == expected - - -def test_distinct(): - ctx = SessionContext() - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3, 1, 2, 3]), pa.array([4, 5, 6, 4, 5, 6])], - names=["a", "b"], - ) - df_a = ( - ctx.create_dataframe([[batch]]) - .distinct() - .sort(column("a").sort(ascending=True)) - ) - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - df_b = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) - - assert df_a.collect() == df_b.collect() - - -def test_window_functions(df): - df = df.select( - column("a"), - column("b"), - column("c"), - f.alias( - f.window("row_number", [], order_by=[f.order_by(column("c"))]), - "row", - ), - f.alias( - f.window("rank", [], order_by=[f.order_by(column("c"))]), - "rank", - ), - f.alias( - f.window("dense_rank", [], order_by=[f.order_by(column("c"))]), - "dense_rank", - ), - f.alias( - f.window("percent_rank", [], order_by=[f.order_by(column("c"))]), - "percent_rank", - ), - f.alias( - f.window("cume_dist", [], order_by=[f.order_by(column("b"))]), - "cume_dist", - ), - f.alias( - f.window("ntile", [literal(2)], order_by=[f.order_by(column("c"))]), - "ntile", - ), - f.alias( - f.window("lag", [column("b")], order_by=[f.order_by(column("b"))]), - "previous", - ), - f.alias( - f.window("lead", [column("b")], order_by=[f.order_by(column("b"))]), - "next", - ), - f.alias( - f.window( - "first_value", - [column("a")], - order_by=[f.order_by(column("b"))], - ), - "first_value", - ), - f.alias( - f.window("last_value", [column("b")], order_by=[f.order_by(column("b"))]), - "last_value", - ), - f.alias( - f.window( - "nth_value", - [column("b"), literal(2)], - order_by=[f.order_by(column("b"))], - ), - "2nd_value", - ), - ) - - table = pa.Table.from_batches(df.collect()) - - expected = { - "a": [1, 2, 3], - "b": [4, 5, 6], - "c": [8, 5, 8], - 
"row": [2, 1, 3], - "rank": [2, 1, 2], - "dense_rank": [2, 1, 2], - "percent_rank": [0.5, 0, 0.5], - "cume_dist": [0.3333333333333333, 0.6666666666666666, 1.0], - "ntile": [1, 1, 2], - "next": [5, 6, None], - "previous": [None, 4, 5], - "first_value": [1, 1, 1], - "last_value": [4, 5, 6], - "2nd_value": [None, 5, 5], - } - assert table.sort_by("a").to_pydict() == expected - - -@pytest.mark.parametrize( - ("units", "start_bound", "end_bound"), - [ - (units, start_bound, end_bound) - for units in ("rows", "range") - for start_bound in (None, 0, 1) - for end_bound in (None, 0, 1) - ] - + [ - ("groups", 0, 0), - ], -) -def test_valid_window_frame(units, start_bound, end_bound): - WindowFrame(units, start_bound, end_bound) - - -@pytest.mark.parametrize( - ("units", "start_bound", "end_bound"), - [ - ("invalid-units", 0, None), - ("invalid-units", None, 0), - ("invalid-units", None, None), - ("groups", None, 0), - ("groups", 0, None), - ("groups", None, None), - ], -) -def test_invalid_window_frame(units, start_bound, end_bound): - with pytest.raises(RuntimeError): - WindowFrame(units, start_bound, end_bound) - - -def test_get_dataframe(tmp_path): - ctx = SessionContext() - - path = tmp_path / "test.csv" - table = pa.Table.from_arrays( - [ - [1, 2, 3, 4], - ["a", "b", "c", "d"], - [1.1, 2.2, 3.3, 4.4], - ], - names=["int", "str", "float"], - ) - pa.csv.write_csv(table, path) - - ctx.register_csv("csv", path) - - df = ctx.table("csv") - assert isinstance(df, DataFrame) - - -def test_struct_select(struct_df): - df = struct_df.select( - column("a")["c"] + column("b"), - column("a")["c"] - column("b"), - ) - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert result.column(0) == pa.array([5, 7, 9]) - assert result.column(1) == pa.array([-3, -3, -3]) - - -def test_explain(df): - df = df.select( - column("a") + column("b"), - column("a") - column("b"), - ) - df.explain() - - -def test_logical_plan(aggregate_df): - plan = aggregate_df.logical_plan() - - expected = "Projection: test.c1, SUM(test.c2)" - - assert expected == plan.display() - - expected = ( - "Projection: test.c1, SUM(test.c2)\n" - " Aggregate: groupBy=[[test.c1]], aggr=[[SUM(test.c2)]]\n" - " TableScan: test" - ) - - assert expected == plan.display_indent() - - -def test_optimized_logical_plan(aggregate_df): - plan = aggregate_df.optimized_logical_plan() - - expected = "Aggregate: groupBy=[[test.c1]], aggr=[[SUM(test.c2)]]" - - assert expected == plan.display() - - expected = ( - "Aggregate: groupBy=[[test.c1]], aggr=[[SUM(test.c2)]]\n" - " TableScan: test projection=[c1, c2]" - ) - - assert expected == plan.display_indent() - - -def test_execution_plan(aggregate_df): - plan = aggregate_df.execution_plan() - - expected = ( - "AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[SUM(test.c2)]\n" # noqa: E501 - ) - - assert expected == plan.display() - - # Check the number of partitions is as expected. 
- assert isinstance(plan.partition_count, int) - - expected = ( - "ProjectionExec: expr=[c1@0 as c1, SUM(test.c2)@1 as SUM(test.c2)]\n" - " Aggregate: groupBy=[[test.c1]], aggr=[[SUM(test.c2)]]\n" - " TableScan: test projection=[c1, c2]" - ) - - indent = plan.display_indent() - - # indent plan will be different for everyone due to absolute path - # to filename, so we just check for some expected content - assert "AggregateExec:" in indent - assert "CoalesceBatchesExec:" in indent - assert "RepartitionExec:" in indent - assert "CsvExec:" in indent - - ctx = SessionContext() - stream = ctx.execute(plan, 0) - # get the one and only batch - batch = stream.next() - assert batch is not None - # there should be no more batches - batch = stream.next() - assert batch is None - - -def test_repartition(df): - df.repartition(2) - - -def test_repartition_by_hash(df): - df.repartition_by_hash(column("a"), num=2) - - -def test_intersect(): - ctx = SessionContext() - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - df_a = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([3, 4, 5]), pa.array([6, 7, 8])], - names=["a", "b"], - ) - df_b = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([3]), pa.array([6])], - names=["a", "b"], - ) - df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) - - df_a_i_b = df_a.intersect(df_b).sort(column("a").sort(ascending=True)) - - assert df_c.collect() == df_a_i_b.collect() - - -def test_except_all(): - ctx = SessionContext() - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - df_a = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([3, 4, 5]), pa.array([6, 7, 8])], - names=["a", "b"], - ) - df_b = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2]), pa.array([4, 5])], - names=["a", "b"], - ) - df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) - - df_a_e_b = df_a.except_all(df_b).sort(column("a").sort(ascending=True)) - - assert df_c.collect() == df_a_e_b.collect() - - -def test_collect_partitioned(): - ctx = SessionContext() - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - - assert [[batch]] == ctx.create_dataframe([[batch]]).collect_partitioned() - - -def test_union(ctx): - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - df_a = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([3, 4, 5]), pa.array([6, 7, 8])], - names=["a", "b"], - ) - df_b = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3, 3, 4, 5]), pa.array([4, 5, 6, 6, 7, 8])], - names=["a", "b"], - ) - df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) - - df_a_u_b = df_a.union(df_b).sort(column("a").sort(ascending=True)) - - assert df_c.collect() == df_a_u_b.collect() - - -def test_union_distinct(ctx): - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - df_a = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([3, 4, 5]), pa.array([6, 7, 8])], - names=["a", "b"], - ) - df_b = ctx.create_dataframe([[batch]]) - - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3, 4, 5]), pa.array([4, 5, 
6, 7, 8])], - names=["a", "b"], - ) - df_c = ctx.create_dataframe([[batch]]).sort(column("a").sort(ascending=True)) - - df_a_u_b = df_a.union(df_b, True).sort(column("a").sort(ascending=True)) - - assert df_c.collect() == df_a_u_b.collect() - assert df_c.collect() == df_a_u_b.collect() - - -def test_cache(df): - assert df.cache().collect() == df.collect() - - -def test_count(df): - # Get number of rows - assert df.count() == 3 - - -def test_to_pandas(df): - # Skip test if pandas is not installed - pd = pytest.importorskip("pandas") - - # Convert datafusion dataframe to pandas dataframe - pandas_df = df.to_pandas() - assert type(pandas_df) == pd.DataFrame - assert pandas_df.shape == (3, 3) - assert set(pandas_df.columns) == {"a", "b", "c"} - - -def test_empty_to_pandas(df): - # Skip test if pandas is not installed - pd = pytest.importorskip("pandas") - - # Convert empty datafusion dataframe to pandas dataframe - pandas_df = df.limit(0).to_pandas() - assert type(pandas_df) == pd.DataFrame - assert pandas_df.shape == (0, 3) - assert set(pandas_df.columns) == {"a", "b", "c"} - - -def test_to_polars(df): - # Skip test if polars is not installed - pl = pytest.importorskip("polars") - - # Convert datafusion dataframe to polars dataframe - polars_df = df.to_polars() - assert type(polars_df) == pl.DataFrame - assert polars_df.shape == (3, 3) - assert set(polars_df.columns) == {"a", "b", "c"} - - -def test_empty_to_polars(df): - # Skip test if polars is not installed - pl = pytest.importorskip("polars") - - # Convert empty datafusion dataframe to polars dataframe - polars_df = df.limit(0).to_polars() - assert type(polars_df) == pl.DataFrame - assert polars_df.shape == (0, 3) - assert set(polars_df.columns) == {"a", "b", "c"} - - -def test_to_arrow_table(df): - # Convert datafusion dataframe to pyarrow Table - pyarrow_table = df.to_arrow_table() - assert type(pyarrow_table) == pa.Table - assert pyarrow_table.shape == (3, 3) - assert set(pyarrow_table.column_names) == {"a", "b", "c"} - - -def test_execute_stream(df): - stream = df.execute_stream() - assert all(batch is not None for batch in stream) - assert not list(stream) # after one iteration the generator must be exhausted - - -@pytest.mark.parametrize("schema", [True, False]) -def test_execute_stream_to_arrow_table(df, schema): - stream = df.execute_stream() - - if schema: - pyarrow_table = pa.Table.from_batches( - (batch.to_pyarrow() for batch in stream), schema=df.schema() - ) - else: - pyarrow_table = pa.Table.from_batches((batch.to_pyarrow() for batch in stream)) - - assert isinstance(pyarrow_table, pa.Table) - assert pyarrow_table.shape == (3, 3) - assert set(pyarrow_table.column_names) == {"a", "b", "c"} - - -def test_execute_stream_partitioned(df): - streams = df.execute_stream_partitioned() - assert all(batch is not None for stream in streams for batch in stream) - assert all( - not list(stream) for stream in streams - ) # after one iteration all generators must be exhausted - - -def test_empty_to_arrow_table(df): - # Convert empty datafusion dataframe to pyarrow Table - pyarrow_table = df.limit(0).to_arrow_table() - assert type(pyarrow_table) == pa.Table - assert pyarrow_table.shape == (0, 3) - assert set(pyarrow_table.column_names) == {"a", "b", "c"} - - -def test_to_pylist(df): - # Convert datafusion dataframe to Python list - pylist = df.to_pylist() - assert isinstance(pylist, list) - assert pylist == [ - {"a": 1, "b": 4, "c": 8}, - {"a": 2, "b": 5, "c": 5}, - {"a": 3, "b": 6, "c": 8}, - ] - - -def test_to_pydict(df): - # Convert 
datafusion dataframe to Python dictionary - pydict = df.to_pydict() - assert isinstance(pydict, dict) - assert pydict == {"a": [1, 2, 3], "b": [4, 5, 6], "c": [8, 5, 8]} - - -def test_describe(df): - # Calculate statistics - df = df.describe() - - # Collect the result - result = df.to_pydict() - - assert result == { - "describe": [ - "count", - "null_count", - "mean", - "std", - "min", - "max", - "median", - ], - "a": [3.0, 0.0, 2.0, 1.0, 1.0, 3.0, 2.0], - "b": [3.0, 0.0, 5.0, 1.0, 4.0, 6.0, 5.0], - "c": [3.0, 0.0, 7.0, 1.7320508075688772, 5.0, 8.0, 8.0], - } - - -def test_write_parquet(df, tmp_path): - path = tmp_path - - df.write_parquet(str(path)) - result = pq.read_table(str(path)).to_pydict() - expected = df.to_pydict() - - assert result == expected - - -@pytest.mark.parametrize( - "compression, compression_level", - [("gzip", 6), ("brotli", 7), ("zstd", 15)], -) -def test_write_compressed_parquet(df, tmp_path, compression, compression_level): - path = tmp_path - - df.write_parquet( - str(path), compression=compression, compression_level=compression_level - ) - - # test that the actual compression scheme is the one written - for root, dirs, files in os.walk(path): - for file in files: - if file.endswith(".parquet"): - metadata = pq.ParquetFile(tmp_path / file).metadata.to_dict() - for row_group in metadata["row_groups"]: - for columns in row_group["columns"]: - assert columns["compression"].lower() == compression - - result = pq.read_table(str(path)).to_pydict() - expected = df.to_pydict() - - assert result == expected - - -@pytest.mark.parametrize( - "compression, compression_level", - [("gzip", 12), ("brotli", 15), ("zstd", 23), ("wrong", 12)], -) -def test_write_compressed_parquet_wrong_compression_level( - df, tmp_path, compression, compression_level -): - path = tmp_path - - with pytest.raises(ValueError): - df.write_parquet( - str(path), - compression=compression, - compression_level=compression_level, - ) - - -@pytest.mark.parametrize("compression", ["brotli", "zstd", "wrong"]) -def test_write_compressed_parquet_missing_compression_level(df, tmp_path, compression): - path = tmp_path - - with pytest.raises(ValueError): - df.write_parquet(str(path), compression=compression) diff --git a/python/datafusion/tests/test_expr.py b/python/datafusion/tests/test_expr.py deleted file mode 100644 index 73f7d087a..000000000 --- a/python/datafusion/tests/test_expr.py +++ /dev/null @@ -1,118 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -from datafusion import SessionContext -from datafusion.expr import Column, Literal, BinaryExpr, AggregateFunction -from datafusion.expr import ( - Projection, - Filter, - Aggregate, - Limit, - Sort, - TableScan, -) -import pytest - - -@pytest.fixture -def test_ctx(): - ctx = SessionContext() - ctx.register_csv("test", "testing/data/csv/aggregate_test_100.csv") - return ctx - - -def test_projection(test_ctx): - df = test_ctx.sql("select c1, 123, c1 < 123 from test") - plan = df.logical_plan() - - plan = plan.to_variant() - assert isinstance(plan, Projection) - - expr = plan.projections() - - col1 = expr[0].to_variant() - assert isinstance(col1, Column) - assert col1.name() == "c1" - assert col1.qualified_name() == "test.c1" - - col2 = expr[1].to_variant() - assert isinstance(col2, Literal) - assert col2.data_type() == "Int64" - assert col2.value_i64() == 123 - - col3 = expr[2].to_variant() - assert isinstance(col3, BinaryExpr) - assert isinstance(col3.left().to_variant(), Column) - assert col3.op() == "<" - assert isinstance(col3.right().to_variant(), Literal) - - plan = plan.input()[0].to_variant() - assert isinstance(plan, TableScan) - - -def test_filter(test_ctx): - df = test_ctx.sql("select c1 from test WHERE c1 > 5") - plan = df.logical_plan() - - plan = plan.to_variant() - assert isinstance(plan, Projection) - - plan = plan.input()[0].to_variant() - assert isinstance(plan, Filter) - - -def test_limit(test_ctx): - df = test_ctx.sql("select c1 from test LIMIT 10") - plan = df.logical_plan() - - plan = plan.to_variant() - assert isinstance(plan, Limit) - assert plan.skip() == 0 - - df = test_ctx.sql("select c1 from test LIMIT 10 OFFSET 5") - plan = df.logical_plan() - - plan = plan.to_variant() - assert isinstance(plan, Limit) - assert plan.skip() == 5 - - -def test_aggregate_query(test_ctx): - df = test_ctx.sql("select c1, count(*) from test group by c1") - plan = df.logical_plan() - - projection = plan.to_variant() - assert isinstance(projection, Projection) - - aggregate = projection.input()[0].to_variant() - assert isinstance(aggregate, Aggregate) - - col1 = aggregate.group_by_exprs()[0].to_variant() - assert isinstance(col1, Column) - assert col1.name() == "c1" - assert col1.qualified_name() == "test.c1" - - col2 = aggregate.aggregate_exprs()[0].to_variant() - assert isinstance(col2, AggregateFunction) - - -def test_sort(test_ctx): - df = test_ctx.sql("select c1 from test order by c1") - plan = df.logical_plan() - - plan = plan.to_variant() - assert isinstance(plan, Sort) diff --git a/python/datafusion/tests/test_functions.py b/python/datafusion/tests/test_functions.py deleted file mode 100644 index 449f706c3..000000000 --- a/python/datafusion/tests/test_functions.py +++ /dev/null @@ -1,874 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
-import math - -import numpy as np -import pyarrow as pa -import pytest -from datetime import datetime - -from datafusion import SessionContext, column -from datafusion import functions as f -from datafusion import literal - -np.seterr(invalid="ignore") - - -@pytest.fixture -def df(): - ctx = SessionContext() - # create a RecordBatch and a new DataFrame from it - batch = pa.RecordBatch.from_arrays( - [ - pa.array(["Hello", "World", "!"]), - pa.array([4, 5, 6]), - pa.array(["hello ", " world ", " !"]), - pa.array( - [ - datetime(2022, 12, 31), - datetime(2027, 6, 26), - datetime(2020, 7, 2), - ] - ), - ], - names=["a", "b", "c", "d"], - ) - return ctx.create_dataframe([[batch]]) - - -def test_named_struct(df): - df = df.with_column( - "d", - f.named_struct( - literal("a"), - column("a"), - literal("b"), - column("b"), - literal("c"), - column("c"), - ), - ) - - expected = """DataFrame() -+-------+---+---------+------------------------------+ -| a | b | c | d | -+-------+---+---------+------------------------------+ -| Hello | 4 | hello | {a: Hello, b: 4, c: hello } | -| World | 5 | world | {a: World, b: 5, c: world } | -| ! | 6 | ! | {a: !, b: 6, c: !} | -+-------+---+---------+------------------------------+ -""".strip() - - assert str(df) == expected - - -def test_literal(df): - df = df.select( - literal(1), - literal("1"), - literal("OK"), - literal(3.14), - literal(True), - literal(b"hello world"), - ) - result = df.collect() - assert len(result) == 1 - result = result[0] - assert result.column(0) == pa.array([1] * 3) - assert result.column(1) == pa.array(["1"] * 3) - assert result.column(2) == pa.array(["OK"] * 3) - assert result.column(3) == pa.array([3.14] * 3) - assert result.column(4) == pa.array([True] * 3) - assert result.column(5) == pa.array([b"hello world"] * 3) - - -def test_lit_arith(df): - """ - Test literals with arithmetic operations - """ - df = df.select(literal(1) + column("b"), f.concat(column("a"), literal("!"))) - result = df.collect() - assert len(result) == 1 - result = result[0] - assert result.column(0) == pa.array([5, 6, 7]) - assert result.column(1) == pa.array(["Hello!", "World!", "!!"]) - - -def test_math_functions(): - ctx = SessionContext() - # create a RecordBatch and a new DataFrame from it - batch = pa.RecordBatch.from_arrays( - [pa.array([0.1, -0.7, 0.55]), pa.array([float("nan"), 0, 2.0])], - names=["value", "na_value"], - ) - df = ctx.create_dataframe([[batch]]) - - values = np.array([0.1, -0.7, 0.55]) - na_values = np.array([np.nan, 0, 2.0]) - col_v = column("value") - col_nav = column("na_value") - df = df.select( - f.abs(col_v), - f.sin(col_v), - f.cos(col_v), - f.tan(col_v), - f.asin(col_v), - f.acos(col_v), - f.exp(col_v), - f.ln(col_v + literal(pa.scalar(1))), - f.log2(col_v + literal(pa.scalar(1))), - f.log10(col_v + literal(pa.scalar(1))), - f.random(), - f.atan(col_v), - f.atan2(col_v, literal(pa.scalar(1.1))), - f.ceil(col_v), - f.floor(col_v), - f.power(col_v, literal(pa.scalar(3))), - f.pow(col_v, literal(pa.scalar(4))), - f.round(col_v), - f.sqrt(col_v), - f.signum(col_v), - f.trunc(col_v), - f.asinh(col_v), - f.acosh(col_v), - f.atanh(col_v), - f.cbrt(col_v), - f.cosh(col_v), - f.degrees(col_v), - f.gcd(literal(9), literal(3)), - f.lcm(literal(6), literal(4)), - f.nanvl(col_nav, literal(5)), - f.pi(), - f.radians(col_v), - f.sinh(col_v), - f.tanh(col_v), - f.factorial(literal(6)), - f.isnan(col_nav), - f.iszero(col_nav), - f.log(literal(3), col_v + literal(pa.scalar(1))), - ) - batches = df.collect() - assert len(batches) == 1 - result 
= batches[0] - - np.testing.assert_array_almost_equal(result.column(0), np.abs(values)) - np.testing.assert_array_almost_equal(result.column(1), np.sin(values)) - np.testing.assert_array_almost_equal(result.column(2), np.cos(values)) - np.testing.assert_array_almost_equal(result.column(3), np.tan(values)) - np.testing.assert_array_almost_equal(result.column(4), np.arcsin(values)) - np.testing.assert_array_almost_equal(result.column(5), np.arccos(values)) - np.testing.assert_array_almost_equal(result.column(6), np.exp(values)) - np.testing.assert_array_almost_equal(result.column(7), np.log(values + 1.0)) - np.testing.assert_array_almost_equal(result.column(8), np.log2(values + 1.0)) - np.testing.assert_array_almost_equal(result.column(9), np.log10(values + 1.0)) - np.testing.assert_array_less(result.column(10), np.ones_like(values)) - np.testing.assert_array_almost_equal(result.column(11), np.arctan(values)) - np.testing.assert_array_almost_equal(result.column(12), np.arctan2(values, 1.1)) - np.testing.assert_array_almost_equal(result.column(13), np.ceil(values)) - np.testing.assert_array_almost_equal(result.column(14), np.floor(values)) - np.testing.assert_array_almost_equal(result.column(15), np.power(values, 3)) - np.testing.assert_array_almost_equal(result.column(16), np.power(values, 4)) - np.testing.assert_array_almost_equal(result.column(17), np.round(values)) - np.testing.assert_array_almost_equal(result.column(18), np.sqrt(values)) - np.testing.assert_array_almost_equal(result.column(19), np.sign(values)) - np.testing.assert_array_almost_equal(result.column(20), np.trunc(values)) - np.testing.assert_array_almost_equal(result.column(21), np.arcsinh(values)) - np.testing.assert_array_almost_equal(result.column(22), np.arccosh(values)) - np.testing.assert_array_almost_equal(result.column(23), np.arctanh(values)) - np.testing.assert_array_almost_equal(result.column(24), np.cbrt(values)) - np.testing.assert_array_almost_equal(result.column(25), np.cosh(values)) - np.testing.assert_array_almost_equal(result.column(26), np.degrees(values)) - np.testing.assert_array_almost_equal(result.column(27), np.gcd(9, 3)) - np.testing.assert_array_almost_equal(result.column(28), np.lcm(6, 4)) - np.testing.assert_array_almost_equal( - result.column(29), np.where(np.isnan(na_values), 5, na_values) - ) - np.testing.assert_array_almost_equal(result.column(30), np.pi) - np.testing.assert_array_almost_equal(result.column(31), np.radians(values)) - np.testing.assert_array_almost_equal(result.column(32), np.sinh(values)) - np.testing.assert_array_almost_equal(result.column(33), np.tanh(values)) - np.testing.assert_array_almost_equal(result.column(34), math.factorial(6)) - np.testing.assert_array_almost_equal(result.column(35), np.isnan(na_values)) - np.testing.assert_array_almost_equal(result.column(36), na_values == 0) - np.testing.assert_array_almost_equal( - result.column(37), np.emath.logn(3, values + 1.0) - ) - - -def py_indexof(arr, v): - try: - return arr.index(v) + 1 - except ValueError: - return np.nan - - -def py_arr_remove(arr, v, n=None): - new_arr = arr[:] - found = 0 - while found != n: - try: - new_arr.remove(v) - found += 1 - except ValueError: - break - - return new_arr - - -def py_arr_replace(arr, from_, to, n=None): - new_arr = arr[:] - found = 0 - while found != n: - try: - idx = new_arr.index(from_) - new_arr[idx] = to - found += 1 - except ValueError: - break - - return new_arr - - -def py_arr_resize(arr, size, value): - arr = np.asarray(arr) - return np.pad( - arr, - [(0, size - 
arr.shape[0])], - "constant", - constant_values=value, - ) - - -def py_flatten(arr): - result = [] - for elem in arr: - if isinstance(elem, list): - result.extend(py_flatten(elem)) - else: - result.append(elem) - return result - - -@pytest.mark.parametrize( - ("stmt", "py_expr"), - [ - [ - lambda col: f.array_append(col, literal(99.0)), - lambda data: [np.append(arr, 99.0) for arr in data], - ], - [ - lambda col: f.array_push_back(col, literal(99.0)), - lambda data: [np.append(arr, 99.0) for arr in data], - ], - [ - lambda col: f.list_append(col, literal(99.0)), - lambda data: [np.append(arr, 99.0) for arr in data], - ], - [ - lambda col: f.list_push_back(col, literal(99.0)), - lambda data: [np.append(arr, 99.0) for arr in data], - ], - [ - lambda col: f.array_concat(col, col), - lambda data: [np.concatenate([arr, arr]) for arr in data], - ], - [ - lambda col: f.array_cat(col, col), - lambda data: [np.concatenate([arr, arr]) for arr in data], - ], - [ - lambda col: f.array_dims(col), - lambda data: [[len(r)] for r in data], - ], - [ - lambda col: f.array_distinct(col), - lambda data: [list(set(r)) for r in data], - ], - [ - lambda col: f.list_distinct(col), - lambda data: [list(set(r)) for r in data], - ], - [ - lambda col: f.list_dims(col), - lambda data: [[len(r)] for r in data], - ], - [ - lambda col: f.array_element(col, literal(1)), - lambda data: [r[0] for r in data], - ], - [ - lambda col: f.array_extract(col, literal(1)), - lambda data: [r[0] for r in data], - ], - [ - lambda col: f.list_element(col, literal(1)), - lambda data: [r[0] for r in data], - ], - [ - lambda col: f.list_extract(col, literal(1)), - lambda data: [r[0] for r in data], - ], - [ - lambda col: f.array_length(col), - lambda data: [len(r) for r in data], - ], - [ - lambda col: f.list_length(col), - lambda data: [len(r) for r in data], - ], - [ - lambda col: f.array_has(col, literal(1.0)), - lambda data: [1.0 in r for r in data], - ], - [ - lambda col: f.array_has_all( - col, f.make_array(*[literal(v) for v in [1.0, 3.0, 5.0]]) - ), - lambda data: [np.all([v in r for v in [1.0, 3.0, 5.0]]) for r in data], - ], - [ - lambda col: f.array_has_any( - col, f.make_array(*[literal(v) for v in [1.0, 3.0, 5.0]]) - ), - lambda data: [np.any([v in r for v in [1.0, 3.0, 5.0]]) for r in data], - ], - [ - lambda col: f.array_position(col, literal(1.0)), - lambda data: [py_indexof(r, 1.0) for r in data], - ], - [ - lambda col: f.array_indexof(col, literal(1.0)), - lambda data: [py_indexof(r, 1.0) for r in data], - ], - [ - lambda col: f.list_position(col, literal(1.0)), - lambda data: [py_indexof(r, 1.0) for r in data], - ], - [ - lambda col: f.list_indexof(col, literal(1.0)), - lambda data: [py_indexof(r, 1.0) for r in data], - ], - [ - lambda col: f.array_positions(col, literal(1.0)), - lambda data: [[i + 1 for i, _v in enumerate(r) if _v == 1.0] for r in data], - ], - [ - lambda col: f.list_positions(col, literal(1.0)), - lambda data: [[i + 1 for i, _v in enumerate(r) if _v == 1.0] for r in data], - ], - [ - lambda col: f.array_ndims(col), - lambda data: [np.array(r).ndim for r in data], - ], - [ - lambda col: f.list_ndims(col), - lambda data: [np.array(r).ndim for r in data], - ], - [ - lambda col: f.array_prepend(literal(99.0), col), - lambda data: [np.insert(arr, 0, 99.0) for arr in data], - ], - [ - lambda col: f.array_push_front(literal(99.0), col), - lambda data: [np.insert(arr, 0, 99.0) for arr in data], - ], - [ - lambda col: f.list_prepend(literal(99.0), col), - lambda data: [np.insert(arr, 0, 99.0) for arr in data], 
- ], - [ - lambda col: f.list_push_front(literal(99.0), col), - lambda data: [np.insert(arr, 0, 99.0) for arr in data], - ], - [ - lambda col: f.array_pop_back(col), - lambda data: [arr[:-1] for arr in data], - ], - [ - lambda col: f.array_pop_front(col), - lambda data: [arr[1:] for arr in data], - ], - [ - lambda col: f.array_remove(col, literal(3.0)), - lambda data: [py_arr_remove(arr, 3.0, 1) for arr in data], - ], - [ - lambda col: f.list_remove(col, literal(3.0)), - lambda data: [py_arr_remove(arr, 3.0, 1) for arr in data], - ], - [ - lambda col: f.array_remove_n(col, literal(3.0), literal(2)), - lambda data: [py_arr_remove(arr, 3.0, 2) for arr in data], - ], - [ - lambda col: f.list_remove_n(col, literal(3.0), literal(2)), - lambda data: [py_arr_remove(arr, 3.0, 2) for arr in data], - ], - [ - lambda col: f.array_remove_all(col, literal(3.0)), - lambda data: [py_arr_remove(arr, 3.0) for arr in data], - ], - [ - lambda col: f.list_remove_all(col, literal(3.0)), - lambda data: [py_arr_remove(arr, 3.0) for arr in data], - ], - [ - lambda col: f.array_repeat(col, literal(2)), - lambda data: [[arr] * 2 for arr in data], - ], - [ - lambda col: f.array_replace(col, literal(3.0), literal(4.0)), - lambda data: [py_arr_replace(arr, 3.0, 4.0, 1) for arr in data], - ], - [ - lambda col: f.list_replace(col, literal(3.0), literal(4.0)), - lambda data: [py_arr_replace(arr, 3.0, 4.0, 1) for arr in data], - ], - [ - lambda col: f.array_replace_n(col, literal(3.0), literal(4.0), literal(1)), - lambda data: [py_arr_replace(arr, 3.0, 4.0, 1) for arr in data], - ], - [ - lambda col: f.list_replace_n(col, literal(3.0), literal(4.0), literal(2)), - lambda data: [py_arr_replace(arr, 3.0, 4.0, 2) for arr in data], - ], - [ - lambda col: f.array_replace_all(col, literal(3.0), literal(4.0)), - lambda data: [py_arr_replace(arr, 3.0, 4.0) for arr in data], - ], - [ - lambda col: f.list_replace_all(col, literal(3.0), literal(4.0)), - lambda data: [py_arr_replace(arr, 3.0, 4.0) for arr in data], - ], - [ - lambda col: f.array_slice(col, literal(2), literal(4)), - lambda data: [arr[1:4] for arr in data], - ], - pytest.param( - lambda col: f.list_slice(col, literal(-1), literal(2)), - lambda data: [arr[-1:2] for arr in data], - ), - [ - lambda col: f.array_intersect(col, literal([3.0, 4.0])), - lambda data: [np.intersect1d(arr, [3.0, 4.0]) for arr in data], - ], - [ - lambda col: f.list_intersect(col, literal([3.0, 4.0])), - lambda data: [np.intersect1d(arr, [3.0, 4.0]) for arr in data], - ], - [ - lambda col: f.array_union(col, literal([12.0, 999.0])), - lambda data: [np.union1d(arr, [12.0, 999.0]) for arr in data], - ], - [ - lambda col: f.list_union(col, literal([12.0, 999.0])), - lambda data: [np.union1d(arr, [12.0, 999.0]) for arr in data], - ], - [ - lambda col: f.array_except(col, literal([3.0])), - lambda data: [np.setdiff1d(arr, [3.0]) for arr in data], - ], - [ - lambda col: f.list_except(col, literal([3.0])), - lambda data: [np.setdiff1d(arr, [3.0]) for arr in data], - ], - [ - lambda col: f.array_resize(col, literal(10), literal(0.0)), - lambda data: [py_arr_resize(arr, 10, 0.0) for arr in data], - ], - [ - lambda col: f.list_resize(col, literal(10), literal(0.0)), - lambda data: [py_arr_resize(arr, 10, 0.0) for arr in data], - ], - [ - lambda col: f.range(literal(1), literal(5), literal(2)), - lambda data: [np.arange(1, 5, 2)], - ], - ], -) -def test_array_functions(stmt, py_expr): - data = [[1.0, 2.0, 3.0, 3.0], [4.0, 5.0, 3.0], [6.0]] - ctx = SessionContext() - batch = 
pa.RecordBatch.from_arrays([np.array(data, dtype=object)], names=["arr"]) - df = ctx.create_dataframe([[batch]]) - - col = column("arr") - query_result = df.select(stmt(col)).collect()[0].column(0) - for a, b in zip(query_result, py_expr(data)): - np.testing.assert_array_almost_equal( - np.array(a.as_py(), dtype=float), np.array(b, dtype=float) - ) - - -def test_array_function_flatten(): - data = [[1.0, 2.0, 3.0, 3.0], [4.0, 5.0, 3.0], [6.0]] - ctx = SessionContext() - batch = pa.RecordBatch.from_arrays([np.array(data, dtype=object)], names=["arr"]) - df = ctx.create_dataframe([[batch]]) - - stmt = f.flatten(literal(data)) - py_expr = [py_flatten(data)] - query_result = df.select(stmt).collect()[0].column(0) - for a, b in zip(query_result, py_expr): - np.testing.assert_array_almost_equal( - np.array(a.as_py(), dtype=float), np.array(b, dtype=float) - ) - - -@pytest.mark.parametrize( - ("stmt", "py_expr"), - [ - [ - f.array_to_string(column("arr"), literal(",")), - lambda data: [",".join([str(int(v)) for v in r]) for r in data], - ], - [ - f.array_join(column("arr"), literal(",")), - lambda data: [",".join([str(int(v)) for v in r]) for r in data], - ], - [ - f.list_to_string(column("arr"), literal(",")), - lambda data: [",".join([str(int(v)) for v in r]) for r in data], - ], - [ - f.list_join(column("arr"), literal(",")), - lambda data: [",".join([str(int(v)) for v in r]) for r in data], - ], - ], -) -def test_array_function_obj_tests(stmt, py_expr): - data = [[1.0, 2.0, 3.0, 3.0], [4.0, 5.0, 3.0], [6.0]] - ctx = SessionContext() - batch = pa.RecordBatch.from_arrays([np.array(data, dtype=object)], names=["arr"]) - df = ctx.create_dataframe([[batch]]) - query_result = np.array(df.select(stmt).collect()[0].column(0)) - for a, b in zip(query_result, py_expr(data)): - assert a == b - - -def test_string_functions(df): - df = df.select( - f.ascii(column("a")), - f.bit_length(column("a")), - f.btrim(literal(" World ")), - f.character_length(column("a")), - f.chr(literal(68)), - f.concat_ws("-", column("a"), literal("test")), - f.concat(column("a"), literal("?")), - f.initcap(column("c")), - f.left(column("a"), literal(3)), - f.length(column("c")), - f.lower(column("a")), - f.lpad(column("a"), literal(7)), - f.ltrim(column("c")), - f.md5(column("a")), - f.octet_length(column("a")), - f.repeat(column("a"), literal(2)), - f.replace(column("a"), literal("l"), literal("?")), - f.reverse(column("a")), - f.right(column("a"), literal(4)), - f.rpad(column("a"), literal(8)), - f.rtrim(column("c")), - f.split_part(column("a"), literal("l"), literal(1)), - f.starts_with(column("a"), literal("Wor")), - f.strpos(column("a"), literal("o")), - f.substr(column("a"), literal(3)), - f.translate(column("a"), literal("or"), literal("ld")), - f.trim(column("c")), - f.upper(column("c")), - f.ends_with(column("a"), literal("llo")), - ) - result = df.collect() - assert len(result) == 1 - result = result[0] - assert result.column(0) == pa.array( - [72, 87, 33], type=pa.int32() - ) # H = 72; W = 87; ! 
= 33 - assert result.column(1) == pa.array([40, 40, 8], type=pa.int32()) - assert result.column(2) == pa.array(["World", "World", "World"]) - assert result.column(3) == pa.array([5, 5, 1], type=pa.int32()) - assert result.column(4) == pa.array(["D", "D", "D"]) - assert result.column(5) == pa.array(["Hello-test", "World-test", "!-test"]) - assert result.column(6) == pa.array(["Hello?", "World?", "!?"]) - assert result.column(7) == pa.array(["Hello ", " World ", " !"]) - assert result.column(8) == pa.array(["Hel", "Wor", "!"]) - assert result.column(9) == pa.array([6, 7, 2], type=pa.int32()) - assert result.column(10) == pa.array(["hello", "world", "!"]) - assert result.column(11) == pa.array([" Hello", " World", " !"]) - assert result.column(12) == pa.array(["hello ", "world ", "!"]) - assert result.column(13) == pa.array( - [ - "8b1a9953c4611296a827abf8c47804d7", - "f5a7924e621e84c9280a9a27e1bcb7f6", - "9033e0e305f247c0c3c80d0c7848c8b3", - ] - ) - assert result.column(14) == pa.array([5, 5, 1], type=pa.int32()) - assert result.column(15) == pa.array(["HelloHello", "WorldWorld", "!!"]) - assert result.column(16) == pa.array(["He??o", "Wor?d", "!"]) - assert result.column(17) == pa.array(["olleH", "dlroW", "!"]) - assert result.column(18) == pa.array(["ello", "orld", "!"]) - assert result.column(19) == pa.array(["Hello ", "World ", "! "]) - assert result.column(20) == pa.array(["hello", " world", " !"]) - assert result.column(21) == pa.array(["He", "Wor", "!"]) - assert result.column(22) == pa.array([False, True, False]) - assert result.column(23) == pa.array([5, 2, 0], type=pa.int32()) - assert result.column(24) == pa.array(["llo", "rld", ""]) - assert result.column(25) == pa.array(["Helll", "Wldld", "!"]) - assert result.column(26) == pa.array(["hello", "world", "!"]) - assert result.column(27) == pa.array(["HELLO ", " WORLD ", " !"]) - assert result.column(28) == pa.array([True, False, False]) - - -def test_hash_functions(df): - exprs = [ - f.digest(column("a"), literal(m)) - for m in ( - "md5", - "sha224", - "sha256", - "sha384", - "sha512", - "blake2s", - "blake3", - ) - ] - df = df.select( - *exprs, - f.sha224(column("a")), - f.sha256(column("a")), - f.sha384(column("a")), - f.sha512(column("a")), - ) - result = df.collect() - assert len(result) == 1 - result = result[0] - b = bytearray.fromhex - assert result.column(0) == pa.array( - [ - b("8B1A9953C4611296A827ABF8C47804D7"), - b("F5A7924E621E84C9280A9A27E1BCB7F6"), - b("9033E0E305F247C0C3C80D0C7848C8B3"), - ] - ) - assert result.column(1) == pa.array( - [ - b("4149DA18AA8BFC2B1E382C6C26556D01A92C261B6436DAD5E3BE3FCC"), - b("12972632B6D3B6AA52BD6434552F08C1303D56B817119406466E9236"), - b("6641A7E8278BCD49E476E7ACAE158F4105B2952D22AEB2E0B9A231A0"), - ] - ) - assert result.column(2) == pa.array( - [ - b("185F8DB32271FE25F561A6FC938B2E26" "4306EC304EDA518007D1764826381969"), - b("78AE647DC5544D227130A0682A51E30B" "C7777FBB6D8A8F17007463A3ECD1D524"), - b("BB7208BC9B5D7C04F1236A82A0093A5E" "33F40423D5BA8D4266F7092C3BA43B62"), - ] - ) - assert result.column(3) == pa.array( - [ - b( - "3519FE5AD2C596EFE3E276A6F351B8FC" - "0B03DB861782490D45F7598EBD0AB5FD" - "5520ED102F38C4A5EC834E98668035FC" - ), - b( - "ED7CED84875773603AF90402E42C65F3" - "B48A5E77F84ADC7A19E8F3E8D3101010" - "22F552AEC70E9E1087B225930C1D260A" - ), - b( - "1D0EC8C84EE9521E21F06774DE232367" - "B64DE628474CB5B2E372B699A1F55AE3" - "35CC37193EF823E33324DFD9A70738A6" - ), - ] - ) - assert result.column(4) == pa.array( - [ - b( - "3615F80C9D293ED7402687F94B22D58E" - 
"529B8CC7916F8FAC7FDDF7FBD5AF4CF7" - "77D3D795A7A00A16BF7E7F3FB9561EE9" - "BAAE480DA9FE7A18769E71886B03F315" - ), - b( - "8EA77393A42AB8FA92500FB077A9509C" - "C32BC95E72712EFA116EDAF2EDFAE34F" - "BB682EFDD6C5DD13C117E08BD4AAEF71" - "291D8AACE2F890273081D0677C16DF0F" - ), - b( - "3831A6A6155E509DEE59A7F451EB3532" - "4D8F8F2DF6E3708894740F98FDEE2388" - "9F4DE5ADB0C5010DFB555CDA77C8AB5D" - "C902094C52DE3278F35A75EBC25F093A" - ), - ] - ) - assert result.column(5) == pa.array( - [ - b("F73A5FBF881F89B814871F46E26AD3FA" "37CB2921C5E8561618639015B3CCBB71"), - b("B792A0383FB9E7A189EC150686579532" "854E44B71AC394831DAED169BA85CCC5"), - b("27988A0E51812297C77A433F63523334" "6AEE29A829DCF4F46E0F58F402C6CFCB"), - ] - ) - assert result.column(6) == pa.array( - [ - b("FBC2B0516EE8744D293B980779178A35" "08850FDCFE965985782C39601B65794F"), - b("BF73D18575A736E4037D45F9E316085B" "86C19BE6363DE6AA789E13DEAACC1C4E"), - b("C8D11B9F7237E4034ADBCD2005735F9B" "C4C597C75AD89F4492BEC8F77D15F7EB"), - ] - ) - assert result.column(7) == result.column(1) # SHA-224 - assert result.column(8) == result.column(2) # SHA-256 - assert result.column(9) == result.column(3) # SHA-384 - assert result.column(10) == result.column(4) # SHA-512 - - -def test_temporal_functions(df): - df = df.select( - f.date_part(literal("month"), column("d")), - f.datepart(literal("year"), column("d")), - f.date_trunc(literal("month"), column("d")), - f.datetrunc(literal("day"), column("d")), - f.date_bin( - literal("15 minutes"), - column("d"), - literal("2001-01-01 00:02:30"), - ), - f.from_unixtime(literal(1673383974)), - f.to_timestamp(literal("2023-09-07 05:06:14.523952")), - f.to_timestamp_seconds(literal("2023-09-07 05:06:14.523952")), - f.to_timestamp_millis(literal("2023-09-07 05:06:14.523952")), - f.to_timestamp_micros(literal("2023-09-07 05:06:14.523952")), - ) - result = df.collect() - assert len(result) == 1 - result = result[0] - assert result.column(0) == pa.array([12, 6, 7], type=pa.float64()) - assert result.column(1) == pa.array([2022, 2027, 2020], type=pa.float64()) - assert result.column(2) == pa.array( - [datetime(2022, 12, 1), datetime(2027, 6, 1), datetime(2020, 7, 1)], - type=pa.timestamp("us"), - ) - assert result.column(3) == pa.array( - [datetime(2022, 12, 31), datetime(2027, 6, 26), datetime(2020, 7, 2)], - type=pa.timestamp("us"), - ) - assert result.column(4) == pa.array( - [ - datetime(2022, 12, 30, 23, 47, 30), - datetime(2027, 6, 25, 23, 47, 30), - datetime(2020, 7, 1, 23, 47, 30), - ], - type=pa.timestamp("ns"), - ) - assert result.column(5) == pa.array( - [datetime(2023, 1, 10, 20, 52, 54)] * 3, type=pa.timestamp("s") - ) - assert result.column(6) == pa.array( - [datetime(2023, 9, 7, 5, 6, 14, 523952)] * 3, type=pa.timestamp("ns") - ) - assert result.column(7) == pa.array( - [datetime(2023, 9, 7, 5, 6, 14)] * 3, type=pa.timestamp("s") - ) - assert result.column(8) == pa.array( - [datetime(2023, 9, 7, 5, 6, 14, 523000)] * 3, type=pa.timestamp("ms") - ) - assert result.column(9) == pa.array( - [datetime(2023, 9, 7, 5, 6, 14, 523952)] * 3, type=pa.timestamp("us") - ) - - -def test_case(df): - df = df.select( - f.case(column("b")).when(literal(4), literal(10)).otherwise(literal(8)), - f.case(column("a")) - .when(literal("Hello"), literal("Hola")) - .when(literal("World"), literal("Mundo")) - .otherwise(literal("!!")), - f.case(column("a")) - .when(literal("Hello"), literal("Hola")) - .when(literal("World"), literal("Mundo")) - .end(), - ) - - result = df.collect() - result = result[0] - assert result.column(0) == 
pa.array([10, 8, 8]) - assert result.column(1) == pa.array(["Hola", "Mundo", "!!"]) - assert result.column(2) == pa.array(["Hola", "Mundo", None]) - - -def test_regr_funcs(df): - # test case base on - # https://github.com/apache/arrow-datafusion/blob/d1361d56b9a9e0c165d3d71a8df6795d2a5f51dd/datafusion/core/tests/sqllogictests/test_files/aggregate.slt#L2330 - ctx = SessionContext() - result = ctx.sql( - "select regr_slope(1,1), regr_intercept(1,1), " - "regr_count(1,1), regr_r2(1,1), regr_avgx(1,1), " - "regr_avgy(1,1), regr_sxx(1,1), regr_syy(1,1), " - "regr_sxy(1,1);" - ).collect() - - assert result[0].column(0) == pa.array([None], type=pa.float64()) - assert result[0].column(1) == pa.array([None], type=pa.float64()) - assert result[0].column(2) == pa.array([1], type=pa.float64()) - assert result[0].column(3) == pa.array([None], type=pa.float64()) - assert result[0].column(4) == pa.array([1], type=pa.float64()) - assert result[0].column(5) == pa.array([1], type=pa.float64()) - assert result[0].column(6) == pa.array([0], type=pa.float64()) - assert result[0].column(7) == pa.array([0], type=pa.float64()) - assert result[0].column(8) == pa.array([0], type=pa.float64()) - - -def test_first_last_value(df): - df = df.aggregate( - [], - [ - f.first_value(column("a")), - f.first_value(column("b")), - f.first_value(column("d")), - f.last_value(column("a")), - f.last_value(column("b")), - f.last_value(column("d")), - ], - ) - - result = df.collect() - result = result[0] - assert result.column(0) == pa.array(["Hello"]) - assert result.column(1) == pa.array([4]) - assert result.column(2) == pa.array([datetime(2022, 12, 31)]) - assert result.column(3) == pa.array(["!"]) - assert result.column(4) == pa.array([6]) - assert result.column(5) == pa.array([datetime(2020, 7, 2)]) - - -def test_binary_string_functions(df): - df = df.select( - f.encode(column("a"), literal("base64")), - f.decode(f.encode(column("a"), literal("base64")), literal("base64")), - ) - result = df.collect() - assert len(result) == 1 - result = result[0] - assert result.column(0) == pa.array(["SGVsbG8", "V29ybGQ", "IQ"]) - assert pa.array(result.column(1)).cast(pa.string()) == pa.array( - ["Hello", "World", "!"] - ) diff --git a/python/datafusion/tests/test_substrait.py b/python/datafusion/tests/test_substrait.py deleted file mode 100644 index 62f6413a3..000000000 --- a/python/datafusion/tests/test_substrait.py +++ /dev/null @@ -1,51 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. 
- -import pyarrow as pa - -from datafusion import SessionContext -from datafusion import substrait as ss -import pytest - - -@pytest.fixture -def ctx(): - return SessionContext() - - -def test_substrait_serialization(ctx): - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 5, 6])], - names=["a", "b"], - ) - - ctx.register_record_batches("t", [[batch]]) - - assert ctx.tables() == {"t"} - - # For now just make sure the method calls blow up - substrait_plan = ss.substrait.serde.serialize_to_plan("SELECT * FROM t", ctx) - substrait_bytes = substrait_plan.encode() - assert isinstance(substrait_bytes, bytes) - substrait_bytes = ss.substrait.serde.serialize_bytes("SELECT * FROM t", ctx) - substrait_plan = ss.substrait.serde.deserialize_bytes(substrait_bytes) - logical_plan = ss.substrait.consumer.from_substrait_plan(ctx, substrait_plan) - - # demonstrate how to create a DataFrame from a deserialized logical plan - df = ctx.create_dataframe_from_logical_plan(logical_plan) - - substrait_plan = ss.substrait.producer.to_substrait_plan(df.logical_plan(), ctx) diff --git a/python/datafusion/tests/test_udaf.py b/python/datafusion/tests/test_udaf.py deleted file mode 100644 index c2b29d199..000000000 --- a/python/datafusion/tests/test_udaf.py +++ /dev/null @@ -1,136 +0,0 @@ -# Licensed to the Apache Software Foundation (ASF) under one -# or more contributor license agreements. See the NOTICE file -# distributed with this work for additional information -# regarding copyright ownership. The ASF licenses this file -# to you under the Apache License, Version 2.0 (the -# "License"); you may not use this file except in compliance -# with the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, -# software distributed under the License is distributed on an -# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -# KIND, either express or implied. See the License for the -# specific language governing permissions and limitations -# under the License. - -from typing import List - -import pyarrow as pa -import pyarrow.compute as pc -import pytest - -from datafusion import Accumulator, SessionContext, column, udaf - - -class Summarize(Accumulator): - """ - Interface of a user-defined accumulation. - """ - - def __init__(self): - self._sum = pa.scalar(0.0) - - def state(self) -> List[pa.Scalar]: - return [self._sum] - - def update(self, values: pa.Array) -> None: - # Not nice since pyarrow scalars can't be summed yet. - # This breaks on `None` - self._sum = pa.scalar(self._sum.as_py() + pc.sum(values).as_py()) - - def merge(self, states: pa.Array) -> None: - # Not nice since pyarrow scalars can't be summed yet. 
- # This breaks on `None` - self._sum = pa.scalar(self._sum.as_py() + pc.sum(states).as_py()) - - def evaluate(self) -> pa.Scalar: - return self._sum - - -class NotSubclassOfAccumulator: - pass - - -class MissingMethods(Accumulator): - def __init__(self): - self._sum = pa.scalar(0) - - def state(self) -> List[pa.Scalar]: - return [self._sum] - - -@pytest.fixture -def df(): - ctx = SessionContext() - - # create a RecordBatch and a new DataFrame from it - batch = pa.RecordBatch.from_arrays( - [pa.array([1, 2, 3]), pa.array([4, 4, 6])], - names=["a", "b"], - ) - return ctx.create_dataframe([[batch]]) - - -@pytest.mark.skip(reason="df.collect() will hang, need more investigations") -def test_errors(df): - with pytest.raises(TypeError): - udaf( - NotSubclassOfAccumulator, - pa.float64(), - pa.float64(), - [pa.float64()], - volatility="immutable", - ) - - accum = udaf( - MissingMethods, - pa.int64(), - pa.int64(), - [pa.int64()], - volatility="immutable", - ) - df = df.aggregate([], [accum(column("a"))]) - - msg = ( - "Can't instantiate abstract class MissingMethods with abstract " - "methods evaluate, merge, update" - ) - with pytest.raises(Exception, match=msg): - df.collect() - - -def test_aggregate(df): - summarize = udaf( - Summarize, - pa.float64(), - pa.float64(), - [pa.float64()], - volatility="immutable", - ) - - df = df.aggregate([], [summarize(column("a"))]) - - # execute and collect the first (and only) batch - result = df.collect()[0] - - assert result.column(0) == pa.array([1.0 + 2.0 + 3.0]) - - -def test_group_by(df): - summarize = udaf( - Summarize, - pa.float64(), - pa.float64(), - [pa.float64()], - volatility="immutable", - ) - - df = df.aggregate([column("b")], [summarize(column("a"))]) - - batches = df.collect() - - arrays = [batch.column(1) for batch in batches] - joined = pa.concat_arrays(arrays) - assert joined == pa.array([1.0 + 2.0, 3.0]) diff --git a/conda/recipes/bld.bat b/python/datafusion/udf.py similarity index 71% rename from conda/recipes/bld.bat rename to python/datafusion/udf.py index 90626a637..c7265fa09 100644 --- a/conda/recipes/bld.bat +++ b/python/datafusion/udf.py @@ -1,4 +1,3 @@ -# # Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information @@ -15,12 +14,16 @@ # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. -# -maturin build -vv -j %CPU_COUNT% --release --strip --features substrait --manylinux off --interpreter=%PYTHON% +"""Deprecated module for user defined functions.""" -FOR /F "delims=" %%i IN ('dir /s /b target\wheels\*.whl') DO set datafusion_wheel=%%i +import warnings -%PYTHON% -m pip install --no-deps %datafusion_wheel% -vv +from datafusion.user_defined import * # noqa: F403 -cargo-bundle-licenses --format yaml --output THIRDPARTY.yml +warnings.warn( + "The module 'udf' is deprecated and will be removed in the next release. " + "Please use 'user_defined' instead.", + DeprecationWarning, + stacklevel=2, +) diff --git a/python/datafusion/unparser.py b/python/datafusion/unparser.py new file mode 100644 index 000000000..7ca5b9190 --- /dev/null +++ b/python/datafusion/unparser.py @@ -0,0 +1,80 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. 
The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""This module provides support for unparsing DataFusion plans to SQL.
+
+For additional information about unparsing, see https://docs.rs/datafusion-sql/latest/datafusion_sql/unparser/index.html
+"""
+
+from ._internal import unparser as unparser_internal
+from .plan import LogicalPlan
+
+
+class Dialect:
+    """SQL dialect used by the unparser when generating SQL text."""
+
+    def __init__(self, dialect: unparser_internal.Dialect) -> None:
+        """This constructor is not typically called by the end user."""
+        self.dialect = dialect
+
+    @staticmethod
+    def default() -> "Dialect":
+        """Create a new default dialect."""
+        return Dialect(unparser_internal.Dialect.default())
+
+    @staticmethod
+    def mysql() -> "Dialect":
+        """Create a new MySQL dialect."""
+        return Dialect(unparser_internal.Dialect.mysql())
+
+    @staticmethod
+    def postgres() -> "Dialect":
+        """Create a new PostgreSQL dialect."""
+        return Dialect(unparser_internal.Dialect.postgres())
+
+    @staticmethod
+    def sqlite() -> "Dialect":
+        """Create a new SQLite dialect."""
+        return Dialect(unparser_internal.Dialect.sqlite())
+
+    @staticmethod
+    def duckdb() -> "Dialect":
+        """Create a new DuckDB dialect."""
+        return Dialect(unparser_internal.Dialect.duckdb())
+
+
+class Unparser:
+    """DataFusion unparser."""
+
+    def __init__(self, dialect: Dialect) -> None:
+        """This constructor is not typically called by the end user."""
+        self.unparser = unparser_internal.Unparser(dialect.dialect)
+
+    def plan_to_sql(self, plan: LogicalPlan) -> str:
+        """Convert a logical plan to a SQL string."""
+        return self.unparser.plan_to_sql(plan._raw_plan)
+
+    def with_pretty(self, pretty: bool) -> "Unparser":
+        """Set whether the generated SQL should be pretty printed."""
+        self.unparser = self.unparser.with_pretty(pretty)
+        return self
+
+
+__all__ = [
+    "Dialect",
+    "Unparser",
+]
diff --git a/python/datafusion/user_defined.py b/python/datafusion/user_defined.py
new file mode 100644
index 000000000..eef23e741
--- /dev/null
+++ b/python/datafusion/user_defined.py
@@ -0,0 +1,1044 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied. See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+"""Provides the user-defined functions for evaluation of dataframes."""
+
+from __future__ import annotations
+
+import functools
+from abc import ABCMeta, abstractmethod
+from enum import Enum
+from typing import TYPE_CHECKING, Any, Protocol, TypeGuard, TypeVar, cast, overload
+
+import pyarrow as pa
+
+import datafusion._internal as df_internal
+from datafusion import SessionContext
+from datafusion.expr import Expr
+
+if TYPE_CHECKING:
+    from _typeshed import CapsuleType as _PyCapsule
+
+    _R = TypeVar("_R", bound=pa.DataType)
+    from collections.abc import Callable, Sequence
+
+
+class Volatility(Enum):
+    """Defines how stable or volatile a function is.
+
+    When setting the volatility of a function, you can either pass this
+    enumeration or a ``str``. The ``str`` equivalent is the lower case value of the
+    name (`"immutable"`, `"stable"`, or `"volatile"`).
+    """
+
+    Immutable = 1
+    """An immutable function will always return the same output when given the
+    same input.
+
+    DataFusion will attempt to inline immutable functions during planning.
+    """
+
+    Stable = 2
+    """
+    Returns the same value for a given input within a single query.
+
+    A stable function may return different values given the same input across
+    different queries but must return the same value for a given input within a
+    query. An example of this is the ``Now`` function. DataFusion will attempt to
+    inline ``Stable`` functions during planning, when possible. For the query
+    ``select col1, now() from t1``, it might take a while to execute, but the
+    ``now()`` column will be the same for each output row because it is evaluated
+    during planning.
+    """
+
+    Volatile = 3
+    """A volatile function may change the return value from evaluation to
+    evaluation.
+
+    Multiple invocations of a volatile function may return different results
+    when used in the same query. An example of this is the random() function.
+    DataFusion cannot evaluate such functions during planning. In the query
+    ``select col1, random() from t1``, the ``random()`` function will be evaluated
+    for each output row, resulting in a unique random value for each row.
+    """
+
+    def __str__(self) -> str:
+        """Returns the string equivalent."""
+        return self.name.lower()
+
+
+def data_type_or_field_to_field(value: pa.DataType | pa.Field, name: str) -> pa.Field:
+    """Helper function to return a Field from either a Field or DataType."""
+    if isinstance(value, pa.Field):
+        return value
+    return pa.field(name, type=value)
+
+
+def data_types_or_fields_to_field_list(
+    inputs: Sequence[pa.Field | pa.DataType] | pa.Field | pa.DataType,
+) -> list[pa.Field]:
+    """Helper function to return a list of Fields."""
+    if isinstance(inputs, pa.DataType):
+        return [pa.field("value", type=inputs)]
+    if isinstance(inputs, pa.Field):
+        return [inputs]
+
+    return [
+        data_type_or_field_to_field(v, f"value_{idx}") for (idx, v) in enumerate(inputs)
+    ]
+
+
+class ScalarUDFExportable(Protocol):
+    """Type hint for object that has __datafusion_scalar_udf__ PyCapsule."""
+
+    def __datafusion_scalar_udf__(self) -> object: ...  # noqa: D105
+
+
+def _is_pycapsule(value: object) -> TypeGuard[_PyCapsule]:
+    """Return ``True`` when ``value`` is a CPython ``PyCapsule``."""
+    return value.__class__.__name__ == "PyCapsule"
+
+
+class ScalarUDF:
+    """Class for performing scalar user-defined functions (UDF).
+
+    Scalar UDFs operate on a row-by-row basis. See also :py:class:`AggregateUDF` for
+    operating on a group of rows.
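+
+    A minimal end-to-end sketch (illustrative only; it assumes the ``udf``
+    helper defined below is re-exported from the top-level ``datafusion``
+    package)::
+
+        import pyarrow as pa
+        import pyarrow.compute as pc
+        from datafusion import SessionContext, column, udf
+
+        ctx = SessionContext()
+        batch = pa.RecordBatch.from_arrays([pa.array([1, 2, 3])], names=["a"])
+        df = ctx.create_dataframe([[batch]])
+
+        def double_func(x: pa.Array) -> pa.Array:
+            # called batch-wise with one pyarrow Array per argument
+            return pc.multiply(x, 2)
+
+        double = udf(double_func, [pa.int64()], pa.int64(), "immutable")
+        df.select(double(column("a"))).show()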
+    """
+
+    def __init__(
+        self,
+        name: str,
+        func: Callable[..., _R],
+        input_fields: list[pa.Field],
+        return_field: _R,
+        volatility: Volatility | str,
+    ) -> None:
+        """Instantiate a scalar user-defined function (UDF).
+
+        See helper method :py:func:`udf` for argument details.
+        """
+        if hasattr(func, "__datafusion_scalar_udf__"):
+            self._udf = df_internal.ScalarUDF.from_pycapsule(func)
+            return
+        if isinstance(input_fields, pa.DataType):
+            input_fields = [input_fields]
+        self._udf = df_internal.ScalarUDF(
+            name, func, input_fields, return_field, str(volatility)
+        )
+
+    def __repr__(self) -> str:
+        """Print a string representation of the Scalar UDF."""
+        return self._udf.__repr__()
+
+    def __call__(self, *args: Expr) -> Expr:
+        """Execute the UDF.
+
+        This function is not typically called by an end user. These calls will
+        occur during the evaluation of the dataframe.
+        """
+        args_raw = [arg.expr for arg in args]
+        return Expr(self._udf.__call__(*args_raw))
+
+    @overload
+    @staticmethod
+    def udf(
+        input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field,
+        return_field: pa.DataType | pa.Field,
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> Callable[..., ScalarUDF]: ...
+
+    @overload
+    @staticmethod
+    def udf(
+        func: Callable[..., _R],
+        input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field,
+        return_field: pa.DataType | pa.Field,
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> ScalarUDF: ...
+
+    @overload
+    @staticmethod
+    def udf(func: ScalarUDFExportable) -> ScalarUDF: ...
+
+    @staticmethod
+    def udf(*args: Any, **kwargs: Any):  # noqa: D417
+        """Create a new User-Defined Function (UDF).
+
+        This helper can be used either as a function or as a decorator.
+
+        Usage:
+        - As a function: ``udf(func, input_fields, return_field, volatility, name)``.
+        - As a decorator: ``@udf(input_fields, return_field, volatility, name)``.
+          When used as a decorator, do **not** pass ``func`` explicitly.
+
+        In lieu of passing a PyArrow Field, you can pass a DataType for simplicity.
+        When you do so, it will be assumed that the inputs and the output are
+        nullable and carry no metadata.
+
+        Args:
+            func (Callable, optional): Only needed when calling as a function.
+                Skip this argument when using `udf` as a decorator. If you have a Rust
+                backed ScalarUDF within a PyCapsule, you can pass this parameter
+                and ignore the rest. They will be determined directly from the
+                underlying function. See the online documentation for more information.
+            input_fields (list[pa.Field | pa.DataType]): The data types or Fields
+                of the arguments to ``func``. This list must be of the same length
+                as the number of arguments.
+            return_field (_R): The field of the return value from the function.
+            volatility (Volatility | str): See `Volatility` for allowed values.
+            name (Optional[str]): A descriptive name for the function.
+
+        Returns:
+            A user-defined function that can be used in SQL expressions,
+            data aggregation, or window function calls.
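+
+        Example: Passing ``pa.Field`` values instead of bare data types (an
+        illustrative sketch; the field names and nullability shown here are
+        assumptions, not requirements)::
+
+            # ``my_func`` stands for any callable accepting one pa.Array
+            in_field = pa.field("x", pa.int64(), nullable=False)
+            out_field = pa.field("out", pa.int64())
+            my_udf = udf(my_func, [in_field], out_field, "immutable")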
+
+        Example: Using ``udf`` as a function::
+
+            def double_func(x):
+                return x * 2
+            double_udf = udf(double_func, [pa.int32()], pa.int32(),
+                "volatile", "double_it")
+
+        Example: Using ``udf`` as a decorator::
+
+            @udf([pa.int32()], pa.int32(), "volatile", "double_it")
+            def double_udf(x):
+                return x * 2
+        """  # noqa: W505 E501
+
+        def _function(
+            func: Callable[..., _R],
+            input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field,
+            return_field: pa.DataType | pa.Field,
+            volatility: Volatility | str,
+            name: str | None = None,
+        ) -> ScalarUDF:
+            if not callable(func):
+                msg = "`func` argument must be callable"
+                raise TypeError(msg)
+            if name is None:
+                if hasattr(func, "__qualname__"):
+                    name = func.__qualname__.lower()
+                else:
+                    name = func.__class__.__name__.lower()
+            input_fields = data_types_or_fields_to_field_list(input_fields)
+            return_field = data_type_or_field_to_field(return_field, "value")
+            return ScalarUDF(
+                name=name,
+                func=func,
+                input_fields=input_fields,
+                return_field=return_field,
+                volatility=volatility,
+            )
+
+        def _decorator(
+            input_fields: Sequence[pa.DataType | pa.Field] | pa.DataType | pa.Field,
+            return_field: _R,
+            volatility: Volatility | str,
+            name: str | None = None,
+        ) -> Callable:
+            def decorator(func: Callable) -> Callable:
+                udf_caller = ScalarUDF.udf(
+                    func, input_fields, return_field, volatility, name
+                )
+
+                @functools.wraps(func)
+                def wrapper(*args: Any, **kwargs: Any) -> Expr:
+                    return udf_caller(*args, **kwargs)
+
+                return wrapper
+
+            return decorator
+
+        if args and hasattr(args[0], "__datafusion_scalar_udf__"):
+            return ScalarUDF.from_pycapsule(args[0])
+
+        if args and callable(args[0]):
+            # Case 1: Used as a function, require the first parameter to be callable
+            return _function(*args, **kwargs)
+        # Case 2: Used as a decorator with parameters
+        return _decorator(*args, **kwargs)
+
+    @staticmethod
+    def from_pycapsule(func: ScalarUDFExportable) -> ScalarUDF:
+        """Create a Scalar UDF from a ScalarUDF PyCapsule object.
+
+        This function will instantiate a Scalar UDF that uses a DataFusion
+        ScalarUDF that is exported via the FFI bindings.
+        """
+        name = str(func.__class__)
+        return ScalarUDF(
+            name=name,
+            func=func,
+            input_fields=None,
+            return_field=None,
+            volatility=None,
+        )
+
+
+class Accumulator(metaclass=ABCMeta):
+    """Defines how an :py:class:`AggregateUDF` accumulates values."""
+
+    @abstractmethod
+    def state(self) -> list[pa.Scalar]:
+        """Return the current state.
+
+        While this function template expects PyArrow ``Scalar`` values as the
+        return type, you can return any value that can be converted into a
+        Scalar. This includes basic Python data types such as integers and
+        strings. In addition to primitive types, we currently support PyArrow,
+        nanoarrow, and arro3 objects. Other objects that support the Arrow FFI
+        standard will be given a "best attempt" at conversion to scalar objects.
+        """
+
+    @abstractmethod
+    def update(self, *values: pa.Array) -> None:
+        """Evaluate an array of values and update state."""
+
+    @abstractmethod
+    def merge(self, states: list[pa.Array]) -> None:
+        """Merge a set of states."""
+
+    @abstractmethod
+    def evaluate(self) -> pa.Scalar:
+        """Return the resultant value.
+
+        While this function template expects a PyArrow ``Scalar`` value as the
+        return type, you can return any value that can be converted into a
+        Scalar. This includes basic Python data types such as integers and strings. In
addition to primitive types, we currently support PyArrow, nanoarrow,
+        and arro3 objects. Other objects that support the Arrow FFI standard
+        will be given a "best attempt" at conversion to scalar objects.
+        """
+
+
+class AggregateUDFExportable(Protocol):
+    """Type hint for object that has __datafusion_aggregate_udf__ PyCapsule."""
+
+    def __datafusion_aggregate_udf__(self) -> object: ...  # noqa: D105
+
+
+class AggregateUDF:
+    """Class for performing aggregate user-defined functions (UDAF).
+
+    Aggregate UDFs operate on a group of rows and return a single value. See
+    also :py:class:`ScalarUDF` for operating on a row-by-row basis.
+    """
+
+    @overload
+    def __init__(
+        self,
+        name: str,
+        accumulator: Callable[[], Accumulator],
+        input_types: list[pa.DataType],
+        return_type: pa.DataType,
+        state_type: list[pa.DataType],
+        volatility: Volatility | str,
+    ) -> None: ...
+
+    @overload
+    def __init__(
+        self,
+        name: str,
+        accumulator: AggregateUDFExportable,
+        input_types: None = ...,
+        return_type: None = ...,
+        state_type: None = ...,
+        volatility: None = ...,
+    ) -> None: ...
+
+    def __init__(
+        self,
+        name: str,
+        accumulator: Callable[[], Accumulator] | AggregateUDFExportable,
+        input_types: list[pa.DataType] | None,
+        return_type: pa.DataType | None,
+        state_type: list[pa.DataType] | None,
+        volatility: Volatility | str | None,
+    ) -> None:
+        """Instantiate a user-defined aggregate function (UDAF).
+
+        See :py:func:`udaf` for a convenience function and argument
+        descriptions.
+        """
+        if hasattr(accumulator, "__datafusion_aggregate_udf__"):
+            self._udaf = df_internal.AggregateUDF.from_pycapsule(accumulator)
+            return
+        if (
+            input_types is None
+            or return_type is None
+            or state_type is None
+            or volatility is None
+        ):
+            msg = (
+                "`input_types`, `return_type`, `state_type`, and `volatility` "
+                "must be provided when `accumulator` is callable."
+            )
+            raise TypeError(msg)
+
+        self._udaf = df_internal.AggregateUDF(
+            name,
+            accumulator,
+            input_types,
+            return_type,
+            state_type,
+            str(volatility),
+        )
+
+    def __repr__(self) -> str:
+        """Print a string representation of the Aggregate UDF."""
+        return self._udaf.__repr__()
+
+    def __call__(self, *args: Expr) -> Expr:
+        """Execute the UDAF.
+
+        This function is not typically called by an end user. These calls will
+        occur during the evaluation of the dataframe.
+        """
+        args_raw = [arg.expr for arg in args]
+        return Expr(self._udaf.__call__(*args_raw))
+
+    @overload
+    @staticmethod
+    def udaf(
+        input_types: pa.DataType | list[pa.DataType],
+        return_type: pa.DataType,
+        state_type: list[pa.DataType],
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> Callable[..., AggregateUDF]: ...
+
+    @overload
+    @staticmethod
+    def udaf(
+        accum: Callable[[], Accumulator],
+        input_types: pa.DataType | list[pa.DataType],
+        return_type: pa.DataType,
+        state_type: list[pa.DataType],
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> AggregateUDF: ...
+
+    @overload
+    @staticmethod
+    def udaf(accum: AggregateUDFExportable) -> AggregateUDF: ...
+
+    @overload
+    @staticmethod
+    def udaf(accum: _PyCapsule) -> AggregateUDF: ...
+
+    @staticmethod
+    def udaf(*args: Any, **kwargs: Any):  # noqa: D417, C901
+        """Create a new User-Defined Aggregate Function (UDAF).
+
+        This helper allows you to define an aggregate function that can be used in
+        data aggregation or window function calls.
+
+        Usage:
+        - As a function: ``udaf(accum, input_types, return_type, state_type, volatility, name)``.
+        - As a decorator: ``@udaf(input_types, return_type, state_type, volatility, name)``.
+          When using ``udaf`` as a decorator, do not pass ``accum`` explicitly.
+
+        Function example:
+
+        If your :py:class:`Accumulator` can be instantiated with no arguments, you
+        can simply pass its type as `accum`. If you need to pass additional
+        arguments to its constructor, you can define a lambda or a factory method.
+        During runtime the :py:class:`Accumulator` will be constructed for every
+        instance in which this UDAF is used. The following examples are all valid::
+
+            import pyarrow as pa
+            import pyarrow.compute as pc
+
+            class Summarize(Accumulator):
+                def __init__(self, bias: float = 0.0):
+                    self._sum = pa.scalar(bias)
+
+                def state(self) -> list[pa.Scalar]:
+                    return [self._sum]
+
+                def update(self, values: pa.Array) -> None:
+                    self._sum = pa.scalar(self._sum.as_py() + pc.sum(values).as_py())
+
+                def merge(self, states: list[pa.Array]) -> None:
+                    self._sum = pa.scalar(self._sum.as_py() + pc.sum(states[0]).as_py())
+
+                def evaluate(self) -> pa.Scalar:
+                    return self._sum
+
+            def sum_bias_10() -> Summarize:
+                return Summarize(10.0)
+
+            udaf1 = udaf(Summarize, pa.float64(), pa.float64(), [pa.float64()],
+                "immutable")
+            udaf2 = udaf(sum_bias_10, pa.float64(), pa.float64(), [pa.float64()],
+                "immutable")
+            udaf3 = udaf(lambda: Summarize(20.0), pa.float64(), pa.float64(),
+                [pa.float64()], "immutable")
+
+        Decorator example::
+
+            @udaf(pa.float64(), pa.float64(), [pa.float64()], "immutable")
+            def udf4() -> Summarize:
+                return Summarize(10.0)
+
+        Args:
+            accum: The accumulator Python function. Only needed when calling as a
+                function. Skip this argument when using ``udaf`` as a decorator.
+                If you have a Rust backed AggregateUDF within a PyCapsule, you can
+                pass this parameter and ignore the rest. They will be determined
+                directly from the underlying function. See the online documentation
+                for more information.
+            input_types: The data types of the arguments to ``accum``.
+            return_type: The data type of the return value.
+            state_type: The data types of the intermediate accumulation.
+            volatility: See :py:class:`Volatility` for allowed values.
+            name: A descriptive name for the function.
+
+        Returns:
+            A user-defined aggregate function, which can be used in either data
+            aggregation or window function calls.
+        """  # noqa: E501 W505
+
+        def _function(
+            accum: Callable[[], Accumulator],
+            input_types: pa.DataType | list[pa.DataType],
+            return_type: pa.DataType,
+            state_type: list[pa.DataType],
+            volatility: Volatility | str,
+            name: str | None = None,
+        ) -> AggregateUDF:
+            if not callable(accum):
+                msg = "`accum` argument must be callable."
+                raise TypeError(msg)
+            if not isinstance(accum(), Accumulator):
+                msg = "Accumulator must implement the abstract base class Accumulator"
+                raise TypeError(msg)
+            if name is None:
+                name = accum().__class__.__qualname__.lower()
+            if isinstance(input_types, pa.DataType):
+                input_types = [input_types]
+            return AggregateUDF(
+                name=name,
+                accumulator=accum,
+                input_types=input_types,
+                return_type=return_type,
+                state_type=state_type,
+                volatility=volatility,
+            )
+
+        def _decorator(
+            input_types: pa.DataType | list[pa.DataType],
+            return_type: pa.DataType,
+            state_type: list[pa.DataType],
+            volatility: Volatility | str,
+            name: str | None = None,
+        ) -> Callable[..., Callable[..., Expr]]:
+            def decorator(accum: Callable[[], Accumulator]) -> Callable[..., Expr]:
+                udaf_caller = AggregateUDF.udaf(
+                    accum, input_types, return_type, state_type, volatility, name
+                )
+
+                @functools.wraps(accum)
+                def wrapper(*args: Any, **kwargs: Any) -> Expr:
+                    return udaf_caller(*args, **kwargs)
+
+                return wrapper
+
+            return decorator
+
+        if args and (
+            hasattr(args[0], "__datafusion_aggregate_udf__") or _is_pycapsule(args[0])
+        ):
+            return AggregateUDF.from_pycapsule(args[0])
+
+        if args and callable(args[0]):
+            # Case 1: Used as a function, require the first parameter to be callable
+            return _function(*args, **kwargs)
+        # Case 2: Used as a decorator with parameters
+        return _decorator(*args, **kwargs)
+
+    @staticmethod
+    def from_pycapsule(func: AggregateUDFExportable | _PyCapsule) -> AggregateUDF:
+        """Create an Aggregate UDF from an AggregateUDF PyCapsule object.
+
+        This function will instantiate an Aggregate UDF that uses a DataFusion
+        AggregateUDF that is exported via the FFI bindings.
+        """
+        if _is_pycapsule(func):
+            aggregate = cast("AggregateUDF", object.__new__(AggregateUDF))
+            aggregate._udaf = df_internal.AggregateUDF.from_pycapsule(func)
+            return aggregate
+
+        capsule = cast("AggregateUDFExportable", func)
+        name = str(capsule.__class__)
+        return AggregateUDF(
+            name=name,
+            accumulator=capsule,
+            input_types=None,
+            return_type=None,
+            state_type=None,
+            volatility=None,
+        )
+
+
+class WindowEvaluator:
+    """Evaluator class for user-defined window functions (UDWF).
+
+    It is up to the user to decide which evaluate function is appropriate.
+
+    +------------------------+--------------------------------+------------------+---------------------------+
+    | ``uses_window_frame``  | ``supports_bounded_execution`` | ``include_rank`` | function_to_implement     |
+    +========================+================================+==================+===========================+
+    | False (default)        | False (default)                | False (default)  | ``evaluate_all``          |
+    +------------------------+--------------------------------+------------------+---------------------------+
+    | False                  | True                           | False            | ``evaluate``              |
+    +------------------------+--------------------------------+------------------+---------------------------+
+    | False                  | True/False                     | True             | ``evaluate_all_with_rank``|
+    +------------------------+--------------------------------+------------------+---------------------------+
+    | True                   | True/False                     | True/False       | ``evaluate``              |
+    +------------------------+--------------------------------+------------------+---------------------------+
+    """  # noqa: W505, E501
+
+    def memoize(self) -> None:
+        """Perform a memoize operation to improve performance.
+
+        When the window frame has a fixed beginning (e.g. ``UNBOUNDED
+        PRECEDING``), some functions such as ``FIRST_VALUE`` and
+        ``NTH_VALUE`` do not need the (unbounded) input once they have
+        seen a certain amount of input.
+
+        `memoize` is called after each input batch is processed, and
+        such functions can save whatever state they need.
+        """
+
+    def get_range(self, idx: int, num_rows: int) -> tuple[int, int]:  # noqa: ARG002
+        """Return the range for the window function.
+
+        If the `uses_window_frame` flag is `false`, this method is used to
+        calculate the required range for the window function during
+        stateful execution.
+
+        Generally there is no required range, hence by default this
+        returns the smallest range (the current row): seeing the current row
+        is enough to calculate the window result (such as row_number, rank,
+        etc.).
+
+        Args:
+            idx: Current index.
+            num_rows: Number of rows.
+        """
+        return (idx, idx + 1)
+
+    def is_causal(self) -> bool:
+        """Get whether evaluator needs future data for its result."""
+        return False
+
+    def evaluate_all(self, values: list[pa.Array], num_rows: int) -> pa.Array:
+        """Evaluate a window function on an entire input partition.
+
+        This function is called once per input *partition* for window functions that
+        *do not use* values from the window frame, such as
+        :py:func:`~datafusion.functions.row_number`,
+        :py:func:`~datafusion.functions.rank`,
+        :py:func:`~datafusion.functions.dense_rank`,
+        :py:func:`~datafusion.functions.percent_rank`,
+        :py:func:`~datafusion.functions.cume_dist`,
+        :py:func:`~datafusion.functions.lead`,
+        and :py:func:`~datafusion.functions.lag`.
+
+        It produces the result of all rows in a single pass. It
+        expects to receive the entire partition as the ``value`` and
+        must produce an output column with one output row for every
+        input row.
+
+        ``num_rows`` is required to correctly compute the output in case
+        ``len(values) == 0``.
+
+        Implementing this function is an optimization: when a window
+        function is not affected by the window frame definition, or the
+        query has no frame, ``evaluate_all`` skips the (costly) window
+        frame boundary calculation and the overhead of calling
+        ``evaluate`` for each output row.
+
+        For example, the `LAG` built-in window function does not use
+        the values of its window frame (it can be computed in one shot
+        on the entire partition with ``evaluate_all`` regardless of the
+        window defined in the ``OVER`` clause).
+
+        .. code-block:: text
+
+            lag(x, 1) OVER (ORDER BY z ROWS BETWEEN 2 PRECEDING AND 3 FOLLOWING)
+
+        However, ``avg()`` computes the average in the window and thus
+        does use its window frame.
+
+        .. code-block:: text
+
+            avg(x) OVER (PARTITION BY y ORDER BY z ROWS BETWEEN 2 PRECEDING AND 3 FOLLOWING)
+        """  # noqa: W505, E501
+
+    def evaluate(
+        self, values: list[pa.Array], eval_range: tuple[int, int]
+    ) -> pa.Scalar:
+        """Evaluate window function on a range of rows in an input partition.
+
+        This is the simplest and most general function to implement
+        but also the least performant as it creates output one row at
+        a time. It is typically much faster to implement stateful
+        evaluation using one of the other specialized methods on this
+        class.
+
+        Returns a scalar that is the value of the window function within
+        ``eval_range`` for the entire partition. Argument `values` contains
+        the evaluation result of function arguments and evaluation results of
+        ORDER BY expressions. If the function has a single argument,
+        `values[1:]` will contain the ORDER BY expression results.
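+
+        An illustrative sketch (not part of this module) of a bounded
+        running-sum evaluator built on this method; it assumes ``eval_range``
+        uses an exclusive ``stop`` bound::
+
+            import pyarrow as pa
+            import pyarrow.compute as pc
+
+            class RunningSum(WindowEvaluator):
+                def supports_bounded_execution(self) -> bool:
+                    return True
+
+                def evaluate(
+                    self, values: list[pa.Array], eval_range: tuple[int, int]
+                ) -> pa.Scalar:
+                    start, stop = eval_range
+                    # sum only the rows that fall inside the current frame
+                    return pc.sum(values[0].slice(start, stop - start))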
+        """
+
+    def evaluate_all_with_rank(
+        self, num_rows: int, ranks_in_partition: list[tuple[int, int]]
+    ) -> pa.Array:
+        """Called for window functions that only need the rank of a row.
+
+        Evaluate the partition evaluator against the partition using
+        the row ranks. For example, ``rank(col("a"))`` produces
+
+        .. code-block:: text
+
+            a | rank
+            - + ----
+            A | 1
+            A | 1
+            C | 3
+            D | 4
+            D | 4
+
+        For this case, `num_rows` would be `5` and `ranks_in_partition`
+        would be
+
+        .. code-block:: text
+
+            [
+                (0,1),
+                (2,2),
+                (3,4),
+            ]
+
+        The user must implement this method if ``include_rank`` returns True.
+        """
+
+    def supports_bounded_execution(self) -> bool:
+        """Can the window function be incrementally computed using bounded memory?"""
+        return False
+
+    def uses_window_frame(self) -> bool:
+        """Does the window function use the values from the window frame?"""
+        return False
+
+    def include_rank(self) -> bool:
+        """Can this function be evaluated with (only) rank?"""
+        return False
+
+
+class WindowUDFExportable(Protocol):
+    """Type hint for object that has __datafusion_window_udf__ PyCapsule."""
+
+    def __datafusion_window_udf__(self) -> object: ...  # noqa: D105
+
+
+class WindowUDF:
+    """Class for performing window user-defined functions (UDWF).
+
+    Window UDFs operate on a partition of rows. See
+    also :py:class:`ScalarUDF` for operating on a row-by-row basis.
+    """
+
+    def __init__(
+        self,
+        name: str,
+        func: Callable[[], WindowEvaluator],
+        input_types: list[pa.DataType],
+        return_type: pa.DataType,
+        volatility: Volatility | str,
+    ) -> None:
+        """Instantiate a user-defined window function (UDWF).
+
+        See :py:func:`udwf` for a convenience function and argument
+        descriptions.
+        """
+        if hasattr(func, "__datafusion_window_udf__"):
+            self._udwf = df_internal.WindowUDF.from_pycapsule(func)
+            return
+        self._udwf = df_internal.WindowUDF(
+            name, func, input_types, return_type, str(volatility)
+        )
+
+    def __repr__(self) -> str:
+        """Print a string representation of the Window UDF."""
+        return self._udwf.__repr__()
+
+    def __call__(self, *args: Expr) -> Expr:
+        """Execute the UDWF.
+
+        This function is not typically called by an end user. These calls will
+        occur during the evaluation of the dataframe.
+        """
+        args_raw = [arg.expr for arg in args]
+        return Expr(self._udwf.__call__(*args_raw))
+
+    @overload
+    @staticmethod
+    def udwf(
+        input_types: pa.DataType | list[pa.DataType],
+        return_type: pa.DataType,
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> Callable[..., WindowUDF]: ...
+
+    @overload
+    @staticmethod
+    def udwf(
+        func: Callable[[], WindowEvaluator],
+        input_types: pa.DataType | list[pa.DataType],
+        return_type: pa.DataType,
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> WindowUDF: ...
+
+    @staticmethod
+    def udwf(*args: Any, **kwargs: Any):  # noqa: D417
+        """Create a new User-Defined Window Function (UDWF).
+
+        This helper can be used either as a function or as a decorator.
+
+        Usage:
+        - As a function: ``udwf(func, input_types, return_type, volatility, name)``.
+        - As a decorator: ``@udwf(input_types, return_type, volatility, name)``.
+          When using ``udwf`` as a decorator, do not pass ``func`` explicitly.
+
+        Function example::
+
+            import pyarrow as pa
+
+            class BiasedNumbers(WindowEvaluator):
+                def __init__(self, start: int = 0) -> None:
+                    self.start = start
+
+                def evaluate_all(self, values: list[pa.Array],
+                        num_rows: int) -> pa.Array:
+                    return pa.array([self.start + i for i in range(num_rows)])
+
+            def bias_10() -> BiasedNumbers:
+                return BiasedNumbers(10)
+
+            udwf1 = udwf(BiasedNumbers, pa.int64(), pa.int64(), "immutable")
+            udwf2 = udwf(bias_10, pa.int64(), pa.int64(), "immutable")
+            udwf3 = udwf(lambda: BiasedNumbers(20), pa.int64(), pa.int64(), "immutable")
+
+
+        Decorator example::
+
+            @udwf(pa.int64(), pa.int64(), "immutable")
+            def biased_numbers() -> BiasedNumbers:
+                return BiasedNumbers(10)
+
+        Args:
+            func: Only needed when calling as a function. Skip this argument when
+                using ``udwf`` as a decorator. If you have a Rust-backed WindowUDF
+                within a PyCapsule, you can pass this parameter and ignore the rest.
+                They will be determined directly from the underlying function. See
+                the online documentation for more information.
+            input_types: The data types of the arguments.
+            return_type: The data type of the return value.
+            volatility: See :py:class:`Volatility` for allowed values.
+            name: A descriptive name for the function.
+
+        Returns:
+            A user-defined window function that can be used in window function calls.
+        """
+        if args and hasattr(args[0], "__datafusion_window_udf__"):
+            return WindowUDF.from_pycapsule(args[0])
+
+        if args and callable(args[0]):
+            # Case 1: Used as a function, require the first parameter to be callable
+            return WindowUDF._create_window_udf(*args, **kwargs)
+        # Case 2: Used as a decorator with parameters
+        return WindowUDF._create_window_udf_decorator(*args, **kwargs)
+
+    @staticmethod
+    def _create_window_udf(
+        func: Callable[[], WindowEvaluator],
+        input_types: pa.DataType | list[pa.DataType],
+        return_type: pa.DataType,
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> WindowUDF:
+        """Create a WindowUDF instance from function arguments."""
+        if not callable(func):
+            msg = "`func` must be callable."
+            raise TypeError(msg)
+        if not isinstance(func(), WindowEvaluator):
+            msg = "`func` must implement the abstract base class WindowEvaluator"
+            raise TypeError(msg)
+
+        name = name or func.__qualname__.lower()
+        input_types = (
+            [input_types] if isinstance(input_types, pa.DataType) else input_types
+        )
+
+        return WindowUDF(name, func, input_types, return_type, volatility)
+
+    @staticmethod
+    def _get_default_name(func: Callable) -> str:
+        """Get the default name for a function based on its attributes."""
+        if hasattr(func, "__qualname__"):
+            return func.__qualname__.lower()
+        return func.__class__.__name__.lower()
+
+    @staticmethod
+    def _normalize_input_types(
+        input_types: pa.DataType | list[pa.DataType],
+    ) -> list[pa.DataType]:
+        """Convert a single DataType to a list if needed."""
+        if isinstance(input_types, pa.DataType):
+            return [input_types]
+        return input_types
+
+    @staticmethod
+    def _create_window_udf_decorator(
+        input_types: pa.DataType | list[pa.DataType],
+        return_type: pa.DataType,
+        volatility: Volatility | str,
+        name: str | None = None,
+    ) -> Callable[[Callable[[], WindowEvaluator]], Callable[..., Expr]]:
+        """Create a decorator for a WindowUDF."""
+
+        def decorator(func: Callable[[], WindowEvaluator]) -> Callable[..., Expr]:
+            udwf_caller = WindowUDF._create_window_udf(
+                func, input_types, return_type, volatility, name
+            )
+
+            @functools.wraps(func)
+            def wrapper(*args: Any, **kwargs: Any) -> Expr:
+                return udwf_caller(*args, **kwargs)
+
+            return wrapper
+
+        return decorator
+
+    @staticmethod
+    def from_pycapsule(func: WindowUDFExportable) -> WindowUDF:
+        """Create a Window UDF from a WindowUDF PyCapsule object.
+
+        This function will instantiate a Window UDF that uses a DataFusion
+        WindowUDF that is exported via the FFI bindings.
+        """
+        name = str(func.__class__)
+        return WindowUDF(
+            name=name,
+            func=func,
+            input_types=None,
+            return_type=None,
+            volatility=None,
+        )
+
+
+class TableFunction:
+    """Class for performing user-defined table functions (UDTF).
+
+    Table functions generate new table providers based on the
+    input expressions.
+    """
+
+    def __init__(
+        self, name: str, func: Callable[[], Any], ctx: SessionContext | None = None
+    ) -> None:
+        """Instantiate a user-defined table function (UDTF).
+
+        See :py:func:`udtf` for a convenience function and argument
+        descriptions.
+        """
+        self._udtf = df_internal.TableFunction(name, func, ctx)
+
+    def __call__(self, *args: Expr) -> Any:
+        """Execute the UDTF and return a table provider."""
+        args_raw = [arg.expr for arg in args]
+        return self._udtf.__call__(*args_raw)
+
+    @overload
+    @staticmethod
+    def udtf(
+        name: str,
+    ) -> Callable[..., Any]: ...
+
+    @overload
+    @staticmethod
+    def udtf(
+        func: Callable[[], Any],
+        name: str,
+    ) -> TableFunction: ...
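+
+    # Illustrative usage of the two call styles declared above (a sketch;
+    # the function name and body here are hypothetical):
+    #
+    #     def read_numbers() -> Table:
+    #         ...  # build and return a table provider
+    #
+    #     fn1 = udtf(read_numbers, "read_numbers")  # used as a function
+    #
+    #     @udtf("read_numbers")  # used as a decorator
+    #     def read_numbers() -> Table:
+    #         ...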
+
+    @staticmethod
+    def udtf(*args: Any, **kwargs: Any):
+        """Create a new User-Defined Table Function (UDTF)."""
+        if args and callable(args[0]):
+            # Case 1: Used as a function, require the first parameter to be callable
+            return TableFunction._create_table_udf(*args, **kwargs)
+        if args and hasattr(args[0], "__datafusion_table_function__"):
+            # Case 2: We have a datafusion FFI provided function
+            return TableFunction(args[1], args[0])
+        # Case 3: Used as a decorator with parameters
+        return TableFunction._create_table_udf_decorator(*args, **kwargs)
+
+    @staticmethod
+    def _create_table_udf(
+        func: Callable[..., Any],
+        name: str,
+    ) -> TableFunction:
+        """Create a TableFunction instance from function arguments."""
+        if not callable(func):
+            msg = "`func` must be callable."
+            raise TypeError(msg)
+
+        return TableFunction(name, func)
+
+    @staticmethod
+    def _create_table_udf_decorator(
+        name: str | None = None,
+    ) -> Callable[[Callable[..., Any]], TableFunction]:
+        """Create a decorator for a TableFunction."""
+
+        def decorator(func: Callable[..., Any]) -> TableFunction:
+            return TableFunction._create_table_udf(func, name)
+
+        return decorator
+
+    def __repr__(self) -> str:
+        """User printable representation."""
+        return self._udtf.__repr__()
+
+
+# Convenience exports so these functions can be imported directly
+# instead of being treated as variables at the package root
+udf = ScalarUDF.udf
+udaf = AggregateUDF.udaf
+udwf = WindowUDF.udwf
+udtf = TableFunction.udtf
diff --git a/python/datafusion/tests/__init__.py b/python/tests/__init__.py
similarity index 100%
rename from python/datafusion/tests/__init__.py
rename to python/tests/__init__.py
diff --git a/python/datafusion/tests/conftest.py b/python/tests/conftest.py
similarity index 78%
rename from python/datafusion/tests/conftest.py
rename to python/tests/conftest.py
index a4eec41e2..26ed7281d 100644
--- a/python/datafusion/tests/conftest.py
+++ b/python/tests/conftest.py
@@ -15,9 +15,10 @@
 # specific language governing permissions and limitations
 # under the License.
 
-import pytest
-from datafusion import SessionContext
 import pyarrow as pa
+import pytest
+from datafusion import DataFrame, SessionContext
+from pyarrow.csv import write_csv
 
 
 @pytest.fixture
@@ -37,7 +38,7 @@ def database(ctx, tmp_path):
         ],
         names=["int", "str", "float"],
     )
-    pa.csv.write_csv(table, path)
+    write_csv(table, path)
 
     ctx.register_csv("csv", path)
     ctx.register_csv("csv1", str(path))
@@ -48,3 +49,12 @@ def database(ctx, tmp_path):
         delimiter=",",
         schema_infer_max_records=10,
     )
+
+
+@pytest.fixture
+def fail_collect(monkeypatch):
+    def _fail_collect(self, *args, **kwargs):  # pragma: no cover - failure path
+        msg = "collect should not be called"
+        raise AssertionError(msg)
+
+    monkeypatch.setattr(DataFrame, "collect", _fail_collect)
diff --git a/python/datafusion/tests/data_test_context/data.json b/python/tests/data_test_context/data.json
similarity index 100%
rename from python/datafusion/tests/data_test_context/data.json
rename to python/tests/data_test_context/data.json
diff --git a/python/datafusion/tests/generic.py b/python/tests/generic.py
similarity index 80%
rename from python/datafusion/tests/generic.py
rename to python/tests/generic.py
index 0177e2df0..1b98fdf9e 100644
--- a/python/datafusion/tests/generic.py
+++ b/python/tests/generic.py
@@ -16,6 +16,7 @@
 # under the License.
 
 import datetime
+from datetime import timezone
 
 import numpy as np
 import pyarrow as pa
@@ -26,29 +27,29 @@
 
 
 def data():
-    np.random.seed(1)
+    rng = np.random.default_rng(1)
     data = np.concatenate(
         [
-            np.random.normal(0, 0.01, size=50),
-            np.random.normal(50, 0.01, size=50),
+            rng.normal(0, 0.01, size=50),
+            rng.normal(50, 0.01, size=50),
         ]
     )
    return pa.array(data)
 
 
 def data_with_nans():
-    np.random.seed(0)
-    data = np.random.normal(0, 0.01, size=50)
-    mask = np.random.randint(0, 2, size=50)
+    rng = np.random.default_rng(0)
+    data = rng.normal(0, 0.01, size=50)
+    mask = rng.integers(0, 2, size=50)
     data[mask == 0] = np.nan
     return data
 
 
 def data_datetime(f):
     data = [
-        datetime.datetime.now(),
-        datetime.datetime.now() - datetime.timedelta(days=1),
-        datetime.datetime.now() + datetime.timedelta(days=1),
+        datetime.datetime.now(tz=timezone.utc),
+        datetime.datetime.now(tz=timezone.utc) - datetime.timedelta(days=1),
+        datetime.datetime.now(tz=timezone.utc) + datetime.timedelta(days=1),
     ]
     return pa.array(data, type=pa.timestamp(f), mask=np.array([False, True, False]))
diff --git a/python/tests/test_aggregation.py b/python/tests/test_aggregation.py
new file mode 100644
index 000000000..240332848
--- /dev/null
+++ b/python/tests/test_aggregation.py
@@ -0,0 +1,480 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+ +import numpy as np +import pyarrow as pa +import pytest +from datafusion import SessionContext, column, lit +from datafusion import functions as f +from datafusion.common import NullTreatment + + +@pytest.fixture +def df(): + ctx = SessionContext() + + # create a RecordBatch and a new DataFrame from it + batch = pa.RecordBatch.from_arrays( + [ + pa.array([1, 2, 3]), + pa.array([4, 4, 6]), + pa.array([9, 8, 5]), + pa.array([True, True, False]), + pa.array([1, 2, None]), + ], + names=["a", "b", "c", "d", "e"], + ) + return ctx.create_dataframe([[batch]]) + + +@pytest.fixture +def df_partitioned(): + ctx = SessionContext() + + # create a RecordBatch and a new DataFrame from it + batch = pa.RecordBatch.from_arrays( + [ + pa.array([0, 1, 2, 3, 4, 5, 6]), + pa.array([7, None, 7, 8, 9, None, 9]), + pa.array(["A", "A", "A", "A", "B", "B", "B"]), + ], + names=["a", "b", "c"], + ) + + return ctx.create_dataframe([[batch]]) + + +@pytest.fixture +def df_aggregate_100(): + ctx = SessionContext() + ctx.register_csv("aggregate_test_data", "./testing/data/csv/aggregate_test_100.csv") + return ctx.table("aggregate_test_data") + + +@pytest.mark.parametrize( + ("agg_expr", "calc_expected"), + [ + (f.avg(column("a")), lambda a, b, c, d: np.array(np.average(a))), + ( + f.corr(column("a"), column("b")), + lambda a, b, c, d: np.array(np.corrcoef(a, b)[0][1]), + ), + (f.count(column("a")), lambda a, b, c, d: pa.array([len(a)])), + # Sample (co)variance -> ddof=1 + # Population (co)variance -> ddof=0 + ( + f.covar(column("a"), column("b")), + lambda a, b, c, d: np.array(np.cov(a, b, ddof=1)[0][1]), + ), + ( + f.covar_pop(column("a"), column("c")), + lambda a, b, c, d: np.array(np.cov(a, c, ddof=0)[0][1]), + ), + ( + f.covar_samp(column("b"), column("c")), + lambda a, b, c, d: np.array(np.cov(b, c, ddof=1)[0][1]), + ), + # f.grouping(col_a), # noqa: ERA001 No physical plan implemented yet + (f.max(column("a")), lambda a, b, c, d: np.array(np.max(a))), + (f.mean(column("b")), lambda a, b, c, d: np.array(np.mean(b))), + (f.median(column("b")), lambda a, b, c, d: np.array(np.median(b))), + (f.min(column("a")), lambda a, b, c, d: np.array(np.min(a))), + (f.sum(column("b")), lambda a, b, c, d: np.array(np.sum(b.to_pylist()))), + # Sample stdev -> ddof=1 + # Population stdev -> ddof=0 + (f.stddev(column("a")), lambda a, b, c, d: np.array(np.std(a, ddof=1))), + (f.stddev_pop(column("b")), lambda a, b, c, d: np.array(np.std(b, ddof=0))), + (f.stddev_samp(column("c")), lambda a, b, c, d: np.array(np.std(c, ddof=1))), + (f.var(column("a")), lambda a, b, c, d: np.array(np.var(a, ddof=1))), + (f.var_pop(column("b")), lambda a, b, c, d: np.array(np.var(b, ddof=0))), + (f.var_samp(column("c")), lambda a, b, c, d: np.array(np.var(c, ddof=1))), + ], +) +def test_aggregation_stats(df, agg_expr, calc_expected): + df = df.select("a", "b", "c", "d") + agg_df = df.aggregate([], [agg_expr]) + result = agg_df.collect()[0] + values_a, values_b, values_c, values_d = df.collect()[0] + expected = calc_expected(values_a, values_b, values_c, values_d) + np.testing.assert_array_almost_equal(result.column(0), expected) + + +@pytest.mark.parametrize( + ("agg_expr", "expected", "array_sort"), + [ + (f.approx_distinct(column("b")), pa.array([2], type=pa.uint64()), False), + ( + f.approx_distinct( + column("b"), + filter=column("a") != lit(3), + ), + pa.array([1], type=pa.uint64()), + False, + ), + (f.approx_median(column("b")), pa.array([4]), False), + (f.median(column("b"), distinct=True), pa.array([5]), False), + (f.median(column("b"), 
filter=column("a") != 2), pa.array([5]), False), + (f.approx_median(column("b"), filter=column("a") != 2), pa.array([5]), False), + (f.approx_percentile_cont(column("b"), 0.5), pa.array([4]), False), + ( + f.approx_percentile_cont( + column("b").sort(ascending=True, nulls_first=False), + 0.5, + num_centroids=2, + ), + pa.array([4]), + False, + ), + ( + f.approx_percentile_cont_with_weight(column("b"), lit(0.6), 0.5), + pa.array([4], type=pa.float64()), + False, + ), + ( + f.approx_percentile_cont_with_weight( + column("b").sort(ascending=False, nulls_first=False), lit(0.6), 0.5 + ), + pa.array([4], type=pa.float64()), + False, + ), + ( + f.approx_percentile_cont_with_weight( + column("b"), lit(0.6), 0.5, filter=column("a") != lit(3) + ), + pa.array([4], type=pa.float64()), + False, + ), + (f.array_agg(column("b")), pa.array([[4, 4, 6]]), False), + (f.array_agg(column("b"), distinct=True), pa.array([[4, 6]]), True), + ( + f.array_agg(column("e"), filter=column("e").is_not_null()), + pa.array([[1, 2]]), + False, + ), + ( + f.array_agg(column("b"), order_by=[column("c")]), + pa.array([[6, 4, 4]]), + False, + ), + ( + f.array_agg(column("b"), order_by=column("c")), + pa.array([[6, 4, 4]]), + False, + ), + (f.avg(column("b"), filter=column("a") != lit(1)), pa.array([5.0]), False), + (f.sum(column("b"), filter=column("a") != lit(1)), pa.array([10]), False), + (f.count(column("b"), distinct=True), pa.array([2]), False), + (f.count(column("b"), filter=column("a") != 3), pa.array([2]), False), + (f.count(), pa.array([3]), False), + (f.count(column("e")), pa.array([2]), False), + (f.count_star(filter=column("a") != 3), pa.array([2]), False), + (f.max(column("a"), filter=column("a") != lit(3)), pa.array([2]), False), + (f.min(column("a"), filter=column("a") != lit(1)), pa.array([2]), False), + ( + f.stddev(column("a"), filter=column("a") != lit(2)), + pa.array([np.sqrt(2)]), + False, + ), + ( + f.stddev_pop(column("a"), filter=column("a") != lit(2)), + pa.array([1.0]), + False, + ), + ], +) +def test_aggregation(df, agg_expr, expected, array_sort): + agg_df = df.aggregate([], [agg_expr.alias("agg_expr")]) + if array_sort: + agg_df = agg_df.select(f.array_sort(column("agg_expr"))) + agg_df.show() + result = agg_df.collect()[0] + + assert result.column(0) == expected + + +@pytest.mark.parametrize( + ("name", "expr", "expected"), + [ + ( + "approx_percentile_cont", + f.approx_percentile_cont(column("c3"), 0.95, num_centroids=200), + [73, 68, 122, 124, 115], + ), + ( + "approx_perc_cont_few_centroids", + f.approx_percentile_cont(column("c3"), 0.95, num_centroids=5), + [72, 68, 119, 124, 115], + ), + ( + "approx_perc_cont_filtered", + f.approx_percentile_cont( + column("c3"), 0.95, num_centroids=200, filter=column("c3") > lit(0) + ), + [83, 68, 122, 124, 117], + ), + ( + "corr", + f.corr(column("c3"), column("c2")), + [-0.1056, -0.2808, 0.0023, 0.0022, -0.2473], + ), + ( + "corr_w_filter", + f.corr(column("c3"), column("c2"), filter=column("c3") > lit(0)), + [-0.3298, 0.2925, 0.2467, -0.2269, 0.0358], + ), + ( + "covar_pop", + f.covar_pop(column("c3"), column("c2")), + [-7.2857, -25.6731, 0.2222, 0.2469, -20.2857], + ), + ( + "covar_pop_w_filter", + f.covar_pop(column("c3"), column("c2"), filter=column("c3") > lit(0)), + [-9.25, 9.0579, 13.7521, -9.9669, 1.1641], + ), + ( + "covar_samp", + f.covar_samp(column("c3"), column("c2")), + [-7.65, -27.0994, 0.2333, 0.2614, -21.3], + ), + ( + "covar_samp_w_filter", + f.covar_samp(column("c3"), column("c2"), filter=column("c3") > lit(0)), + [-10.5714, 9.9636, 
15.1273, -10.9636, 1.2417], + ), + ( + "var_samp", + f.var_samp(column("c2")), + [1.9286, 2.2047, 1.6333, 2.1438, 1.6], + ), + ( + "var_samp_w_filter", + f.var_samp(column("c2"), filter=column("c3") > lit(0)), + [1.4286, 2.4182, 1.8545, 1.4727, 1.6292], + ), + ( + "var_pop", + f.var_pop(column("c2")), + [1.8367, 2.0886, 1.5556, 2.0247, 1.5238], + ), + ( + "var_pop_w_filter", + f.var_pop(column("c2"), filter=column("c3") > lit(0)), + [1.25, 2.1983, 1.686, 1.3388, 1.5273], + ), + ], +) +def test_aggregate_100(df_aggregate_100, name, expr, expected): + # https://github.com/apache/datafusion/blob/bddb6415a50746d2803dd908d19c3758952d74f9/datafusion/sqllogictest/test_files/aggregate.slt#L1490-L1498 + + df = ( + df_aggregate_100.aggregate( + [column("c1")], + [expr.alias(name)], + ) + .select("c1", f.round(column(name), lit(4)).alias(name)) + .sort(column("c1").sort(ascending=True)) + ) + df.show() + + expected_dict = { + "c1": ["a", "b", "c", "d", "e"], + name: expected, + } + + assert df.collect()[0].to_pydict() == expected_dict + + +data_test_bitwise_and_boolean_functions = [ + ("bit_and", f.bit_and(column("a")), [0]), + ("bit_and_filter", f.bit_and(column("a"), filter=column("a") != lit(2)), [1]), + ("bit_or", f.bit_or(column("b")), [6]), + ("bit_or_filter", f.bit_or(column("b"), filter=column("a") != lit(3)), [4]), + ("bit_xor", f.bit_xor(column("c")), [4]), + ("bit_xor_distinct", f.bit_xor(column("b"), distinct=True), [2]), + ("bit_xor_filter", f.bit_xor(column("b"), filter=column("a") != lit(3)), [0]), + ( + "bit_xor_filter_distinct", + f.bit_xor(column("b"), distinct=True, filter=column("a") != lit(3)), + [4], + ), + ("bool_and", f.bool_and(column("d")), [False]), + ("bool_and_filter", f.bool_and(column("d"), filter=column("a") != lit(3)), [True]), + ("bool_or", f.bool_or(column("d")), [True]), + ("bool_or_filter", f.bool_or(column("d"), filter=column("a") == lit(3)), [False]), +] + + +@pytest.mark.parametrize( + ("name", "expr", "result"), data_test_bitwise_and_boolean_functions +) +def test_bit_and_bool_fns(df, name, expr, result): + df = df.aggregate([], [expr.alias(name)]) + + expected = { + name: result, + } + + assert df.collect()[0].to_pydict() == expected + + +@pytest.mark.parametrize( + ("name", "expr", "result"), + [ + ("first_value", f.first_value(column("a")), [0, 4]), + ( + "first_value_ordered", + f.first_value(column("a"), order_by=[column("a").sort(ascending=False)]), + [3, 6], + ), + ( + "first_value_with_null", + f.first_value( + column("b"), + order_by=[column("b").sort(ascending=True)], + null_treatment=NullTreatment.RESPECT_NULLS, + ), + [None, None], + ), + ( + "first_value_no_list_order_by", + f.first_value( + column("b"), + order_by=column("b"), + null_treatment=NullTreatment.RESPECT_NULLS, + ), + [None, None], + ), + ( + "first_value_ignore_null", + f.first_value( + column("b"), + order_by=[column("b").sort(ascending=True)], + null_treatment=NullTreatment.IGNORE_NULLS, + ), + [7, 9], + ), + ( + "last_value_ordered", + f.last_value(column("a"), order_by=[column("a").sort(ascending=False)]), + [0, 4], + ), + ( + "last_value_no_list_ordered", + f.last_value(column("a"), order_by=column("a")), + [3, 6], + ), + ( + "last_value_with_null", + f.last_value( + column("b"), + order_by=[column("b").sort(ascending=True, nulls_first=False)], + null_treatment=NullTreatment.RESPECT_NULLS, + ), + [None, None], + ), + ( + "last_value_ignore_null", + f.last_value( + column("b"), + order_by=[column("b").sort(ascending=True)], + null_treatment=NullTreatment.IGNORE_NULLS, + ), + [8, 
9], + ), + ( + "nth_value_ordered", + f.nth_value(column("a"), 2, order_by=[column("a").sort(ascending=False)]), + [2, 5], + ), + ( + "nth_value_no_list_ordered", + f.nth_value(column("a"), 2, order_by=column("a").sort(ascending=False)), + [2, 5], + ), + ( + "nth_value_with_null", + f.nth_value( + column("b"), + 3, + order_by=[column("b").sort(ascending=True, nulls_first=False)], + null_treatment=NullTreatment.RESPECT_NULLS, + ), + [8, None], + ), + ( + "nth_value_ignore_null", + f.nth_value( + column("b"), + 2, + order_by=[column("b").sort(ascending=True)], + null_treatment=NullTreatment.IGNORE_NULLS, + ), + [7, 9], + ), + ], +) +def test_first_last_value(df_partitioned, name, expr, result) -> None: + df = df_partitioned.aggregate([column("c")], [expr.alias(name)]).sort(column("c")) + + expected = { + "c": ["A", "B"], + name: result, + } + + assert df.collect()[0].to_pydict() == expected + + +@pytest.mark.parametrize( + ("name", "expr", "result"), + [ + ("string_agg", f.string_agg(column("a"), ","), "one,two,three,two"), + ("string_agg", f.string_agg(column("b"), ""), "03124"), + ( + "string_agg", + f.string_agg(column("a"), ",", filter=column("b") != lit(3)), + "one,three,two", + ), + ( + "string_agg", + f.string_agg(column("a"), ",", order_by=[column("b")]), + "one,three,two,two", + ), + ( + "string_agg", + f.string_agg(column("a"), ",", order_by=column("b")), + "one,three,two,two", + ), + ], +) +def test_string_agg(name, expr, result) -> None: + ctx = SessionContext() + + df = ctx.from_pydict( + { + "a": ["one", "two", None, "three", "two"], + "b": [0, 3, 1, 2, 4], + } + ) + + df = df.aggregate([], [expr.alias(name)]) + + expected = { + name: [result], + } + df.show() + assert df.collect()[0].to_pydict() == expected diff --git a/python/tests/test_catalog.py b/python/tests/test_catalog.py new file mode 100644 index 000000000..9310da506 --- /dev/null +++ b/python/tests/test_catalog.py @@ -0,0 +1,316 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +from __future__ import annotations + +from typing import TYPE_CHECKING + +import datafusion as dfn +import pyarrow as pa +import pyarrow.dataset as ds +import pytest +from datafusion import Catalog, SessionContext, Table, udtf + +if TYPE_CHECKING: + from datafusion.catalog import CatalogProvider, CatalogProviderExportable + + +# Note we take in `database` as a variable even though we don't use +# it because that will cause the fixture to set up the context with +# the tables we need. 
+def test_basic(ctx, database): + with pytest.raises(KeyError): + ctx.catalog("non-existent") + + default = ctx.catalog() + assert default.names() == {"public"} + + for db in [default.schema("public"), default.schema()]: + assert db.names() == {"csv1", "csv", "csv2"} + + table = db.table("csv") + assert table.kind == "physical" + assert table.schema == pa.schema( + [ + pa.field("int", pa.int64(), nullable=True), + pa.field("str", pa.string(), nullable=True), + pa.field("float", pa.float64(), nullable=True), + ] + ) + + +def create_dataset() -> Table: + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + return Table(dataset) + + +class CustomSchemaProvider(dfn.catalog.SchemaProvider): + def __init__(self): + self.tables = {"table1": create_dataset()} + + def table_names(self) -> set[str]: + return set(self.tables.keys()) + + def register_table(self, name: str, table: Table): + self.tables[name] = table + + def deregister_table(self, name, cascade: bool = True): + del self.tables[name] + + def table(self, name: str) -> Table | None: + return self.tables[name] + + def table_exist(self, name: str) -> bool: + return name in self.tables + + +class CustomErrorSchemaProvider(CustomSchemaProvider): + def table(self, name: str) -> Table | None: + message = f"{name} is not an acceptable name" + raise ValueError(message) + + +class CustomCatalogProvider(dfn.catalog.CatalogProvider): + def __init__(self): + self.schemas = {"my_schema": CustomSchemaProvider()} + + def schema_names(self) -> set[str]: + return set(self.schemas.keys()) + + def schema(self, name: str): + return self.schemas[name] + + def register_schema(self, name: str, schema: dfn.catalog.Schema): + self.schemas[name] = schema + + def deregister_schema(self, name, cascade: bool): + del self.schemas[name] + + +class CustomCatalogProviderList(dfn.catalog.CatalogProviderList): + def __init__(self): + self.catalogs = {"my_catalog": CustomCatalogProvider()} + + def catalog_names(self) -> set[str]: + return set(self.catalogs.keys()) + + def catalog(self, name: str) -> Catalog | None: + return self.catalogs[name] + + def register_catalog( + self, name: str, catalog: CatalogProviderExportable | CatalogProvider | Catalog + ) -> None: + self.catalogs[name] = catalog + + +def test_python_catalog_provider_list(ctx: SessionContext): + ctx.register_catalog_provider_list(CustomCatalogProviderList()) + + # Ensure `datafusion` catalog does not exist since + # we replaced the catalog list + assert ctx.catalog_names() == {"my_catalog"} + + # Ensure registering works + ctx.register_catalog_provider("second_catalog", Catalog.memory_catalog()) + assert ctx.catalog_names() == {"my_catalog", "second_catalog"} + + +def test_python_catalog_provider(ctx: SessionContext): + ctx.register_catalog_provider("my_catalog", CustomCatalogProvider()) + + # Check the default catalog provider + assert ctx.catalog("datafusion").names() == {"public"} + + my_catalog = ctx.catalog("my_catalog") + assert my_catalog.names() == {"my_schema"} + + my_catalog.register_schema("second_schema", CustomSchemaProvider()) + assert my_catalog.schema_names() == {"my_schema", "second_schema"} + + my_catalog.deregister_schema("my_schema") + assert my_catalog.schema_names() == {"second_schema"} + + +def test_in_memory_providers(ctx: SessionContext): + catalog = dfn.catalog.Catalog.memory_catalog() + ctx.register_catalog_provider("in_mem_catalog", catalog) + + assert ctx.catalog_names() == {"datafusion", 
"in_mem_catalog"} + + schema = dfn.catalog.Schema.memory_schema() + catalog.register_schema("in_mem_schema", schema) + + schema.register_table("my_table", create_dataset()) + + batches = ctx.sql("select * from in_mem_catalog.in_mem_schema.my_table").collect() + + assert len(batches) == 1 + assert batches[0].column(0) == pa.array([1, 2, 3]) + assert batches[0].column(1) == pa.array([4, 5, 6]) + + +def test_python_schema_provider(ctx: SessionContext): + catalog = ctx.catalog() + + catalog.deregister_schema("public") + + catalog.register_schema("test_schema1", CustomSchemaProvider()) + assert catalog.names() == {"test_schema1"} + + catalog.register_schema("test_schema2", CustomSchemaProvider()) + catalog.deregister_schema("test_schema1") + assert catalog.names() == {"test_schema2"} + + +def test_python_table_provider(ctx: SessionContext): + catalog = ctx.catalog() + + catalog.register_schema("custom_schema", CustomSchemaProvider()) + schema = catalog.schema("custom_schema") + + assert schema.table_names() == {"table1"} + + schema.deregister_table("table1") + schema.register_table("table2", create_dataset()) + assert schema.table_names() == {"table2"} + + # Use the default schema instead of our custom schema + + schema = catalog.schema() + + schema.register_table("table3", create_dataset()) + assert schema.table_names() == {"table3"} + + schema.deregister_table("table3") + schema.register_table("table4", create_dataset()) + assert schema.table_names() == {"table4"} + + +def test_schema_register_table_with_pyarrow_dataset(ctx: SessionContext): + schema = ctx.catalog().schema() + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + table_name = "pa_dataset" + + try: + schema.register_table(table_name, dataset) + assert table_name in schema.table_names() + + result = ctx.sql(f"SELECT a, b FROM {table_name}").collect() + + assert len(result) == 1 + assert result[0].column(0) == pa.array([1, 2, 3]) + assert result[0].column(1) == pa.array([4, 5, 6]) + finally: + schema.deregister_table(table_name) + + +def test_exception_not_mangled(ctx: SessionContext): + """Test registering all python providers and running a query against them.""" + + catalog_name = "custom_catalog" + schema_name = "custom_schema" + + ctx.register_catalog_provider(catalog_name, CustomCatalogProvider()) + + catalog = ctx.catalog(catalog_name) + + # Clean out previous schemas if they exist so we can start clean + for schema_name in catalog.schema_names(): + catalog.deregister_schema(schema_name, cascade=False) + + catalog.register_schema(schema_name, CustomErrorSchemaProvider()) + + schema = catalog.schema(schema_name) + + for table_name in schema.table_names(): + schema.deregister_table(table_name) + + schema.register_table("test_table", create_dataset()) + + with pytest.raises(ValueError, match=r"^test_table is not an acceptable name$"): + ctx.sql(f"select * from {catalog_name}.{schema_name}.test_table") + + +def test_in_end_to_end_python_providers(ctx: SessionContext): + """Test registering all python providers and running a query against them.""" + + all_catalog_names = [ + "datafusion", + "custom_catalog", + "in_mem_catalog", + ] + + all_schema_names = [ + "custom_schema", + "in_mem_schema", + ] + + ctx.register_catalog_provider(all_catalog_names[1], CustomCatalogProvider()) + ctx.register_catalog_provider( + all_catalog_names[2], dfn.catalog.Catalog.memory_catalog() + ) + + for catalog_name in all_catalog_names: + catalog = 
ctx.catalog(catalog_name) + + # Clean out previous schemas if they exist so we can start clean + for schema_name in catalog.schema_names(): + catalog.deregister_schema(schema_name, cascade=False) + + catalog.register_schema(all_schema_names[0], CustomSchemaProvider()) + catalog.register_schema(all_schema_names[1], dfn.catalog.Schema.memory_schema()) + + for schema_name in all_schema_names: + schema = catalog.schema(schema_name) + + for table_name in schema.table_names(): + schema.deregister_table(table_name) + + schema.register_table("test_table", create_dataset()) + + for catalog_name in all_catalog_names: + for schema_name in all_schema_names: + table_full_name = f"{catalog_name}.{schema_name}.test_table" + + batches = ctx.sql(f"select * from {table_full_name}").collect() + + assert len(batches) == 1 + assert batches[0].column(0) == pa.array([1, 2, 3]) + assert batches[0].column(1) == pa.array([4, 5, 6]) + + +def test_register_python_function_as_udtf(ctx: SessionContext): + basic_table = Table(ctx.sql("SELECT 3 AS value")) + + @udtf("my_table_function") + def my_table_function_udtf() -> Table: + return basic_table + + ctx.register_udtf(my_table_function_udtf) + + result = ctx.sql("SELECT * FROM my_table_function()").collect() + assert len(result) == 1 + assert len(result[0]) == 1 + assert len(result[0][0]) == 1 + assert result[0][0][0].as_py() == 3 diff --git a/python/tests/test_concurrency.py b/python/tests/test_concurrency.py new file mode 100644 index 000000000..f790f9473 --- /dev/null +++ b/python/tests/test_concurrency.py @@ -0,0 +1,126 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +from __future__ import annotations + +from concurrent.futures import ThreadPoolExecutor + +import pyarrow as pa +from datafusion import Config, SessionContext, col, lit +from datafusion import functions as f +from datafusion.common import SqlSchema + + +def _run_in_threads(fn, count: int = 8) -> None: + with ThreadPoolExecutor(max_workers=count) as executor: + futures = [executor.submit(fn, i) for i in range(count)] + for future in futures: + # Propagate any exception raised in the worker thread. + future.result() + + +def test_concurrent_access_to_shared_structures() -> None: + """Exercise SqlSchema, Config, and DataFrame concurrently.""" + + schema = SqlSchema("concurrency") + config = Config() + ctx = SessionContext() + + batch = pa.record_batch([pa.array([1, 2, 3], type=pa.int32())], names=["value"]) + df = ctx.create_dataframe([[batch]]) + + config_key = "datafusion.execution.batch_size" + expected_rows = batch.num_rows + + def worker(index: int) -> None: + schema.name = f"concurrency-{index}" + assert schema.name.startswith("concurrency-") + # Exercise getters that use internal locks. 
+ assert isinstance(schema.tables, list) + assert isinstance(schema.views, list) + assert isinstance(schema.functions, list) + + config.set(config_key, str(1024 + index)) + assert config.get(config_key) is not None + # Access the full config map to stress lock usage. + assert config_key in config.get_all() + + batches = df.collect() + assert sum(batch.num_rows for batch in batches) == expected_rows + + _run_in_threads(worker, count=12) + + +def test_config_set_during_get_all() -> None: + """Ensure config writes proceed while another thread reads all entries.""" + + config = Config() + key = "datafusion.execution.batch_size" + + def reader() -> None: + for _ in range(200): + # get_all should not hold the lock while converting to Python objects + config.get_all() + + def writer() -> None: + for index in range(200): + config.set(key, str(1024 + index)) + + with ThreadPoolExecutor(max_workers=2) as executor: + reader_future = executor.submit(reader) + writer_future = executor.submit(writer) + reader_future.result(timeout=10) + writer_future.result(timeout=10) + + assert config.get(key) is not None + + +def test_case_builder_reuse_from_multiple_threads() -> None: + """Ensure the case builder can be safely reused across threads.""" + + ctx = SessionContext() + values = pa.array([0, 1, 2, 3, 4], type=pa.int32()) + df = ctx.create_dataframe([[pa.record_batch([values], names=["value"])]]) + + base_builder = f.case(col("value")) + + def add_case(i: int) -> None: + nonlocal base_builder + base_builder = base_builder.when(lit(i), lit(f"value-{i}")) + + _run_in_threads(add_case, count=8) + + with ThreadPoolExecutor(max_workers=2) as executor: + otherwise_future = executor.submit(base_builder.otherwise, lit("default")) + case_expr = otherwise_future.result() + + result = df.select(case_expr.alias("label")).collect() + assert sum(batch.num_rows for batch in result) == len(values) + + predicate_builder = f.when(col("value") == lit(0), lit("zero")) + + def add_predicate(i: int) -> None: + predicate_builder.when(col("value") == lit(i + 1), lit(f"value-{i + 1}")) + + _run_in_threads(add_predicate, count=4) + + with ThreadPoolExecutor(max_workers=2) as executor: + end_future = executor.submit(predicate_builder.end) + predicate_expr = end_future.result() + + result = df.select(predicate_expr.alias("label")).collect() + assert sum(batch.num_rows for batch in result) == len(values) diff --git a/python/datafusion/tests/test_config.py b/python/tests/test_config.py similarity index 100% rename from python/datafusion/tests/test_config.py rename to python/tests/test_config.py index 12d9fc3ff..c1d7f97e1 100644 --- a/python/datafusion/tests/test_config.py +++ b/python/tests/test_config.py @@ -15,8 +15,8 @@ # specific language governing permissions and limitations # under the License. -from datafusion import Config import pytest +from datafusion import Config @pytest.fixture diff --git a/python/tests/test_context.py b/python/tests/test_context.py new file mode 100644 index 000000000..5df6ed20f --- /dev/null +++ b/python/tests/test_context.py @@ -0,0 +1,872 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import datetime as dt +import gzip +import pathlib + +import pyarrow as pa +import pyarrow.dataset as ds +import pytest +from datafusion import ( + CsvReadOptions, + DataFrame, + RuntimeEnvBuilder, + SessionConfig, + SessionContext, + SQLOptions, + Table, + column, + literal, +) + + +def test_create_context_no_args(): + SessionContext() + + +def test_create_context_session_config_only(): + SessionContext(config=SessionConfig()) + + +def test_create_context_runtime_config_only(): + SessionContext(runtime=RuntimeEnvBuilder()) + + +@pytest.mark.parametrize("path_to_str", [True, False]) +def test_runtime_configs(tmp_path, path_to_str): + path1 = tmp_path / "dir1" + path2 = tmp_path / "dir2" + + path1 = str(path1) if path_to_str else path1 + path2 = str(path2) if path_to_str else path2 + + runtime = RuntimeEnvBuilder().with_disk_manager_specified(path1, path2) + config = SessionConfig().with_default_catalog_and_schema("foo", "bar") + ctx = SessionContext(config, runtime) + assert ctx is not None + + db = ctx.catalog("foo").schema("bar") + assert db is not None + + +@pytest.mark.parametrize("path_to_str", [True, False]) +def test_temporary_files(tmp_path, path_to_str): + path = str(tmp_path) if path_to_str else tmp_path + + runtime = RuntimeEnvBuilder().with_temp_file_path(path) + config = SessionConfig().with_default_catalog_and_schema("foo", "bar") + ctx = SessionContext(config, runtime) + assert ctx is not None + + db = ctx.catalog("foo").schema("bar") + assert db is not None + + +def test_create_context_with_all_valid_args(): + runtime = RuntimeEnvBuilder().with_disk_manager_os().with_fair_spill_pool(10000000) + config = ( + SessionConfig() + .with_create_default_catalog_and_schema(enabled=True) + .with_default_catalog_and_schema("foo", "bar") + .with_target_partitions(1) + .with_information_schema(enabled=True) + .with_repartition_joins(enabled=False) + .with_repartition_aggregations(enabled=False) + .with_repartition_windows(enabled=False) + .with_parquet_pruning(enabled=False) + ) + + ctx = SessionContext(config, runtime) + + # verify that at least some of the arguments worked + ctx.catalog("foo").schema("bar") + with pytest.raises(KeyError): + ctx.catalog("datafusion") + + +def test_register_record_batches(ctx): + # create a RecordBatch and register it as memtable + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + + ctx.register_record_batches("t", [[batch]]) + + assert ctx.catalog().schema().names() == {"t"} + + result = ctx.sql("SELECT a+b, a-b FROM t").collect() + + assert result[0].column(0) == pa.array([5, 7, 9]) + assert result[0].column(1) == pa.array([-3, -3, -3]) + + +def test_create_dataframe_registers_unique_table_name(ctx): + # create a RecordBatch and register it as memtable + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + + df = ctx.create_dataframe([[batch]]) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert len(tables[0]) == 33 + assert tables[0].startswith("c") + # ensure that the rest of the table 
name contains + # only hexadecimal numbers + for c in tables[0][1:]: + assert c in "0123456789abcdef" + + +def test_create_dataframe_registers_with_defined_table_name(ctx): + # create a RecordBatch and register it as memtable + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + + df = ctx.create_dataframe([[batch]], name="tbl") + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert tables[0] == "tbl" + + +def test_from_arrow_table(ctx): + # create a PyArrow table + data = {"a": [1, 2, 3], "b": [4, 5, 6]} + table = pa.Table.from_pydict(data) + + # convert to DataFrame + df = ctx.from_arrow(table) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert isinstance(df, DataFrame) + assert set(df.schema().names) == {"a", "b"} + assert df.collect()[0].num_rows == 3 + + +def record_batch_generator(num_batches: int): + schema = pa.schema([("a", pa.int64()), ("b", pa.int64())]) + for _i in range(num_batches): + yield pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], schema=schema + ) + + +@pytest.mark.parametrize( + "source", + [ + # __arrow_c_array__ sources + pa.array([{"a": 1, "b": 4}, {"a": 2, "b": 5}, {"a": 3, "b": 6}]), + # __arrow_c_stream__ sources + pa.RecordBatch.from_pydict({"a": [1, 2, 3], "b": [4, 5, 6]}), + pa.RecordBatchReader.from_batches( + pa.schema([("a", pa.int64()), ("b", pa.int64())]), record_batch_generator(1) + ), + pa.Table.from_pydict({"a": [1, 2, 3], "b": [4, 5, 6]}), + ], +) +def test_from_arrow_sources(ctx, source) -> None: + df = ctx.from_arrow(source) + assert df + assert isinstance(df, DataFrame) + assert df.schema().names == ["a", "b"] + assert df.count() == 3 + + +def test_from_arrow_table_with_name(ctx): + # create a PyArrow table + data = {"a": [1, 2, 3], "b": [4, 5, 6]} + table = pa.Table.from_pydict(data) + + # convert to DataFrame with optional name + df = ctx.from_arrow(table, name="tbl") + tables = list(ctx.catalog().schema().names()) + + assert df + assert tables[0] == "tbl" + + +def test_from_arrow_table_empty(ctx): + data = {"a": [], "b": []} + schema = pa.schema([("a", pa.int32()), ("b", pa.string())]) + table = pa.Table.from_pydict(data, schema=schema) + + # convert to DataFrame + df = ctx.from_arrow(table) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert isinstance(df, DataFrame) + assert set(df.schema().names) == {"a", "b"} + assert len(df.collect()) == 0 + + +def test_from_arrow_table_empty_no_schema(ctx): + data = {"a": [], "b": []} + table = pa.Table.from_pydict(data) + + # convert to DataFrame + df = ctx.from_arrow(table) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert isinstance(df, DataFrame) + assert set(df.schema().names) == {"a", "b"} + assert len(df.collect()) == 0 + + +def test_from_pylist(ctx): + # create a dataframe from Python list + data = [ + {"a": 1, "b": 4}, + {"a": 2, "b": 5}, + {"a": 3, "b": 6}, + ] + + df = ctx.from_pylist(data) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert isinstance(df, DataFrame) + assert set(df.schema().names) == {"a", "b"} + assert df.collect()[0].num_rows == 3 + + +def test_from_pydict(ctx): + # create a dataframe from Python dictionary + data = {"a": [1, 2, 3], "b": [4, 5, 6]} + + df = ctx.from_pydict(data) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + 
assert isinstance(df, DataFrame) + assert set(df.schema().names) == {"a", "b"} + assert df.collect()[0].num_rows == 3 + + +def test_from_pandas(ctx): + # create a dataframe from pandas dataframe + pd = pytest.importorskip("pandas") + data = {"a": [1, 2, 3], "b": [4, 5, 6]} + pandas_df = pd.DataFrame(data) + + df = ctx.from_pandas(pandas_df) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert isinstance(df, DataFrame) + assert set(df.schema().names) == {"a", "b"} + assert df.collect()[0].num_rows == 3 + + +def test_from_polars(ctx): + # create a dataframe from Polars dataframe + pd = pytest.importorskip("polars") + data = {"a": [1, 2, 3], "b": [4, 5, 6]} + polars_df = pd.DataFrame(data) + + df = ctx.from_polars(polars_df) + tables = list(ctx.catalog().schema().names()) + + assert df + assert len(tables) == 1 + assert isinstance(df, DataFrame) + assert set(df.schema().names) == {"a", "b"} + assert df.collect()[0].num_rows == 3 + + +def test_register_table(ctx, database): + default = ctx.catalog() + public = default.schema("public") + assert public.names() == {"csv", "csv1", "csv2"} + table = public.table("csv") + + ctx.register_table("csv3", table) + assert public.names() == {"csv", "csv1", "csv2", "csv3"} + + +def test_read_table_from_catalog(ctx, database): + default = ctx.catalog() + public = default.schema("public") + assert public.names() == {"csv", "csv1", "csv2"} + + table = public.table("csv") + table_df = ctx.read_table(table) + table_df.show() + + +def test_read_table_from_df(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + result = ctx.read_table(df).collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +def test_read_table_from_dataset(ctx): + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + + result = ctx.read_table(dataset).collect() + + assert result[0].column(0) == pa.array([1, 2, 3]) + assert result[0].column(1) == pa.array([4, 5, 6]) + + +def test_deregister_table(ctx, database): + default = ctx.catalog() + public = default.schema("public") + assert public.names() == {"csv", "csv1", "csv2"} + + ctx.deregister_table("csv") + assert public.names() == {"csv1", "csv2"} + + +def test_register_table_from_dataframe(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + ctx.register_table("df_tbl", df) + result = ctx.sql("SELECT * FROM df_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +@pytest.mark.parametrize("temporary", [True, False]) +def test_register_table_from_dataframe_into_view(ctx, temporary): + df = ctx.from_pydict({"a": [1, 2]}) + table = df.into_view(temporary=temporary) + assert isinstance(table, Table) + if temporary: + assert table.kind == "temporary" + else: + assert table.kind == "view" + + ctx.register_table("view_tbl", table) + result = ctx.sql("SELECT * FROM view_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +def test_table_from_dataframe(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + table = Table(df) + assert isinstance(table, Table) + ctx.register_table("from_dataframe_tbl", table) + result = ctx.sql("SELECT * FROM from_dataframe_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +def test_table_from_dataframe_internal(ctx): + df = ctx.from_pydict({"a": [1, 2]}) + table = Table(df.df) + assert isinstance(table, Table) + ctx.register_table("from_internal_dataframe_tbl", table) + result = ctx.sql("SELECT * FROM 
from_internal_dataframe_tbl").collect() + assert [b.to_pydict() for b in result] == [{"a": [1, 2]}] + + +def test_register_dataset(ctx): + # create a RecordBatch and register it as a pyarrow.dataset.Dataset + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + ctx.register_dataset("t", dataset) + + assert ctx.catalog().schema().names() == {"t"} + + result = ctx.sql("SELECT a+b, a-b FROM t").collect() + + assert result[0].column(0) == pa.array([5, 7, 9]) + assert result[0].column(1) == pa.array([-3, -3, -3]) + + +def test_dataset_filter(ctx, capfd): + # create a RecordBatch and register it as a pyarrow.dataset.Dataset + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + ctx.register_dataset("t", dataset) + + assert ctx.catalog().schema().names() == {"t"} + df = ctx.sql("SELECT a+b, a-b FROM t WHERE a BETWEEN 2 and 3 AND b > 5") + + # Make sure the filter was pushed down in Physical Plan + df.explain() + captured = capfd.readouterr() + assert "filter_expr=(((a >= 2) and (a <= 3)) and (b > 5))" in captured.out + + result = df.collect() + + assert result[0].column(0) == pa.array([9]) + assert result[0].column(1) == pa.array([-3]) + + +def test_dataset_count(ctx): + # `datafusion-python` issue: https://github.com/apache/datafusion-python/issues/800 + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + ctx.register_dataset("t", dataset) + + # Testing the dataframe API + df = ctx.table("t") + assert df.count() == 3 + + # Testing the SQL API + count = ctx.sql("SELECT COUNT(*) FROM t") + count = count.collect() + assert count[0].column(0) == pa.array([3]) + + +def test_pyarrow_predicate_pushdown_is_null(ctx, capfd): + """Ensure that pyarrow filter gets pushed down for `IsNull`""" + # create a RecordBatch and register it as a pyarrow.dataset.Dataset + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6]), pa.array([7, None, 9])], + names=["a", "b", "c"], + ) + dataset = ds.dataset([batch]) + ctx.register_dataset("t", dataset) + # Make sure the filter was pushed down in Physical Plan + df = ctx.sql("SELECT a FROM t WHERE c is NULL") + df.explain() + captured = capfd.readouterr() + assert "filter_expr=is_null(c, {nan_is_null=false})" in captured.out + + result = df.collect() + assert result[0].column(0) == pa.array([2]) + + +def test_pyarrow_predicate_pushdown_timestamp(ctx, tmpdir, capfd): + """Ensure that pyarrow filter gets pushed down for timestamp""" + # Ref: https://github.com/apache/datafusion-python/issues/703 + + # create pyarrow dataset with no actual files + col_type = pa.timestamp("ns", "+00:00") + nyd_2000 = pa.scalar(dt.datetime(2000, 1, 1, tzinfo=dt.timezone.utc), col_type) + pa_dataset_fs = pa.fs.SubTreeFileSystem(str(tmpdir), pa.fs.LocalFileSystem()) + pa_dataset_format = pa.dataset.ParquetFileFormat() + pa_dataset_partition = pa.dataset.field("a") <= nyd_2000 + fragments = [ + # NOTE: we never actually make this file. 
+ # Working predicate pushdown means it never gets accessed + pa_dataset_format.make_fragment( + "1.parquet", + filesystem=pa_dataset_fs, + partition_expression=pa_dataset_partition, + ) + ] + pa_dataset = pa.dataset.FileSystemDataset( + fragments, + pa.schema([pa.field("a", col_type)]), + pa_dataset_format, + pa_dataset_fs, + ) + + ctx.register_dataset("t", pa_dataset) + + # the partition for our only fragment is for a < 2000-01-01. + # so querying for a > 2024-01-01 should not touch any files + df = ctx.sql("SELECT * FROM t WHERE a > '2024-01-01T00:00:00+00:00'") + assert df.collect() == [] + + +def test_dataset_filter_nested_data(ctx): + # create Arrow StructArrays to test nested data types + data = pa.StructArray.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + batch = pa.RecordBatch.from_arrays( + [data], + names=["nested_data"], + ) + dataset = ds.dataset([batch]) + ctx.register_dataset("t", dataset) + + assert ctx.catalog().schema().names() == {"t"} + + df = ctx.table("t") + + # This filter will not be pushed down to DatasetExec since it + # isn't supported + df = df.filter(column("nested_data")["b"] > literal(5)).select( + column("nested_data")["a"] + column("nested_data")["b"], + column("nested_data")["a"] - column("nested_data")["b"], + ) + + result = df.collect() + + assert result[0].column(0) == pa.array([9]) + assert result[0].column(1) == pa.array([-3]) + + +def test_table_exist(ctx): + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + ctx.register_dataset("t", dataset) + + assert ctx.table_exist("t") is True + + +def test_table_not_found(ctx): + from uuid import uuid4 + + with pytest.raises(KeyError): + ctx.table(f"not-found-{uuid4()}") + + +def test_read_json(ctx): + path = pathlib.Path(__file__).parent.resolve() + + # Default + test_data_path = path / "data_test_context" / "data.json" + df = ctx.read_json(test_data_path) + result = df.collect() + + assert result[0].column(0) == pa.array(["a", "b", "c"]) + assert result[0].column(1) == pa.array([1, 2, 3]) + + # Schema + schema = pa.schema( + [ + pa.field("A", pa.string(), nullable=True), + ] + ) + df = ctx.read_json(test_data_path, schema=schema) + result = df.collect() + + assert result[0].column(0) == pa.array(["a", "b", "c"]) + assert result[0].schema == schema + + # File extension + test_data_path = path / "data_test_context" / "data.json" + df = ctx.read_json(test_data_path, file_extension=".json") + result = df.collect() + + assert result[0].column(0) == pa.array(["a", "b", "c"]) + assert result[0].column(1) == pa.array([1, 2, 3]) + + +def test_read_json_compressed(ctx, tmp_path): + path = pathlib.Path(__file__).parent.resolve() + test_data_path = path / "data_test_context" / "data.json" + + # File compression type + gzip_path = tmp_path / "data.json.gz" + + with ( + pathlib.Path.open(test_data_path, "rb") as csv_file, + gzip.open(gzip_path, "wb") as gzipped_file, + ): + gzipped_file.writelines(csv_file) + + df = ctx.read_json(gzip_path, file_extension=".gz", file_compression_type="gz") + result = df.collect() + + assert result[0].column(0) == pa.array(["a", "b", "c"]) + assert result[0].column(1) == pa.array([1, 2, 3]) + + +def test_read_csv(ctx): + csv_df = ctx.read_csv(path="testing/data/csv/aggregate_test_100.csv") + csv_df.select(column("c1")).show() + + +def test_read_csv_list(ctx): + csv_df = ctx.read_csv(path=["testing/data/csv/aggregate_test_100.csv"]) + expected = csv_df.count() * 2 + 
+ double_csv_df = ctx.read_csv( + path=[ + "testing/data/csv/aggregate_test_100.csv", + "testing/data/csv/aggregate_test_100.csv", + ] + ) + actual = double_csv_df.count() + + double_csv_df.select(column("c1")).show() + assert actual == expected + + +def test_read_csv_compressed(ctx, tmp_path): + test_data_path = pathlib.Path("testing/data/csv/aggregate_test_100.csv") + + expected = ctx.read_csv(test_data_path).collect() + + # File compression type + gzip_path = tmp_path / "aggregate_test_100.csv.gz" + + with ( + pathlib.Path.open(test_data_path, "rb") as csv_file, + gzip.open(gzip_path, "wb") as gzipped_file, + ): + gzipped_file.writelines(csv_file) + + csv_df = ctx.read_csv(gzip_path, file_extension=".gz", file_compression_type="gz") + assert csv_df.collect() == expected + + csv_df = ctx.read_csv( + gzip_path, + options=CsvReadOptions(file_extension=".gz", file_compression_type="gz"), + ) + assert csv_df.collect() == expected + + +def test_read_parquet(ctx): + parquet_df = ctx.read_parquet(path="parquet/data/alltypes_plain.parquet") + parquet_df.show() + assert parquet_df is not None + + path = pathlib.Path.cwd() / "parquet/data/alltypes_plain.parquet" + parquet_df = ctx.read_parquet(path=path) + assert parquet_df is not None + + +def test_read_avro(ctx): + avro_df = ctx.read_avro(path="testing/data/avro/alltypes_plain.avro") + avro_df.show() + assert avro_df is not None + + path = pathlib.Path.cwd() / "testing/data/avro/alltypes_plain.avro" + avro_df = ctx.read_avro(path=path) + assert avro_df is not None + + +def test_create_sql_options(): + SQLOptions() + + +def test_sql_with_options_no_ddl(ctx): + sql = "CREATE TABLE IF NOT EXISTS valuetable AS VALUES(1,'HELLO'),(12,'DATAFUSION')" + ctx.sql(sql) + options = SQLOptions().with_allow_ddl(allow=False) + with pytest.raises(Exception, match="DDL"): + ctx.sql_with_options(sql, options=options) + + +def test_sql_with_options_no_dml(ctx): + table_name = "t" + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + dataset = ds.dataset([batch]) + ctx.register_dataset(table_name, dataset) + sql = f'INSERT INTO "{table_name}" VALUES (1, 2), (2, 3);' + ctx.sql(sql) + options = SQLOptions().with_allow_dml(allow=False) + with pytest.raises(Exception, match="DML"): + ctx.sql_with_options(sql, options=options) + + +def test_sql_with_options_no_statements(ctx): + sql = "SET time zone = 1;" + ctx.sql(sql) + options = SQLOptions().with_allow_statements(allow=False) + with pytest.raises(Exception, match="SetVariable"): + ctx.sql_with_options(sql, options=options) + + +@pytest.fixture +def batch(): + return pa.RecordBatch.from_arrays( + [pa.array([4, 5, 6])], + names=["a"], + ) + + +def test_create_dataframe_with_global_ctx(batch): + ctx = SessionContext.global_ctx() + + df = ctx.create_dataframe([[batch]]) + + result = df.collect()[0].column(0) + + assert result == pa.array([4, 5, 6]) + + +def test_csv_read_options_builder_pattern(): + """Test CsvReadOptions builder pattern.""" + from datafusion import CsvReadOptions + + options = ( + CsvReadOptions() + .with_has_header(False) # noqa: FBT003 + .with_delimiter("|") + .with_quote("'") + .with_schema_infer_max_records(2000) + .with_truncated_rows(True) # noqa: FBT003 + .with_newlines_in_values(True) # noqa: FBT003 + .with_file_extension(".tsv") + ) + assert options.has_header is False + assert options.delimiter == "|" + assert options.quote == "'" + assert options.schema_infer_max_records == 2000 + assert options.truncated_rows is True + assert 
options.newlines_in_values is True + assert options.file_extension == ".tsv" + + +def read_csv_with_options_inner( + tmp_path: pathlib.Path, + csv_content: str, + options: CsvReadOptions, + expected: pa.RecordBatch, + as_read: bool, + global_ctx: bool, +) -> None: + from datafusion import SessionContext + + # Create a test CSV file + group_dir = tmp_path / "group=a" + group_dir.mkdir(exist_ok=True) + + csv_path = group_dir / "test.csv" + csv_path.write_text(csv_content, newline="\n") + + ctx = SessionContext() + + if as_read: + if global_ctx: + from datafusion.io import read_csv + + df = read_csv(str(tmp_path), options=options) + else: + df = ctx.read_csv(str(tmp_path), options=options) + else: + ctx.register_csv("test_table", str(tmp_path), options=options) + df = ctx.sql("SELECT * FROM test_table") + df.show() + + # Verify the data + result = df.collect() + assert len(result) == 1 + assert result[0] == expected + + +@pytest.mark.parametrize( + ("as_read", "global_ctx"), + [ + (True, True), + (True, False), + (False, False), + ], +) +def test_read_csv_with_options(tmp_path, as_read, global_ctx): + """Test reading CSV with CsvReadOptions.""" + + csv_content = "Alice;30;|New York; NY|\nBob;25\n#Charlie;35;Paris\nPhil;75;Detroit' MI\nKarin;50;|Stockholm\nSweden|" # noqa: E501 + + # Some of the read options are difficult to test in combination + # such as schema and schema_infer_max_records so run multiple tests + # file_sort_order doesn't impact reading, but included here to ensure + # all options parse correctly + options = CsvReadOptions( + has_header=False, + delimiter=";", + quote="|", + terminator="\n", + escape="\\", + comment="#", + newlines_in_values=True, + schema_infer_max_records=1, + null_regex="[pP]+aris", + truncated_rows=True, + file_sort_order=[[column("column_1").sort(), column("column_2")], ["column_3"]], + ) + + expected = pa.RecordBatch.from_arrays( + [ + pa.array(["Alice", "Bob", "Phil", "Karin"]), + pa.array([30, 25, 75, 50]), + pa.array(["New York; NY", None, "Detroit' MI", "Stockholm\nSweden"]), + ], + names=["column_1", "column_2", "column_3"], + ) + + read_csv_with_options_inner( + tmp_path, csv_content, options, expected, as_read, global_ctx + ) + + schema = pa.schema( + [ + pa.field("name", pa.string(), nullable=False), + pa.field("age", pa.float32(), nullable=False), + pa.field("location", pa.string(), nullable=True), + ] + ) + options.with_schema(schema) + + expected = pa.RecordBatch.from_arrays( + [ + pa.array(["Alice", "Bob", "Phil", "Karin"]), + pa.array([30.0, 25.0, 75.0, 50.0]), + pa.array(["New York; NY", None, "Detroit' MI", "Stockholm\nSweden"]), + ], + schema=schema, + ) + + read_csv_with_options_inner( + tmp_path, csv_content, options, expected, as_read, global_ctx + ) + + csv_content = "name,age\nAlice,30\nBob,25\nCharlie,35\nDiego,40\nEmily,15" + + expected = pa.RecordBatch.from_arrays( + [ + pa.array(["Alice", "Bob", "Charlie", "Diego", "Emily"]), + pa.array([30, 25, 35, 40, 15]), + pa.array(["a", "a", "a", "a", "a"]), + ], + schema=pa.schema( + [ + pa.field("name", pa.string(), nullable=True), + pa.field("age", pa.int64(), nullable=True), + pa.field("group", pa.string(), nullable=False), + ] + ), + ) + options = CsvReadOptions( + table_partition_cols=[("group", pa.string())], + ) + + read_csv_with_options_inner( + tmp_path, csv_content, options, expected, as_read, global_ctx + ) diff --git a/python/tests/test_dataframe.py b/python/tests/test_dataframe.py new file mode 100644 index 000000000..759d6278c --- /dev/null +++ 
b/python/tests/test_dataframe.py @@ -0,0 +1,3571 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. +import ctypes +import datetime +import itertools +import os +import re +import threading +import time +from pathlib import Path +from typing import Any + +import pyarrow as pa +import pyarrow.parquet as pq +import pytest +from datafusion import ( + DataFrame, + InsertOp, + ParquetColumnOptions, + ParquetWriterOptions, + RecordBatch, + SessionContext, + WindowFrame, + column, + literal, + udf, +) +from datafusion import ( + col as df_col, +) +from datafusion import ( + functions as f, +) +from datafusion.dataframe import DataFrameWriteOptions +from datafusion.dataframe_formatter import ( + DataFrameHtmlFormatter, + configure_formatter, + get_formatter, + reset_formatter, +) +from datafusion.expr import EXPR_TYPE_ERROR, Window +from pyarrow.csv import write_csv + +pa_cffi = pytest.importorskip("pyarrow.cffi") + +MB = 1024 * 1024 + + +@pytest.fixture +def ctx(): + return SessionContext() + + +@pytest.fixture +def df(ctx): + # create a RecordBatch and a new DataFrame from it + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6]), pa.array([8, 5, 8])], + names=["a", "b", "c"], + ) + + return ctx.from_arrow(batch) + + +@pytest.fixture +def large_df(): + ctx = SessionContext() + + rows = 100000 + data = { + "a": list(range(rows)), + "b": [f"s-{i}" for i in range(rows)], + "c": [float(i + 0.1) for i in range(rows)], + } + batch = pa.record_batch(data) + + return ctx.from_arrow(batch) + + +@pytest.fixture +def large_multi_batch_df(): + """Create a DataFrame with multiple record batches for testing stream behavior. + + This fixture creates 10 batches of 10,000 rows each (100,000 rows total), + ensuring the DataFrame spans multiple batches. This is essential for testing + that memory limits actually cause early stream termination rather than + truncating all collected data. 
+ """ + ctx = SessionContext() + + # Create multiple batches, each with 10,000 rows + batches = [] + rows_per_batch = 10000 + num_batches = 10 + + for batch_idx in range(num_batches): + start_row = batch_idx * rows_per_batch + end_row = start_row + rows_per_batch + data = { + "a": list(range(start_row, end_row)), + "b": [f"s-{i}" for i in range(start_row, end_row)], + "c": [float(i + 0.1) for i in range(start_row, end_row)], + } + batch = pa.record_batch(data) + batches.append(batch) + + # Register as record batches to maintain multi-batch structure + # Using [batches] wraps list in another list as required by register_record_batches + ctx.register_record_batches("large_multi_batch_data", [batches]) + return ctx.table("large_multi_batch_data") + + +@pytest.fixture +def struct_df(): + ctx = SessionContext() + + # create a RecordBatch and a new DataFrame from it + batch = pa.RecordBatch.from_arrays( + [pa.array([{"c": 1}, {"c": 2}, {"c": 3}]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + + return ctx.create_dataframe([[batch]]) + + +@pytest.fixture +def nested_df(): + ctx = SessionContext() + + # create a RecordBatch and a new DataFrame from it + # Intentionally make each array of different length + batch = pa.RecordBatch.from_arrays( + [pa.array([[1], [2, 3], [4, 5, 6], None]), pa.array([7, 8, 9, 10])], + names=["a", "b"], + ) + + return ctx.create_dataframe([[batch]]) + + +@pytest.fixture +def aggregate_df(): + ctx = SessionContext() + ctx.register_csv("test", "testing/data/csv/aggregate_test_100.csv") + return ctx.sql("select c1, sum(c2) from test group by c1") + + +@pytest.fixture +def partitioned_df(): + ctx = SessionContext() + + # create a RecordBatch and a new DataFrame from it + batch = pa.RecordBatch.from_arrays( + [ + pa.array([0, 1, 2, 3, 4, 5, 6]), + pa.array([7, None, 7, 8, 9, None, 9]), + pa.array(["A", "A", "A", "A", "B", "B", "B"]), + ], + names=["a", "b", "c"], + ) + + return ctx.create_dataframe([[batch]]) + + +@pytest.fixture +def clean_formatter_state(): + """Reset the HTML formatter after each test.""" + reset_formatter() + + +@pytest.fixture +def null_df(): + """Create a DataFrame with null values of different types.""" + ctx = SessionContext() + + # Create a RecordBatch with nulls across different types + batch = pa.RecordBatch.from_arrays( + [ + pa.array([1, None, 3, None], type=pa.int64()), + pa.array([4.5, 6.7, None, None], type=pa.float64()), + pa.array(["a", None, "c", None], type=pa.string()), + pa.array([True, None, False, None], type=pa.bool_()), + pa.array( + [10957, None, 18993, None], type=pa.date32() + ), # 2000-01-01, null, 2022-01-01, null + pa.array( + [946684800000, None, 1640995200000, None], type=pa.date64() + ), # 2000-01-01, null, 2022-01-01, null + ], + names=[ + "int_col", + "float_col", + "str_col", + "bool_col", + "date32_col", + "date64_col", + ], + ) + + return ctx.create_dataframe([[batch]]) + + +# custom style for testing with html formatter +class CustomStyleProvider: + def get_cell_style(self) -> str: + return ( + "background-color: #f5f5f5; color: #333; padding: 8px; border: " + "1px solid #ddd;" + ) + + def get_header_style(self) -> str: + return ( + "background-color: #4285f4; color: white; font-weight: bold; " + "padding: 10px; border: 1px solid #3367d6;" + ) + + +def count_table_rows(html_content: str) -> int: + """Count the number of table rows in HTML content. 
+
+    Args:
+        html_content: HTML string to analyze
+
+    Returns:
+        Number of table rows found (number of <tr> tags)
+    """
+    return len(re.findall(r"<tr", html_content))
+
+
+def test_filter(df):
+    df1 = df.filter(column("a") > literal(2)).select(
+        column("a") + column("b"),
+        column("a") - column("b"),
+    )
+
+    # execute and collect the first (and only) batch
+    result = df1.collect()[0]
+
+    assert result.column(0) == pa.array([9])
+    assert result.column(1) == pa.array([-3])
+
+    df.show()
+    # verify that if there is no filter applied, internal dataframe is unchanged
+    df2 = df.filter()
+    assert df.df == df2.df
+
+    df3 = df.filter(column("a") > literal(1), column("b") != literal(6))
+    result = df3.collect()[0]
+
+    assert result.column(0) == pa.array([2])
+    assert result.column(1) == pa.array([5])
+    assert result.column(2) == pa.array([5])
+
+
+def test_filter_string_predicates(df):
+    df_str = df.filter("a > 2")
+    result = df_str.collect()[0]
+
+    assert result.column(0) == pa.array([3])
+    assert result.column(1) == pa.array([6])
+    assert result.column(2) == pa.array([8])
+
+    df_mixed = df.filter("a > 1", column("b") != literal(6))
+    result_mixed = df_mixed.collect()[0]
+
+    assert result_mixed.column(0) == pa.array([2])
+    assert result_mixed.column(1) == pa.array([5])
+    assert result_mixed.column(2) == pa.array([5])
+
+    df_strings = df.filter("a > 1", "b < 6")
+    result_strings = df_strings.collect()[0]
+
+    assert result_strings.column(0) == pa.array([2])
+    assert result_strings.column(1) == pa.array([5])
+    assert result_strings.column(2) == pa.array([5])
+
+
+def test_parse_sql_expr(df):
+    plan1 = df.filter(df.parse_sql_expr("a > 2")).logical_plan()
+    plan2 = df.filter(column("a") > literal(2)).logical_plan()
+    # object equality not implemented but string representation should match
+    assert str(plan1) == str(plan2)
+
+    df1 = df.filter(df.parse_sql_expr("a > 2")).select(
+        column("a") + column("b"),
+        column("a") - column("b"),
+    )
+
+    # execute and collect the first (and only) batch
+    result = df1.collect()[0]
+
+    assert result.column(0) == pa.array([9])
+    assert result.column(1) == pa.array([-3])
+
+    df.show()
+    # verify that if there is no filter applied, internal dataframe is unchanged
+    df2 = df.filter()
+    assert df.df == df2.df
+
+    df3 = df.filter(df.parse_sql_expr("a > 1"), df.parse_sql_expr("b != 6"))
+    result = df3.collect()[0]
+
+    assert result.column(0) == pa.array([2])
+    assert result.column(1) == pa.array([5])
+    assert result.column(2) == pa.array([5])
+
+
+def test_show_empty(df, capsys):
+    df_empty = df.filter(column("a") > literal(3))
+    df_empty.show()
+    captured = capsys.readouterr()
+    assert "DataFrame has no rows" in captured.out
+
+
+def test_sort(df):
+    df = df.sort(column("b").sort(ascending=False))
+
+    table = pa.Table.from_batches(df.collect())
+    expected = {"a": [3, 2, 1], "b": [6, 5, 4], "c": [8, 5, 8]}
+
+    assert table.to_pydict() == expected
+
+
+def test_sort_string_and_expression_equivalent(df):
+    from datafusion import col
+
+    result_str = df.sort("a").to_pydict()
+    result_expr = df.sort(col("a")).to_pydict()
+    assert result_str == result_expr
+
+
+def test_sort_unsupported(df):
+    with pytest.raises(
+        TypeError,
+        match=f"Expected Expr or column name.*{re.escape(EXPR_TYPE_ERROR)}",
+    ):
+        df.sort(1)
+
+
+def test_aggregate_string_and_expression_equivalent(df):
+    from datafusion import col
+
+    result_str = df.aggregate("a", [f.count()]).sort("a").to_pydict()
+    result_expr = df.aggregate(col("a"), [f.count()]).sort("a").to_pydict()
+    assert result_str == result_expr
+
+
+def test_aggregate_tuple_group_by(df):
+    result_list = df.aggregate(["a"],
[f.count()]).sort("a").to_pydict() + result_tuple = df.aggregate(("a",), [f.count()]).sort("a").to_pydict() + assert result_tuple == result_list + + +def test_aggregate_tuple_aggs(df): + result_list = df.aggregate("a", [f.count()]).sort("a").to_pydict() + result_tuple = df.aggregate("a", (f.count(),)).sort("a").to_pydict() + assert result_tuple == result_list + + +def test_filter_string_equivalent(df): + df1 = df.filter("a > 1").to_pydict() + df2 = df.filter(column("a") > literal(1)).to_pydict() + assert df1 == df2 + + +def test_filter_string_invalid(df): + with pytest.raises(Exception) as excinfo: + df.filter("this is not valid sql").collect() + assert "Expected Expr" not in str(excinfo.value) + + +def test_drop(df): + df = df.drop("c") + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert df.schema().names == ["a", "b"] + assert result.column(0) == pa.array([1, 2, 3]) + assert result.column(1) == pa.array([4, 5, 6]) + + +def test_limit(df): + df = df.limit(1) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert len(result.column(0)) == 1 + assert len(result.column(1)) == 1 + + +def test_limit_with_offset(df): + # only 3 rows, but limit past the end to ensure that offset is working + df = df.limit(5, offset=2) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert len(result.column(0)) == 1 + assert len(result.column(1)) == 1 + + +def test_head(df): + df = df.head(1) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.column(0) == pa.array([1]) + assert result.column(1) == pa.array([4]) + assert result.column(2) == pa.array([8]) + + +def test_tail(df): + df = df.tail(1) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.column(0) == pa.array([3]) + assert result.column(1) == pa.array([6]) + assert result.column(2) == pa.array([8]) + + +def test_with_column_sql_expression(df): + df = df.with_column("c", "a + b") + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.schema.field(0).name == "a" + assert result.schema.field(1).name == "b" + assert result.schema.field(2).name == "c" + + assert result.column(0) == pa.array([1, 2, 3]) + assert result.column(1) == pa.array([4, 5, 6]) + assert result.column(2) == pa.array([5, 7, 9]) + + +def test_with_column(df): + df = df.with_column("c", column("a") + column("b")) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.schema.field(0).name == "a" + assert result.schema.field(1).name == "b" + assert result.schema.field(2).name == "c" + + assert result.column(0) == pa.array([1, 2, 3]) + assert result.column(1) == pa.array([4, 5, 6]) + assert result.column(2) == pa.array([5, 7, 9]) + + +def test_with_columns(df): + df = df.with_columns( + (column("a") + column("b")).alias("c"), + (column("a") + column("b")).alias("d"), + [ + (column("a") + column("b")).alias("e"), + (column("a") + column("b")).alias("f"), + ], + g=(column("a") + column("b")), + ) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.schema.field(0).name == "a" + assert result.schema.field(1).name == "b" + assert result.schema.field(2).name == "c" + assert result.schema.field(3).name == "d" + assert result.schema.field(4).name == "e" + assert result.schema.field(5).name == "f" + assert result.schema.field(6).name == "g" + + assert result.column(0) == 
pa.array([1, 2, 3]) + assert result.column(1) == pa.array([4, 5, 6]) + assert result.column(2) == pa.array([5, 7, 9]) + assert result.column(3) == pa.array([5, 7, 9]) + assert result.column(4) == pa.array([5, 7, 9]) + assert result.column(5) == pa.array([5, 7, 9]) + assert result.column(6) == pa.array([5, 7, 9]) + + +def test_with_columns_str(df): + df = df.with_columns( + "a + b as c", + "a + b as d", + [ + "a + b as e", + "a + b as f", + ], + g="a + b", + ) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.schema.field(0).name == "a" + assert result.schema.field(1).name == "b" + assert result.schema.field(2).name == "c" + assert result.schema.field(3).name == "d" + assert result.schema.field(4).name == "e" + assert result.schema.field(5).name == "f" + assert result.schema.field(6).name == "g" + + assert result.column(0) == pa.array([1, 2, 3]) + assert result.column(1) == pa.array([4, 5, 6]) + assert result.column(2) == pa.array([5, 7, 9]) + assert result.column(3) == pa.array([5, 7, 9]) + assert result.column(4) == pa.array([5, 7, 9]) + assert result.column(5) == pa.array([5, 7, 9]) + assert result.column(6) == pa.array([5, 7, 9]) + + +def test_cast(df): + df = df.cast({"a": pa.float16(), "b": pa.list_(pa.uint32())}) + expected = pa.schema( + [("a", pa.float16()), ("b", pa.list_(pa.uint32())), ("c", pa.int64())] + ) + + assert df.schema() == expected + + +def test_iter_batches(df): + batches = [] + for batch in df: + batches.append(batch) # noqa: PERF402 + + # Delete DataFrame to ensure RecordBatches remain valid + del df + + assert len(batches) == 1 + + batch = batches[0] + assert isinstance(batch, RecordBatch) + pa_batch = batch.to_pyarrow() + assert pa_batch.column(0).to_pylist() == [1, 2, 3] + assert pa_batch.column(1).to_pylist() == [4, 5, 6] + assert pa_batch.column(2).to_pylist() == [8, 5, 8] + + +def test_iter_returns_datafusion_recordbatch(df): + for batch in df: + assert isinstance(batch, RecordBatch) + + +def test_execute_stream_basic(df): + stream = df.execute_stream() + batches = list(stream) + + assert len(batches) == 1 + assert isinstance(batches[0], RecordBatch) + pa_batch = batches[0].to_pyarrow() + assert pa_batch.column(0).to_pylist() == [1, 2, 3] + assert pa_batch.column(1).to_pylist() == [4, 5, 6] + assert pa_batch.column(2).to_pylist() == [8, 5, 8] + + +def test_with_column_renamed(df): + df = df.with_column("c", column("a") + column("b")).with_column_renamed("c", "sum") + + result = df.collect()[0] + + assert result.schema.field(0).name == "a" + assert result.schema.field(1).name == "b" + assert result.schema.field(2).name == "sum" + + +def test_unnest(nested_df): + nested_df = nested_df.unnest_columns("a") + + # execute and collect the first (and only) batch + result = nested_df.collect()[0] + + assert result.column(0) == pa.array([1, 2, 3, 4, 5, 6, None]) + assert result.column(1) == pa.array([7, 8, 8, 9, 9, 9, 10]) + + +def test_unnest_without_nulls(nested_df): + nested_df = nested_df.unnest_columns("a", preserve_nulls=False) + + # execute and collect the first (and only) batch + result = nested_df.collect()[0] + + assert result.column(0) == pa.array([1, 2, 3, 4, 5, 6]) + assert result.column(1) == pa.array([7, 8, 8, 9, 9, 9]) + + +def test_join(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + df = ctx.create_dataframe([[batch]], "l") + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2]), pa.array([8, 10])], + 
names=["a", "c"], + ) + df1 = ctx.create_dataframe([[batch]], "r") + + df2 = df.join(df1, on="a", how="inner") + df2 = df2.sort(column("a")) + table = pa.Table.from_batches(df2.collect()) + + expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} + assert table.to_pydict() == expected + + # Test the default behavior for dropping duplicate keys + # Since we may have a duplicate column name and pa.Table() + # hides the fact, instead we need to explicitly check the + # resultant arrays. + df2 = df.join( + df1, left_on="a", right_on="a", how="inner", coalesce_duplicate_keys=True + ) + df2 = df2.sort(column("a")) + result = df2.collect()[0] + assert result.num_columns == 3 + assert result.column(0) == pa.array([1, 2], pa.int64()) + assert result.column(1) == pa.array([4, 5], pa.int64()) + assert result.column(2) == pa.array([8, 10], pa.int64()) + + df2 = df.join( + df1, left_on="a", right_on="a", how="inner", coalesce_duplicate_keys=False + ) + df2 = df2.sort(column("l.a")) + result = df2.collect()[0] + assert result.num_columns == 4 + assert result.column(0) == pa.array([1, 2], pa.int64()) + assert result.column(1) == pa.array([4, 5], pa.int64()) + assert result.column(2) == pa.array([1, 2], pa.int64()) + assert result.column(3) == pa.array([8, 10], pa.int64()) + + # Verify we don't make a breaking change to pre-43.0.0 + # where users would pass join_keys as a positional argument + df2 = df.join(df1, (["a"], ["a"]), how="inner") + df2 = df2.sort(column("a")) + table = pa.Table.from_batches(df2.collect()) + + expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} + assert table.to_pydict() == expected + + +def test_join_invalid_params(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + df = ctx.create_dataframe([[batch]], "l") + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2]), pa.array([8, 10])], + names=["a", "c"], + ) + df1 = ctx.create_dataframe([[batch]], "r") + + with pytest.deprecated_call(): + df2 = df.join(df1, join_keys=(["a"], ["a"]), how="inner") + df2.show() + df2 = df2.sort(column("a")) + table = pa.Table.from_batches(df2.collect()) + + expected = {"a": [1, 2], "c": [8, 10], "b": [4, 5]} + assert table.to_pydict() == expected + + with pytest.raises( + ValueError, match=r"`left_on` or `right_on` should not provided with `on`" + ): + df2 = df.join(df1, on="a", how="inner", right_on="test") + + with pytest.raises( + ValueError, match=r"`left_on` and `right_on` should both be provided." + ): + df2 = df.join(df1, left_on="a", how="inner") + + with pytest.raises( + ValueError, match=r"either `on` or `left_on` and `right_on` should be provided." 
+ ): + df2 = df.join(df1, how="inner") + + +def test_join_on(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + df = ctx.create_dataframe([[batch]], "l") + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2]), pa.array([-8, 10])], + names=["a", "c"], + ) + df1 = ctx.create_dataframe([[batch]], "r") + + df2 = df.join_on(df1, column("l.a").__eq__(column("r.a")), how="inner") + df2.show() + df2 = df2.sort(column("l.a")) + table = pa.Table.from_batches(df2.collect()) + + expected = {"a": [1, 2], "c": [-8, 10], "b": [4, 5]} + assert table.to_pydict() == expected + + df3 = df.join_on( + df1, + column("l.a").__eq__(column("r.a")), + column("l.a").__lt__(column("r.c")), + how="inner", + ) + df3.show() + df3 = df3.sort(column("l.a")) + table = pa.Table.from_batches(df3.collect()) + expected = {"a": [2], "c": [10], "b": [5]} + assert table.to_pydict() == expected + + +def test_join_full_with_drop_duplicate_keys(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 3, 5, 7, 9]), pa.array([True, True, True, True, True])], + names=["log_time", "key_frame"], + ) + key_frame = ctx.create_dataframe([[batch]]) + + batch = pa.RecordBatch.from_arrays( + [pa.array([2, 4, 6, 8, 10])], + names=["log_time"], + ) + query_times = ctx.create_dataframe([[batch]]) + + merged = query_times.join( + key_frame, + left_on="log_time", + right_on="log_time", + how="full", + coalesce_duplicate_keys=True, + ) + merged = merged.sort(column("log_time")) + result = merged.collect()[0] + + assert result.num_columns == 2 + assert result.column(0).to_pylist() == [1, 2, 3, 4, 5, 6, 7, 8, 9, 10] + + +def test_join_on_invalid_expr(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2]), pa.array([4, 5])], + names=["a", "b"], + ) + df = ctx.create_dataframe([[batch]], "l") + df1 = ctx.create_dataframe([[batch]], "r") + + with pytest.raises( + TypeError, match=r"Use col\(\)/column\(\) or lit\(\)/literal\(\)" + ): + df.join_on(df1, "a") + + +def test_aggregate_invalid_aggs(df): + with pytest.raises( + TypeError, match=r"Use col\(\)/column\(\) or lit\(\)/literal\(\)" + ): + df.aggregate([], "a") + + +def test_distinct(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3, 1, 2, 3]), pa.array([4, 5, 6, 4, 5, 6])], + names=["a", "b"], + ) + df_a = ctx.create_dataframe([[batch]]).distinct().sort(column("a")) + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + df_b = ctx.create_dataframe([[batch]]).sort(column("a")) + + assert df_a.collect() == df_b.collect() + + +data_test_window_functions = [ + ( + "row", + f.row_number(order_by=[column("b"), column("a").sort(ascending=False)]), + [4, 2, 3, 5, 7, 1, 6], + ), + ( + "row_w_params", + f.row_number( + order_by=[column("b"), column("a")], + partition_by=[column("c")], + ), + [2, 1, 3, 4, 2, 1, 3], + ), + ( + "row_w_params_no_lists", + f.row_number( + order_by=column("b"), + partition_by=column("c"), + ), + [2, 1, 3, 4, 2, 1, 3], + ), + ("rank", f.rank(order_by=[column("b")]), [3, 1, 3, 5, 6, 1, 6]), + ( + "rank_w_params", + f.rank(order_by=[column("b"), column("a")], partition_by=[column("c")]), + [2, 1, 3, 4, 2, 1, 3], + ), + ( + "rank_w_params_no_lists", + f.rank(order_by=column("a"), partition_by=column("c")), + [1, 2, 3, 4, 1, 2, 3], + ), + ( + "dense_rank", + f.dense_rank(order_by=[column("b")]), + [2, 1, 2, 3, 4, 1, 4], + ), + ( + 
"dense_rank_w_params", + f.dense_rank(order_by=[column("b"), column("a")], partition_by=[column("c")]), + [2, 1, 3, 4, 2, 1, 3], + ), + ( + "dense_rank_w_params_no_lists", + f.dense_rank(order_by=column("a"), partition_by=column("c")), + [1, 2, 3, 4, 1, 2, 3], + ), + ( + "percent_rank", + f.round(f.percent_rank(order_by=[column("b")]), literal(3)), + [0.333, 0.0, 0.333, 0.667, 0.833, 0.0, 0.833], + ), + ( + "percent_rank_w_params", + f.round( + f.percent_rank( + order_by=[column("b"), column("a")], partition_by=[column("c")] + ), + literal(3), + ), + [0.333, 0.0, 0.667, 1.0, 0.5, 0.0, 1.0], + ), + ( + "percent_rank_w_params_no_lists", + f.round( + f.percent_rank(order_by=column("a"), partition_by=column("c")), + literal(3), + ), + [0.0, 0.333, 0.667, 1.0, 0.0, 0.5, 1.0], + ), + ( + "cume_dist", + f.round(f.cume_dist(order_by=[column("b")]), literal(3)), + [0.571, 0.286, 0.571, 0.714, 1.0, 0.286, 1.0], + ), + ( + "cume_dist_w_params", + f.round( + f.cume_dist( + order_by=[column("b"), column("a")], partition_by=[column("c")] + ), + literal(3), + ), + [0.5, 0.25, 0.75, 1.0, 0.667, 0.333, 1.0], + ), + ( + "cume_dist_w_params_no_lists", + f.round( + f.cume_dist(order_by=column("a"), partition_by=column("c")), + literal(3), + ), + [0.25, 0.5, 0.75, 1.0, 0.333, 0.667, 1.0], + ), + ( + "ntile", + f.ntile(2, order_by=[column("b")]), + [1, 1, 1, 2, 2, 1, 2], + ), + ( + "ntile_w_params", + f.ntile(2, order_by=[column("b"), column("a")], partition_by=[column("c")]), + [1, 1, 2, 2, 1, 1, 2], + ), + ( + "ntile_w_params_no_lists", + f.ntile(2, order_by=column("b"), partition_by=column("c")), + [1, 1, 2, 2, 1, 1, 2], + ), + ("lead", f.lead(column("b"), order_by=[column("b")]), [7, None, 8, 9, 9, 7, None]), + ( + "lead_w_params", + f.lead( + column("b"), + shift_offset=2, + default_value=-1, + order_by=[column("b"), column("a")], + partition_by=[column("c")], + ), + [8, 7, -1, -1, -1, 9, -1], + ), + ( + "lead_w_params_no_lists", + f.lead( + column("b"), + shift_offset=2, + default_value=-1, + order_by=column("b"), + partition_by=column("c"), + ), + [8, 7, -1, -1, -1, 9, -1], + ), + ("lag", f.lag(column("b"), order_by=[column("b")]), [None, None, 7, 7, 8, None, 9]), + ( + "lag_w_params", + f.lag( + column("b"), + shift_offset=2, + default_value=-1, + order_by=[column("b"), column("a")], + partition_by=[column("c")], + ), + [-1, -1, None, 7, -1, -1, None], + ), + ( + "lag_w_params_no_lists", + f.lag( + column("b"), + shift_offset=2, + default_value=-1, + order_by=column("b"), + partition_by=column("c"), + ), + [-1, -1, None, 7, -1, -1, None], + ), + ( + "first_value", + f.first_value(column("a")).over( + Window(partition_by=[column("c")], order_by=[column("b")]) + ), + [1, 1, 1, 1, 5, 5, 5], + ), + ( + "first_value_without_list_args", + f.first_value(column("a")).over( + Window(partition_by=column("c"), order_by=column("b")) + ), + [1, 1, 1, 1, 5, 5, 5], + ), + ( + "first_value_order_by_string", + f.first_value(column("a")).over( + Window(partition_by=[column("c")], order_by="b") + ), + [1, 1, 1, 1, 5, 5, 5], + ), + ( + "last_value", + f.last_value(column("a")).over( + Window( + partition_by=[column("c")], + order_by=[column("b")], + window_frame=WindowFrame("rows", None, None), + ) + ), + [3, 3, 3, 3, 6, 6, 6], + ), + ( + "3rd_value", + f.nth_value(column("b"), 3).over(Window(order_by=[column("a")])), + [None, None, 7, 7, 7, 7, 7], + ), + ( + "avg", + f.round(f.avg(column("b")).over(Window(order_by=[column("a")])), literal(3)), + [7.0, 7.0, 7.0, 7.333, 7.75, 7.75, 8.0], + ), +] + + 
+@pytest.mark.parametrize(("name", "expr", "result"), data_test_window_functions) +def test_window_functions(partitioned_df, name, expr, result): + df = partitioned_df.select( + column("a"), column("b"), column("c"), f.alias(expr, name) + ) + df.sort(column("a")).show() + table = pa.Table.from_batches(df.collect()) + + expected = { + "a": [0, 1, 2, 3, 4, 5, 6], + "b": [7, None, 7, 8, 9, None, 9], + "c": ["A", "A", "A", "A", "B", "B", "B"], + name: result, + } + + assert table.sort_by("a").to_pydict() == expected + + +@pytest.mark.parametrize("partition", ["c", df_col("c")]) +def test_rank_partition_by_accepts_string(partitioned_df, partition): + """Passing a string to partition_by should match using col().""" + df = partitioned_df.select( + f.rank(order_by=column("a"), partition_by=partition).alias("r") + ) + table = pa.Table.from_batches(df.sort(column("a")).collect()) + assert table.column("r").to_pylist() == [1, 2, 3, 4, 1, 2, 3] + + +@pytest.mark.parametrize("partition", ["c", df_col("c")]) +def test_window_partition_by_accepts_string(partitioned_df, partition): + """Window.partition_by accepts string identifiers.""" + expr = f.first_value(column("a")).over( + Window(partition_by=partition, order_by=column("b")) + ) + df = partitioned_df.select(expr.alias("fv")) + table = pa.Table.from_batches(df.sort(column("a")).collect()) + assert table.column("fv").to_pylist() == [1, 1, 1, 1, 5, 5, 5] + + +@pytest.mark.parametrize( + ("units", "start_bound", "end_bound"), + [ + (units, start_bound, end_bound) + for units in ("rows", "range") + for start_bound in (None, 0, 1) + for end_bound in (None, 0, 1) + ] + + [ + ("groups", 0, 0), + ], +) +def test_valid_window_frame(units, start_bound, end_bound): + WindowFrame(units, start_bound, end_bound) + + +@pytest.mark.parametrize( + ("units", "start_bound", "end_bound"), + [ + ("invalid-units", 0, None), + ("invalid-units", None, 0), + ("invalid-units", None, None), + ("groups", None, 0), + ("groups", 0, None), + ("groups", None, None), + ], +) +def test_invalid_window_frame(units, start_bound, end_bound): + with pytest.raises(NotImplementedError, match=f"(?i){units}"): + WindowFrame(units, start_bound, end_bound) + + +def test_window_frame_defaults_match_postgres(partitioned_df): + col_a = column("a") + + # When order is not set, the default frame should be unbounded preceding to + # unbounded following. When order is set, the default frame is unbounded preceding + # to current row. 
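+    # In SQL frame terms: no ORDER BY behaves like RANGE BETWEEN UNBOUNDED
+    # PRECEDING AND UNBOUNDED FOLLOWING, while an ORDER BY defaults to
+    # RANGE BETWEEN UNBOUNDED PRECEDING AND CURRENT ROW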
+    no_order = f.avg(col_a).over(Window()).alias("over_no_order")
+    with_order = f.avg(col_a).over(Window(order_by=[col_a])).alias("over_with_order")
+    df = partitioned_df.select(col_a, no_order, with_order)
+
+    expected = {
+        "a": [0, 1, 2, 3, 4, 5, 6],
+        "over_no_order": [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0],
+        "over_with_order": [0.0, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0],
+    }
+
+    assert df.sort(col_a).to_pydict() == expected
+
+
+def _build_last_value_df(df):
+    return df.select(
+        f.last_value(column("a"))
+        .over(
+            Window(
+                partition_by=[column("c")],
+                order_by=[column("b")],
+                window_frame=WindowFrame("rows", None, None),
+            )
+        )
+        .alias("expr"),
+        f.last_value(column("a"))
+        .over(
+            Window(
+                partition_by=[column("c")],
+                order_by="b",
+                window_frame=WindowFrame("rows", None, None),
+            )
+        )
+        .alias("str"),
+    )
+
+
+def _build_nth_value_df(df):
+    return df.select(
+        f.nth_value(column("b"), 3).over(Window(order_by=[column("a")])).alias("expr"),
+        f.nth_value(column("b"), 3).over(Window(order_by="a")).alias("str"),
+    )
+
+
+def _build_rank_df(df):
+    return df.select(
+        f.rank(order_by=[column("b")]).alias("expr"),
+        f.rank(order_by="b").alias("str"),
+    )
+
+
+def _build_array_agg_df(df):
+    return df.aggregate(
+        [column("c")],
+        [
+            f.array_agg(column("a"), order_by=[column("a")]).alias("expr"),
+            f.array_agg(column("a"), order_by="a").alias("str"),
+        ],
+    ).sort(column("c"))
+
+
+@pytest.mark.parametrize(
+    ("builder", "expected"),
+    [
+        pytest.param(_build_last_value_df, [3, 3, 3, 3, 6, 6, 6], id="last_value"),
+        pytest.param(_build_nth_value_df, [None, None, 7, 7, 7, 7, 7], id="nth_value"),
+        pytest.param(_build_rank_df, [1, 1, 3, 3, 5, 6, 6], id="rank"),
+        pytest.param(_build_array_agg_df, [[0, 1, 2, 3], [4, 5, 6]], id="array_agg"),
+    ],
+)
+def test_order_by_string_equivalence(partitioned_df, builder, expected):
+    df = builder(partitioned_df)
+    table = pa.Table.from_batches(df.collect())
+    assert table.column("expr").to_pylist() == expected
+    assert table.column("expr").to_pylist() == table.column("str").to_pylist()
+
+
+def test_html_formatter_cell_dimension(df, clean_formatter_state):
+    """Test configuring the HTML formatter with different options."""
+    # Configure with custom settings
+    configure_formatter(
+        max_width=500,
+        max_height=200,
+        enable_cell_expansion=False,
+    )
+
+    html_output = df._repr_html_()
+
+    # Verify our configuration was applied
+    assert "max-height: 200px" in html_output
+    assert "max-width: 500px" in html_output
+    # With cell expansion disabled, we shouldn't see expandable-container elements
+    assert "expandable-container" not in html_output
+
+
+def test_html_formatter_custom_style_provider(df, clean_formatter_state):
+    """Test using custom style providers with the HTML formatter."""
+
+    # Configure with custom style provider
+    configure_formatter(style_provider=CustomStyleProvider())
+
+    html_output = df._repr_html_()
+
+    # Verify our custom styles were applied
+    assert "background-color: #4285f4" in html_output
+    assert "color: white" in html_output
+    assert "background-color: #f5f5f5" in html_output
+
+
+def test_html_formatter_type_formatters(df, clean_formatter_state):
+    """Test registering custom type formatters for specific data types."""
+
+    # Get current formatter and register custom formatters
+    formatter = get_formatter()
+
+    # Format integers with color based on value
+    # Using int as the type for the formatter will work since we convert
+    # Arrow scalar values to Python native types in _get_cell_value
+    def format_int(value):
+        return f'<span style="color: {"red" if value > 2 else "blue"}">{value}</span>'
+
+    formatter.register_formatter(int, format_int)
+
+    html_output = df._repr_html_()
+
+    # Our test dataframe has values 1,2,3 so we should see:
+    assert '<span style="color: blue">1</span>' in html_output
+
+
+def test_html_formatter_custom_cell_builder(df, clean_formatter_state):
+    """Test using a custom cell builder function."""
+
+    # Create a custom cell builder with distinct styling for different value ranges
+    def custom_cell_builder(value, row, col, table_id):
+        try:
+            num_value = int(value)
+            if num_value > 5:  # Values > 5 get green background with indicator
+                return (
+                    f'<td style="background-color: #d9f0d3">{value}-high</td>'
+                )
+            if num_value < 3:  # Values < 3 get blue background with indicator
+                return (
+                    f'<td style="background-color: #d3e9f0">{value}-low</td>'
+                )
+        except (ValueError, TypeError):
+            pass
+
+        # Default styling for other cells (3, 4, 5)
+        return f'<td>{value}-mid</td>'
+
+    # Set our custom cell builder
+    formatter = get_formatter()
+    formatter.set_custom_cell_builder(custom_cell_builder)
+
+    html_output = df._repr_html_()
+
+    # Extract cells with specific styling using regex
+    low_cells = re.findall(
+        r'<td style="background-color: #d3e9f0">(\d+)-low</td>', html_output
+    )
+    mid_cells = re.findall(
+        r'<td>(\d+)-mid</td>', html_output
+    )
+    high_cells = re.findall(
+        r'<td style="background-color: #d9f0d3">(\d+)-high</td>', html_output
+    )
+
+    # Sort the extracted values for consistent comparison
+    low_cells = sorted(map(int, low_cells))
+    mid_cells = sorted(map(int, mid_cells))
+    high_cells = sorted(map(int, high_cells))
+
+    # Verify specific values have the correct styling applied
+    assert low_cells == [1, 2]  # Values < 3
+    assert mid_cells == [3, 4, 5, 5]  # Values 3-5
+    assert high_cells == [6, 8, 8]  # Values > 5
+
+    # Verify the exact content with styling appears in the output
+    assert (
+        '<td style="background-color: #d3e9f0">1-low</td>'
+        in html_output
+    )
+    assert (
+        '<td style="background-color: #d3e9f0">2-low</td>'
+        in html_output
+    )
+    assert (
+        '<td>3-mid</td>' in html_output
+    )
+    assert (
+        '<td>4-mid</td>' in html_output
+    )
+    assert (
+        '<td style="background-color: #d9f0d3">6-high</td>'
+        in html_output
+    )
+    assert (
+        '<td style="background-color: #d9f0d3">8-high</td>'
+        in html_output
+    )
+
+    # Count occurrences to ensure all cells are properly styled
+    assert html_output.count("-low") == 2  # Two low values (1, 2)
+    assert html_output.count("-mid") == 4  # Four mid values (3, 4, 5, 5)
+    assert html_output.count("-high") == 3  # Three high values (6, 8, 8)
+
+    # Create a custom cell builder that changes background color based on value
+    def custom_cell_builder(value, row, col, table_id):
+        # Handle numeric values regardless of their exact type
+        try:
+            num_value = int(value)
+            if num_value > 5:  # Values > 5 get green background
+                return f'<td style="background-color: #d9f0d3">{value}</td>'
+            if num_value < 3:  # Values < 3 get light blue background
+                return f'<td style="background-color: #d3e9f0">{value}</td>'
+        except (ValueError, TypeError):
+            pass
+
+        # Default styling for other cells
+        return f'<td>{value}</td>'
+
+    # Set our custom cell builder
+    formatter = get_formatter()
+    formatter.set_custom_cell_builder(custom_cell_builder)
+
+    html_output = df._repr_html_()
+
+    # Verify our custom cell styling was applied
+    assert "background-color: #d3e9f0" in html_output  # For values 1,2
+
+
+def test_html_formatter_custom_header_builder(df, clean_formatter_state):
+    """Test using a custom header builder function."""
+
+    # Create a custom header builder with tooltips
+    def custom_header_builder(field):
+        tooltips = {
+            "a": "Primary key column",
+            "b": "Secondary values",
+            "c": "Additional data",
+        }
+        tooltip = tooltips.get(field.name, "")
+        return (
+            f'<th style="background-color: #333; color: white" '
+            f'title="{tooltip}">{field.name}</th>'
+        )
+
+    # Set our custom header builder
+    formatter = get_formatter()
+    formatter.set_custom_header_builder(custom_header_builder)
+
+    html_output = df._repr_html_()
+
+    # Verify our custom headers were applied
+    assert 'title="Primary key column"' in html_output
+    assert 'title="Secondary values"' in html_output
+    assert "background-color: #333; color: white" in html_output
+
+
+def test_html_formatter_complex_customization(df, clean_formatter_state):
+    """Test combining multiple customization options together."""
+
+    # Create a dark mode style provider
+    class DarkModeStyleProvider:
+        def get_cell_style(self) -> str:
+            return (
+                "background-color: #222; color: #eee; "
+                "padding: 8px; border: 1px solid #444;"
+            )
+
+        def get_header_style(self) -> str:
+            return (
+                "background-color: #111; color: #fff; padding: 10px; "
+                "border: 1px solid #333;"
+            )
+
+    # Configure with dark mode style
+    configure_formatter(
+        max_cell_length=10,
+        style_provider=DarkModeStyleProvider(),
+        custom_css="""
+        .datafusion-table {
+            font-family: monospace;
+            border-collapse: collapse;
+        }
+        .datafusion-table tr:hover td {
+            background-color: #444 !important;
+        }
+        """,
+    )
+
+    # Add type formatters for special formatting - now working with native int values
+    formatter = get_formatter()
+    formatter.register_formatter(
+        int,
+        lambda n: f'<span style="color: {"#5af" if n % 2 == 0 else "#f5a"}">{n}</span>',
+    )
+
+    html_output = df._repr_html_()
+
+    # Verify our customizations were applied
+    assert "background-color: #222" in html_output
+    assert "background-color: #111" in html_output
+    assert ".datafusion-table" in html_output
+    assert "color: #5af" in html_output  # Even numbers
+
+
+def test_html_formatter_memory(df, clean_formatter_state):
+    """Test the memory and row control parameters in DataFrameHtmlFormatter."""
+    configure_formatter(max_memory_bytes=10, min_rows=1)
+    html_output = df._repr_html_()
+
+    # Count the number of table rows in the output
+    tr_count = count_table_rows(html_output)
+    # With a tiny memory limit of 10 bytes, the formatter should display
+    # the minimum number of rows (1) plus a message about truncation
+    assert tr_count == 2  # 1 for header row, 1 for data row
+    assert "data truncated" in html_output.lower()
+
+    configure_formatter(max_memory_bytes=10 * MB, min_rows=1)
+    html_output = df._repr_html_()
+    # With the larger memory limit, all rows should be displayed
+    tr_count = count_table_rows(html_output)
+    # Table should have header row (1) + 3 data rows = 4 rows
+    assert tr_count == 4
+    # No truncation message should appear
+    assert "data truncated" not in html_output.lower()
+
+
+def test_html_formatter_memory_boundary_conditions(large_df, clean_formatter_state):
+    """Test memory limit behavior at boundary conditions with a large dataset.
+
+    This test validates that the formatter correctly handles edge cases when
+    the memory limit is reached with a large dataset (100,000 rows), ensuring
+    that the min_rows constraint is honored while memory limits are respected.
+    Uses large_df to actually test memory limit behavior with realistic data sizes.
+    """
+
+    # Get the raw size of the data to test boundary conditions
+    # First, capture output with no limits
+    # NOTE: max_rows=200000 is set well above the dataset size (100k rows) to ensure
+    # we're testing memory limits, not row limits. Default max_rows=10 would
+    # truncate before memory limit is reached.
+ configure_formatter(max_memory_bytes=10 * MB, min_rows=1, max_rows=200000) + unrestricted_output = large_df._repr_html_() + unrestricted_rows = count_table_rows(unrestricted_output) + + # Test 1: Very small memory limit should still respect min_rows + # With large dataset, this should definitely hit memory limit before min_rows + configure_formatter(max_memory_bytes=10, min_rows=1) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 2 # At least header + 1 data row (minimum) + # Should show truncation since we limited memory so aggressively + assert "data truncated" in html_output.lower() + + # Test 2: Memory limit at default size (2MB) should truncate the large dataset + # Default max_rows would truncate at 10 rows, so we don't set it here to test + # that memory limit is respected even with default row limit + configure_formatter(max_memory_bytes=2 * MB, min_rows=1) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 2 # At least header + min_rows + # Should be truncated since full dataset is much larger than 2MB + assert tr_count < unrestricted_rows + + # Test 3: Very large memory limit should show much more data + # NOTE: max_rows=200000 is critical here - without it, default max_rows=10 + # would limit output to 10 rows even though we have 100MB of memory available + configure_formatter(max_memory_bytes=100 * MB, min_rows=1, max_rows=200000) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + # Should show significantly more rows, possibly all + assert tr_count > 100 # Should show substantially more rows + + # Test 4: Min rows should override memory limit + # With tiny memory and larger min_rows, min_rows should win + configure_formatter(max_memory_bytes=10, min_rows=2) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 3 # At least header + 2 data rows (min_rows) + # Should show truncation message despite min_rows being satisfied + assert "data truncated" in html_output.lower() + + # Test 5: With reasonable memory and min_rows settings + # NOTE: max_rows=200000 ensures we test memory limit behavior, not row limit + configure_formatter(max_memory_bytes=2 * MB, min_rows=10, max_rows=200000) + html_output = large_df._repr_html_() + tr_count = count_table_rows(html_output) + assert tr_count >= 11 # header + at least 10 data rows (min_rows) + # Should be truncated due to memory limit + assert tr_count < unrestricted_rows + + +def test_html_formatter_stream_early_termination( + large_multi_batch_df, clean_formatter_state +): + """Test that memory limits cause early stream termination with multi-batch data. + + This test specifically validates that the formatter stops collecting data when + the memory limit is reached, rather than collecting all data and then truncating. + The large_multi_batch_df fixture creates 10 record batches, allowing us to verify + that not all batches are consumed when memory limit is hit. 
+ + Key difference from test_html_formatter_memory_boundary_conditions: + - Uses multi-batch DataFrame to verify stream termination behavior + - Tests with memory limit exceeded by 2-3 batches but not 1 batch + - Verifies partial data + truncation message + respects min_rows + """ + + # Get baseline: how much data fits without memory limit + configure_formatter(max_memory_bytes=100 * MB, min_rows=1, max_rows=200000) + unrestricted_output = large_multi_batch_df._repr_html_() + unrestricted_rows = count_table_rows(unrestricted_output) + + # Test 1: Memory limit exceeded by ~2 batches (each batch ~10k rows) + # With 1 batch (~1-2MB), we should have space. With 2-3 batches, we exceed limit. + # Set limit to ~3MB to ensure we collect ~1 batch before hitting limit + configure_formatter(max_memory_bytes=3 * MB, min_rows=1, max_rows=200000) + html_output = large_multi_batch_df._repr_html_() + tr_count = count_table_rows(html_output) + + # Should show significant truncation (not all 100k rows) + assert tr_count < unrestricted_rows, "Should be truncated by memory limit" + assert tr_count >= 2, "Should respect min_rows" + assert "data truncated" in html_output.lower(), "Should indicate truncation" + + # Test 2: Very tight memory limit should still respect min_rows + # Even with tiny memory (10 bytes), should show at least min_rows + configure_formatter(max_memory_bytes=10, min_rows=5, max_rows=200000) + html_output = large_multi_batch_df._repr_html_() + tr_count = count_table_rows(html_output) + + assert tr_count >= 6, "Should show header + at least min_rows (5)" + assert "data truncated" in html_output.lower(), "Should indicate truncation" + + # Test 3: Memory limit should take precedence over max_rows in early termination + # With max_rows=100 but small memory limit, should terminate early due to memory + configure_formatter(max_memory_bytes=2 * MB, min_rows=1, max_rows=100) + html_output = large_multi_batch_df._repr_html_() + tr_count = count_table_rows(html_output) + + # Should be truncated by memory limit (showing more than max_rows would suggest + # but less than unrestricted) + assert tr_count >= 2, "Should respect min_rows" + assert tr_count < unrestricted_rows, "Should be truncated" + # Output should indicate why truncation occurred + assert "data truncated" in html_output.lower() + + +def test_html_formatter_max_rows(df, clean_formatter_state): + configure_formatter(min_rows=2, max_rows=2) + html_output = df._repr_html_() + + tr_count = count_table_rows(html_output) + # Table should have header row (1) + 2 data rows = 3 rows + assert tr_count == 3 + + configure_formatter(min_rows=2, max_rows=3) + html_output = df._repr_html_() + + tr_count = count_table_rows(html_output) + # Table should have header row (1) + 3 data rows = 4 rows + assert tr_count == 4 + + +def test_html_formatter_validation(): + # Test validation for invalid parameters + + with pytest.raises(ValueError, match="max_cell_length must be a positive integer"): + DataFrameHtmlFormatter(max_cell_length=0) + + with pytest.raises(ValueError, match="max_width must be a positive integer"): + DataFrameHtmlFormatter(max_width=0) + + with pytest.raises(ValueError, match="max_height must be a positive integer"): + DataFrameHtmlFormatter(max_height=0) + + with pytest.raises(ValueError, match="max_memory_bytes must be a positive integer"): + DataFrameHtmlFormatter(max_memory_bytes=0) + + with pytest.raises(ValueError, match="max_memory_bytes must be a positive integer"): + DataFrameHtmlFormatter(max_memory_bytes=-100) + + with 
pytest.raises(ValueError, match="min_rows must be a positive integer"): + DataFrameHtmlFormatter(min_rows=0) + + with pytest.raises(ValueError, match="min_rows must be a positive integer"): + DataFrameHtmlFormatter(min_rows=-5) + + with pytest.raises(ValueError, match="max_rows must be a positive integer"): + DataFrameHtmlFormatter(max_rows=0) + + with pytest.raises(ValueError, match="max_rows must be a positive integer"): + DataFrameHtmlFormatter(max_rows=-10) + + with pytest.raises( + ValueError, match="min_rows must be less than or equal to max_rows" + ): + DataFrameHtmlFormatter(min_rows=5, max_rows=4) + + +def test_repr_rows_backward_compatibility(clean_formatter_state): + """Test that repr_rows parameter still works as deprecated alias.""" + # Should work when not conflicting with max_rows + with pytest.warns(DeprecationWarning, match="repr_rows parameter is deprecated"): + formatter = DataFrameHtmlFormatter(repr_rows=15, min_rows=10) + assert formatter.max_rows == 15 + assert formatter.repr_rows == 15 + + # Should fail when conflicting with max_rows + with pytest.raises(ValueError, match="Cannot specify both repr_rows and max_rows"): + DataFrameHtmlFormatter(repr_rows=5, max_rows=10) + + # Setting repr_rows via property should warn + formatter2 = DataFrameHtmlFormatter() + with pytest.warns(DeprecationWarning, match="repr_rows is deprecated"): + formatter2.repr_rows = 7 + assert formatter2.max_rows == 7 + assert formatter2.repr_rows == 7 + + +def test_configure_formatter(df, clean_formatter_state): + """Test using custom style providers with the HTML formatter and configured + parameters.""" + + # these are non-default values + max_cell_length = 10 + max_width = 500 + max_height = 30 + max_memory_bytes = 3 * MB + min_rows = 2 + max_rows = 2 + enable_cell_expansion = False + show_truncation_message = False + use_shared_styles = False + + reset_formatter() + formatter_default = get_formatter() + + assert formatter_default.max_cell_length != max_cell_length + assert formatter_default.max_width != max_width + assert formatter_default.max_height != max_height + assert formatter_default.max_memory_bytes != max_memory_bytes + assert formatter_default.min_rows != min_rows + assert formatter_default.max_rows != max_rows + assert formatter_default.enable_cell_expansion != enable_cell_expansion + assert formatter_default.show_truncation_message != show_truncation_message + assert formatter_default.use_shared_styles != use_shared_styles + + # Configure with custom style provider and additional parameters + configure_formatter( + max_cell_length=max_cell_length, + max_width=max_width, + max_height=max_height, + max_memory_bytes=max_memory_bytes, + min_rows=min_rows, + max_rows=max_rows, + enable_cell_expansion=enable_cell_expansion, + show_truncation_message=show_truncation_message, + use_shared_styles=use_shared_styles, + ) + formatter_custom = get_formatter() + assert formatter_custom.max_cell_length == max_cell_length + assert formatter_custom.max_width == max_width + assert formatter_custom.max_height == max_height + assert formatter_custom.max_memory_bytes == max_memory_bytes + assert formatter_custom.min_rows == min_rows + assert formatter_custom.max_rows == max_rows + assert formatter_custom.enable_cell_expansion == enable_cell_expansion + assert formatter_custom.show_truncation_message == show_truncation_message + assert formatter_custom.use_shared_styles == use_shared_styles + + +def test_configure_formatter_invalid_params(clean_formatter_state): + """Test that configure_formatter 
rejects invalid parameters.""" + with pytest.raises(ValueError, match="Invalid formatter parameters"): + configure_formatter(invalid_param=123) + + # Test with multiple parameters, one valid and one invalid + with pytest.raises(ValueError, match="Invalid formatter parameters"): + configure_formatter(max_width=500, not_a_real_param="test") + + # Test with multiple invalid parameters + with pytest.raises(ValueError, match="Invalid formatter parameters"): + configure_formatter(fake_param1="test", fake_param2=456) + + +def test_get_dataframe(tmp_path): + ctx = SessionContext() + + path = tmp_path / "test.csv" + table = pa.Table.from_arrays( + [ + [1, 2, 3, 4], + ["a", "b", "c", "d"], + [1.1, 2.2, 3.3, 4.4], + ], + names=["int", "str", "float"], + ) + write_csv(table, path) + + ctx.register_csv("csv", path) + + df = ctx.table("csv") + assert isinstance(df, DataFrame) + + +def test_struct_select(struct_df): + df = struct_df.select( + column("a")["c"] + column("b"), + column("a")["c"] - column("b"), + ) + + # execute and collect the first (and only) batch + result = df.collect()[0] + + assert result.column(0) == pa.array([5, 7, 9]) + assert result.column(1) == pa.array([-3, -3, -3]) + + +def test_explain(df): + df = df.select( + column("a") + column("b"), + column("a") - column("b"), + ) + df.explain() + + +def test_logical_plan(aggregate_df): + plan = aggregate_df.logical_plan() + + expected = "Projection: test.c1, sum(test.c2)" + + assert expected == plan.display() + + expected = ( + "Projection: test.c1, sum(test.c2)\n" + " Aggregate: groupBy=[[test.c1]], aggr=[[sum(test.c2)]]\n" + " TableScan: test" + ) + + assert expected == plan.display_indent() + + +def test_optimized_logical_plan(aggregate_df): + plan = aggregate_df.optimized_logical_plan() + + expected = "Aggregate: groupBy=[[test.c1]], aggr=[[sum(test.c2)]]" + + assert expected == plan.display() + + expected = ( + "Aggregate: groupBy=[[test.c1]], aggr=[[sum(test.c2)]]\n" + " TableScan: test projection=[c1, c2]" + ) + + assert expected == plan.display_indent() + + +def test_execution_plan(aggregate_df): + plan = aggregate_df.execution_plan() + + expected = ( + "AggregateExec: mode=FinalPartitioned, gby=[c1@0 as c1], aggr=[sum(test.c2)]\n" + ) + + assert expected == plan.display() + + # Check the number of partitions is as expected. 
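+    # (the exact count is machine-dependent; it typically follows the
+    # session's target_partitions setting, so only the type is asserted)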
+ assert isinstance(plan.partition_count, int) + + expected = ( + "ProjectionExec: expr=[c1@0 as c1, SUM(test.c2)@1 as SUM(test.c2)]\n" + " Aggregate: groupBy=[[test.c1]], aggr=[[SUM(test.c2)]]\n" + " TableScan: test projection=[c1, c2]" + ) + + indent = plan.display_indent() + + # indent plan will be different for everyone due to absolute path + # to filename, so we just check for some expected content + assert "AggregateExec:" in indent + assert "RepartitionExec:" in indent + assert "DataSourceExec:" in indent + assert "file_type=csv" in indent + + ctx = SessionContext() + rows_returned = 0 + for idx in range(plan.partition_count): + stream = ctx.execute(plan, idx) + try: + batch = stream.next() + assert batch is not None + rows_returned += len(batch.to_pyarrow()[0]) + except StopIteration: + # This is one of the partitions with no values + pass + with pytest.raises(StopIteration): + stream.next() + + assert rows_returned == 5 + + +@pytest.mark.asyncio +async def test_async_iteration_of_df(aggregate_df): + rows_returned = 0 + async for batch in aggregate_df: + assert batch is not None + rows_returned += len(batch.to_pyarrow()[0]) + + assert rows_returned == 5 + + +def test_repartition(df): + df.repartition(2) + + +def test_repartition_by_hash(df): + df.repartition_by_hash(column("a"), num=2) + + +def test_repartition_by_hash_sql_expression(df): + df.repartition_by_hash("a", num=2) + + +def test_repartition_by_hash_mix(df): + df.repartition_by_hash(column("a"), "b", num=2) + + +def test_intersect(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + df_a = ctx.create_dataframe([[batch]]) + + batch = pa.RecordBatch.from_arrays( + [pa.array([3, 4, 5]), pa.array([6, 7, 8])], + names=["a", "b"], + ) + df_b = ctx.create_dataframe([[batch]]) + + batch = pa.RecordBatch.from_arrays( + [pa.array([3]), pa.array([6])], + names=["a", "b"], + ) + df_c = ctx.create_dataframe([[batch]]).sort(column("a")) + + df_a_i_b = df_a.intersect(df_b).sort(column("a")) + + assert df_c.collect() == df_a_i_b.collect() + + +def test_except_all(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + df_a = ctx.create_dataframe([[batch]]) + + batch = pa.RecordBatch.from_arrays( + [pa.array([3, 4, 5]), pa.array([6, 7, 8])], + names=["a", "b"], + ) + df_b = ctx.create_dataframe([[batch]]) + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2]), pa.array([4, 5])], + names=["a", "b"], + ) + df_c = ctx.create_dataframe([[batch]]).sort(column("a")) + + df_a_e_b = df_a.except_all(df_b).sort(column("a")) + + assert df_c.collect() == df_a_e_b.collect() + + +def test_collect_partitioned(): + ctx = SessionContext() + + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) + + assert [[batch]] == ctx.create_dataframe([[batch]]).collect_partitioned() + + +def test_collect_column(ctx: SessionContext): + batch_1 = pa.RecordBatch.from_pydict({"a": [1, 2, 3]}) + batch_2 = pa.RecordBatch.from_pydict({"a": [4, 5, 6]}) + batch_3 = pa.RecordBatch.from_pydict({"a": [7, 8, 9]}) + + ctx.register_record_batches("t", [[batch_1, batch_2], [batch_3]]) + + result = ctx.table("t").sort(column("a")).collect_column("a") + expected = pa.array([1, 2, 3, 4, 5, 6, 7, 8, 9]) + assert result == expected + + +def test_union(ctx): + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3]), pa.array([4, 5, 6])], + names=["a", "b"], + ) 
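+    # union() without distinct=True behaves like SQL UNION ALL below:
+    # duplicate rows are kept, as the expected result shows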
+ df_a = ctx.create_dataframe([[batch]])
+
+ batch = pa.RecordBatch.from_arrays(
+ [pa.array([3, 4, 5]), pa.array([6, 7, 8])],
+ names=["a", "b"],
+ )
+ df_b = ctx.create_dataframe([[batch]])
+
+ batch = pa.RecordBatch.from_arrays(
+ [pa.array([1, 2, 3, 3, 4, 5]), pa.array([4, 5, 6, 6, 7, 8])],
+ names=["a", "b"],
+ )
+ df_c = ctx.create_dataframe([[batch]]).sort(column("a"))
+
+ df_a_u_b = df_a.union(df_b).sort(column("a"))
+
+ assert df_c.collect() == df_a_u_b.collect()
+
+
+ def test_union_distinct(ctx):
+ batch = pa.RecordBatch.from_arrays(
+ [pa.array([1, 2, 3]), pa.array([4, 5, 6])],
+ names=["a", "b"],
+ )
+ df_a = ctx.create_dataframe([[batch]])
+
+ batch = pa.RecordBatch.from_arrays(
+ [pa.array([3, 4, 5]), pa.array([6, 7, 8])],
+ names=["a", "b"],
+ )
+ df_b = ctx.create_dataframe([[batch]])
+
+ batch = pa.RecordBatch.from_arrays(
+ [pa.array([1, 2, 3, 4, 5]), pa.array([4, 5, 6, 7, 8])],
+ names=["a", "b"],
+ )
+ df_c = ctx.create_dataframe([[batch]]).sort(column("a"))
+
+ df_a_u_b = df_a.union(df_b, distinct=True).sort(column("a"))
+
+ assert df_c.collect() == df_a_u_b.collect()
+
+
+ def test_cache(df):
+ assert df.cache().collect() == df.collect()
+
+
+ def test_count(df):
+ # Get number of rows
+ assert df.count() == 3
+
+
+ def test_to_pandas(df):
+ # Skip test if pandas is not installed
+ pd = pytest.importorskip("pandas")
+
+ # Convert datafusion dataframe to pandas dataframe
+ pandas_df = df.to_pandas()
+ assert isinstance(pandas_df, pd.DataFrame)
+ assert pandas_df.shape == (3, 3)
+ assert set(pandas_df.columns) == {"a", "b", "c"}
+
+
+ def test_empty_to_pandas(df):
+ # Skip test if pandas is not installed
+ pd = pytest.importorskip("pandas")
+
+ # Convert empty datafusion dataframe to pandas dataframe
+ pandas_df = df.limit(0).to_pandas()
+ assert isinstance(pandas_df, pd.DataFrame)
+ assert pandas_df.shape == (0, 3)
+ assert set(pandas_df.columns) == {"a", "b", "c"}
+
+
+ def test_to_polars(df):
+ # Skip test if polars is not installed
+ pl = pytest.importorskip("polars")
+
+ # Convert datafusion dataframe to polars dataframe
+ polars_df = df.to_polars()
+ assert isinstance(polars_df, pl.DataFrame)
+ assert polars_df.shape == (3, 3)
+ assert set(polars_df.columns) == {"a", "b", "c"}
+
+
+ def test_empty_to_polars(df):
+ # Skip test if polars is not installed
+ pl = pytest.importorskip("polars")
+
+ # Convert empty datafusion dataframe to polars dataframe
+ polars_df = df.limit(0).to_polars()
+ assert isinstance(polars_df, pl.DataFrame)
+ assert polars_df.shape == (0, 3)
+ assert set(polars_df.columns) == {"a", "b", "c"}
+
+
+ def test_to_arrow_table(df):
+ # Convert datafusion dataframe to pyarrow Table
+ pyarrow_table = df.to_arrow_table()
+ assert isinstance(pyarrow_table, pa.Table)
+ assert pyarrow_table.shape == (3, 3)
+ assert set(pyarrow_table.column_names) == {"a", "b", "c"}
+
+
+ def test_parquet_non_null_column_to_pyarrow(ctx, tmp_path):
+ path = tmp_path.joinpath("t.parquet")
+
+ ctx.sql("create table t_(a int not null)").collect()
+ ctx.sql("insert into t_ values (1), (2), (3)").collect()
+ ctx.sql(f"copy (select * from t_) to '{path}'").collect()
+
+ ctx.register_parquet("t", path)
+ pyarrow_table = ctx.sql("select max(a) as m from t").to_arrow_table()
+ assert pyarrow_table.to_pydict() == {"m": [3]}
+
+
+ def test_parquet_empty_batch_to_pyarrow(ctx, tmp_path):
+ path = tmp_path.joinpath("t.parquet")
+
+ ctx.sql("create table t_(a int not null)").collect()
+ ctx.sql("insert into t_ values (1), (2), (3)").collect()
+
ctx.sql(f"copy (select * from t_) to '{path}'").collect() + + ctx.register_parquet("t", path) + pyarrow_table = ctx.sql("select * from t limit 0").to_arrow_table() + assert pyarrow_table.schema == pa.schema( + [ + pa.field("a", pa.int32(), nullable=False), + ] + ) + + +def test_parquet_null_aggregation_to_pyarrow(ctx, tmp_path): + path = tmp_path.joinpath("t.parquet") + + ctx.sql("create table t_(a int not null)").collect() + ctx.sql("insert into t_ values (1), (2), (3)").collect() + ctx.sql(f"copy (select * from t_) to '{path}'").collect() + + ctx.register_parquet("t", path) + pyarrow_table = ctx.sql( + "select max(a) as m from (select * from t where a < 0)" + ).to_arrow_table() + assert pyarrow_table.to_pydict() == {"m": [None]} + assert pyarrow_table.schema == pa.schema( + [ + pa.field("m", pa.int32(), nullable=True), + ] + ) + + +def test_execute_stream(df): + stream = df.execute_stream() + assert all(batch is not None for batch in stream) + assert not list(stream) # after one iteration the generator must be exhausted + + +@pytest.mark.asyncio +async def test_execute_stream_async(df): + stream = df.execute_stream() + batches = [batch async for batch in stream] + + assert all(batch is not None for batch in batches) + + # After consuming all batches, the stream should be exhausted + remaining_batches = [batch async for batch in stream] + assert not remaining_batches + + +@pytest.mark.parametrize("schema", [True, False]) +def test_execute_stream_to_arrow_table(df, schema): + stream = df.execute_stream() + + if schema: + pyarrow_table = pa.Table.from_batches( + (batch.to_pyarrow() for batch in stream), schema=df.schema() + ) + else: + pyarrow_table = pa.Table.from_batches(batch.to_pyarrow() for batch in stream) + + assert isinstance(pyarrow_table, pa.Table) + assert pyarrow_table.shape == (3, 3) + assert set(pyarrow_table.column_names) == {"a", "b", "c"} + + +@pytest.mark.asyncio +@pytest.mark.parametrize("schema", [True, False]) +async def test_execute_stream_to_arrow_table_async(df, schema): + stream = df.execute_stream() + + if schema: + pyarrow_table = pa.Table.from_batches( + [batch.to_pyarrow() async for batch in stream], schema=df.schema() + ) + else: + pyarrow_table = pa.Table.from_batches( + [batch.to_pyarrow() async for batch in stream] + ) + + assert isinstance(pyarrow_table, pa.Table) + assert pyarrow_table.shape == (3, 3) + assert set(pyarrow_table.column_names) == {"a", "b", "c"} + + +def test_execute_stream_partitioned(df): + streams = df.execute_stream_partitioned() + assert all(batch is not None for stream in streams for batch in stream) + assert all( + not list(stream) for stream in streams + ) # after one iteration all generators must be exhausted + + +@pytest.mark.asyncio +async def test_execute_stream_partitioned_async(df): + streams = df.execute_stream_partitioned() + + for stream in streams: + batches = [batch async for batch in stream] + assert all(batch is not None for batch in batches) + + # Ensure the stream is exhausted after iteration + remaining_batches = [batch async for batch in stream] + assert not remaining_batches + + +def test_empty_to_arrow_table(df): + # Convert empty datafusion dataframe to pyarrow Table + pyarrow_table = df.limit(0).to_arrow_table() + assert isinstance(pyarrow_table, pa.Table) + assert pyarrow_table.shape == (0, 3) + assert set(pyarrow_table.column_names) == {"a", "b", "c"} + + +def test_iter_batches_dataframe(fail_collect): + ctx = SessionContext() + + batch1 = pa.record_batch([pa.array([1])], names=["a"]) + batch2 = 
pa.record_batch([pa.array([2])], names=["a"])
+ df = ctx.create_dataframe([[batch1], [batch2]])
+
+ expected = [batch1, batch2]
+ results = [b.to_pyarrow() for b in df]
+
+ assert len(results) == len(expected)
+ for exp in expected:
+ assert any(got.equals(exp) for got in results)
+
+
+ def test_arrow_c_stream_to_table_and_reader(fail_collect):
+ ctx = SessionContext()
+
+ # Create a DataFrame with two separate record batches
+ batch1 = pa.record_batch([pa.array([1])], names=["a"])
+ batch2 = pa.record_batch([pa.array([2])], names=["a"])
+ df = ctx.create_dataframe([[batch1], [batch2]])
+
+ table = pa.Table.from_batches(batch.to_pyarrow() for batch in df)
+ batches = table.to_batches()
+
+ assert len(batches) == 2
+ expected = [batch1, batch2]
+ for exp in expected:
+ assert any(got.equals(exp) for got in batches)
+ assert table.schema == df.schema()
+ assert table.column("a").num_chunks == 2
+
+ reader = pa.RecordBatchReader.from_stream(df)
+ assert isinstance(reader, pa.RecordBatchReader)
+ reader_table = pa.Table.from_batches(reader)
+ expected = pa.Table.from_batches([batch1, batch2])
+ assert reader_table.equals(expected)
+
+
+ def test_arrow_c_stream_order():
+ ctx = SessionContext()
+
+ batch1 = pa.record_batch([pa.array([1])], names=["a"])
+ batch2 = pa.record_batch([pa.array([2])], names=["a"])
+
+ df = ctx.create_dataframe([[batch1, batch2]])
+
+ table = pa.Table.from_batches(batch.to_pyarrow() for batch in df)
+ expected = pa.Table.from_batches([batch1, batch2])
+
+ assert table.equals(expected)
+ col = table.column("a")
+ assert col.chunk(0)[0].as_py() == 1
+ assert col.chunk(1)[0].as_py() == 2
+
+
+ def test_arrow_c_stream_schema_selection(fail_collect):
+ ctx = SessionContext()
+
+ batch = pa.RecordBatch.from_arrays(
+ [
+ pa.array([1, 2]),
+ pa.array([3, 4]),
+ pa.array([5, 6]),
+ ],
+ names=["a", "b", "c"],
+ )
+ df = ctx.create_dataframe([[batch]])
+
+ requested_schema = pa.schema([("c", pa.int64()), ("a", pa.int64())])
+
+ reader = pa.RecordBatchReader.from_stream(df, schema=requested_schema)
+
+ assert reader.schema == requested_schema
+
+ batches = list(reader)
+
+ assert len(batches) == 1
+ expected_batch = pa.record_batch(
+ [pa.array([5, 6]), pa.array([1, 2])], names=["c", "a"]
+ )
+ assert batches[0].equals(expected_batch)
+
+
+ def test_arrow_c_stream_schema_mismatch(fail_collect):
+ ctx = SessionContext()
+
+ batch = pa.RecordBatch.from_arrays(
+ [pa.array([1, 2]), pa.array([3, 4])], names=["a", "b"]
+ )
+ df = ctx.create_dataframe([[batch]])
+
+ bad_schema = pa.schema([("a", pa.string())])
+
+ c_schema = pa_cffi.ffi.new("struct ArrowSchema*")
+ address = int(pa_cffi.ffi.cast("uintptr_t", c_schema))
+ bad_schema._export_to_c(address)
+
+ capsule_new = ctypes.pythonapi.PyCapsule_New
+ capsule_new.restype = ctypes.py_object
+ capsule_new.argtypes = [ctypes.c_void_p, ctypes.c_char_p, ctypes.c_void_p]
+ bad_capsule = capsule_new(ctypes.c_void_p(address), b"arrow_schema", None)
+
+ with pytest.raises(Exception, match="Fail to merge schema"):
+ df.__arrow_c_stream__(bad_capsule)
+
+
+ def test_to_pylist(df):
+ # Convert datafusion dataframe to Python list
+ pylist = df.to_pylist()
+ assert isinstance(pylist, list)
+ assert pylist == [
+ {"a": 1, "b": 4, "c": 8},
{"a": 2, "b": 5, "c": 5}, + {"a": 3, "b": 6, "c": 8}, + ] + + +def test_to_pydict(df): + # Convert datafusion dataframe to Python dictionary + pydict = df.to_pydict() + assert isinstance(pydict, dict) + assert pydict == {"a": [1, 2, 3], "b": [4, 5, 6], "c": [8, 5, 8]} + + +def test_describe(df): + # Calculate statistics + df = df.describe() + + # Collect the result + result = df.to_pydict() + + assert result == { + "describe": [ + "count", + "null_count", + "mean", + "std", + "min", + "max", + "median", + ], + "a": [3.0, 0.0, 2.0, 1.0, 1.0, 3.0, 2.0], + "b": [3.0, 0.0, 5.0, 1.0, 4.0, 6.0, 5.0], + "c": [3.0, 0.0, 7.0, 1.7320508075688772, 5.0, 8.0, 8.0], + } + + +@pytest.mark.parametrize("path_to_str", [True, False]) +def test_write_csv(ctx, df, tmp_path, path_to_str): + path = str(tmp_path) if path_to_str else tmp_path + + df.write_csv(path, with_header=True) + + ctx.register_csv("csv", path) + result = ctx.table("csv").to_pydict() + expected = df.to_pydict() + + assert result == expected + + +def generate_test_write_params() -> list[tuple]: + # Overwrite and Replace are not implemented for many table writers + insert_ops = [InsertOp.APPEND, None] + sort_by_cases = [ + (None, [1, 2, 3], "unsorted"), + (column("c"), [2, 1, 3], "single_column_expr"), + (column("a").sort(ascending=False), [3, 2, 1], "single_sort_expr"), + ([column("c"), column("b")], [2, 1, 3], "list_col_expr"), + ( + [column("c").sort(ascending=False), column("b").sort(ascending=False)], + [3, 1, 2], + "list_sort_expr", + ), + ] + + formats = ["csv", "json", "parquet", "table"] + + return [ + pytest.param( + output_format, + insert_op, + sort_by, + expected_a, + id=f"{output_format}_{test_id}", + ) + for output_format, insert_op, ( + sort_by, + expected_a, + test_id, + ) in itertools.product(formats, insert_ops, sort_by_cases) + ] + + +@pytest.mark.parametrize( + ("output_format", "insert_op", "sort_by", "expected_a"), + generate_test_write_params(), +) +def test_write_files_with_options( + ctx, df, tmp_path, output_format, insert_op, sort_by, expected_a +) -> None: + write_options = DataFrameWriteOptions(insert_operation=insert_op, sort_by=sort_by) + + if output_format == "csv": + df.write_csv(tmp_path, with_header=True, write_options=write_options) + ctx.register_csv("test_table", tmp_path) + elif output_format == "json": + df.write_json(tmp_path, write_options=write_options) + ctx.register_json("test_table", tmp_path) + elif output_format == "parquet": + df.write_parquet(tmp_path, write_options=write_options) + ctx.register_parquet("test_table", tmp_path) + elif output_format == "table": + batch = pa.RecordBatch.from_arrays([[], [], []], schema=df.schema()) + ctx.register_record_batches("test_table", [[batch]]) + ctx.table("test_table").show() + df.write_table("test_table", write_options=write_options) + + result = ctx.table("test_table").to_pydict()["a"] + ctx.table("test_table").show() + + assert result == expected_a + + +@pytest.mark.parametrize("path_to_str", [True, False]) +def test_write_json(ctx, df, tmp_path, path_to_str): + path = str(tmp_path) if path_to_str else tmp_path + + df.write_json(path) + + ctx.register_json("json", path) + result = ctx.table("json").to_pydict() + expected = df.to_pydict() + + assert result == expected + + +@pytest.mark.parametrize("path_to_str", [True, False]) +def test_write_parquet(df, tmp_path, path_to_str): + path = str(tmp_path) if path_to_str else tmp_path + + df.write_parquet(str(path)) + result = pq.read_table(str(path)).to_pydict() + expected = df.to_pydict() + + assert result 
+ @pytest.mark.parametrize(
+ ("compression", "compression_level"),
+ [("gzip", 6), ("brotli", 7), ("zstd", 15)],
+ )
+ def test_write_compressed_parquet(df, tmp_path, compression, compression_level):
+ path = tmp_path
+
+ df.write_parquet(
+ str(path), compression=compression, compression_level=compression_level
+ )
+
+ # test that the actual compression scheme is the one written
+ for _root, _dirs, files in os.walk(path):
+ for file in files:
+ if file.endswith(".parquet"):
+ metadata = pq.ParquetFile(tmp_path / file).metadata.to_dict()
+ for row_group in metadata["row_groups"]:
+ for columns in row_group["columns"]:
+ assert columns["compression"].lower() == compression
+
+ result = pq.read_table(str(path)).to_pydict()
+ expected = df.to_pydict()
+
+ assert result == expected
+
+
+ @pytest.mark.parametrize(
+ ("compression", "compression_level"),
+ [("gzip", 12), ("brotli", 15), ("zstd", 23), ("wrong", 12)],
+ )
+ def test_write_compressed_parquet_wrong_compression_level(
+ df, tmp_path, compression, compression_level
+ ):
+ path = tmp_path
+
+ with pytest.raises(ValueError):
+ df.write_parquet(
+ str(path),
+ compression=compression,
+ compression_level=compression_level,
+ )
+
+
+ @pytest.mark.parametrize("compression", ["wrong"])
+ def test_write_compressed_parquet_invalid_compression(df, tmp_path, compression):
+ path = tmp_path
+
+ with pytest.raises(ValueError):
+ df.write_parquet(str(path), compression=compression)
+
+
+ # not testing lzo because it is not implemented yet
+ # https://github.com/apache/arrow-rs/issues/6970
+ @pytest.mark.parametrize("compression", ["zstd", "brotli", "gzip"])
+ def test_write_compressed_parquet_default_compression_level(df, tmp_path, compression):
+ # Test write_parquet with the zstd, brotli, and gzip default compression level,
+ # i.e. without specifying a compression level explicitly;
+ # should complete without error
+ path = tmp_path
+
+ df.write_parquet(str(path), compression=compression)
+
+
+ def test_write_parquet_with_options_default_compression(df, tmp_path):
+ """Test that the default compression is ZSTD."""
+ df.write_parquet(tmp_path)
+
+ for file in tmp_path.rglob("*.parquet"):
+ metadata = pq.ParquetFile(file).metadata.to_dict()
+ for row_group in metadata["row_groups"]:
+ for col in row_group["columns"]:
+ assert col["compression"].lower() == "zstd"
+
+
+ @pytest.mark.parametrize(
+ "compression",
+ ["gzip(6)", "brotli(7)", "zstd(15)", "snappy", "uncompressed"],
+ )
+ def test_write_parquet_with_options_compression(df, tmp_path, compression):
+ path = tmp_path
+ df.write_parquet_with_options(
+ str(path), ParquetWriterOptions(compression=compression)
+ )
+
+ # test that the actual compression scheme is the one written
+ for _root, _dirs, files in os.walk(path):
+ for file in files:
+ if file.endswith(".parquet"):
+ metadata = pq.ParquetFile(tmp_path / file).metadata.to_dict()
+ for row_group in metadata["row_groups"]:
+ for col in row_group["columns"]:
+ assert col["compression"].lower() == re.sub(
+ r"\(\d+\)", "", compression
+ )
+
+ result = pq.read_table(str(path)).to_pydict()
+ expected = df.to_pydict()
+
+ assert result == expected
+
+
+ @pytest.mark.parametrize(
+ "compression",
+ ["gzip(12)", "brotli(15)", "zstd(23)"],
+ )
+ def test_write_parquet_with_options_wrong_compression_level(df, tmp_path, compression):
+ path = tmp_path
+
+ with pytest.raises(Exception, match=r"valid compression range .*?
exceeded."): + df.write_parquet_with_options( + str(path), ParquetWriterOptions(compression=compression) + ) + + +@pytest.mark.parametrize("compression", ["wrong", "wrong(12)"]) +def test_write_parquet_with_options_invalid_compression(df, tmp_path, compression): + path = tmp_path + + with pytest.raises(Exception, match="Unknown or unsupported parquet compression"): + df.write_parquet_with_options( + str(path), ParquetWriterOptions(compression=compression) + ) + + +@pytest.mark.parametrize( + ("writer_version", "format_version"), + [("1.0", "1.0"), ("2.0", "2.6"), (None, "1.0")], +) +def test_write_parquet_with_options_writer_version( + df, tmp_path, writer_version, format_version +): + """Test the Parquet writer version. Note that writer_version=2.0 results in + format_version=2.6""" + if writer_version is None: + df.write_parquet_with_options(tmp_path, ParquetWriterOptions()) + else: + df.write_parquet_with_options( + tmp_path, ParquetWriterOptions(writer_version=writer_version) + ) + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + assert metadata["format_version"] == format_version + + +@pytest.mark.parametrize("writer_version", ["1.2.3", "custom-version", "0"]) +def test_write_parquet_with_options_wrong_writer_version(df, tmp_path, writer_version): + """Test that invalid writer versions in Parquet throw an exception.""" + with pytest.raises(Exception, match="Invalid parquet writer version"): + df.write_parquet_with_options( + tmp_path, ParquetWriterOptions(writer_version=writer_version) + ) + + +@pytest.mark.parametrize("dictionary_enabled", [True, False, None]) +def test_write_parquet_with_options_dictionary_enabled( + df, tmp_path, dictionary_enabled +): + """Test enabling/disabling the dictionaries in Parquet.""" + df.write_parquet_with_options( + tmp_path, ParquetWriterOptions(dictionary_enabled=dictionary_enabled) + ) + # by default, the dictionary is enabled, so None results in True + result = dictionary_enabled if dictionary_enabled is not None else True + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + + for row_group in metadata["row_groups"]: + for col in row_group["columns"]: + assert col["has_dictionary_page"] == result + + +@pytest.mark.parametrize( + ("statistics_enabled", "has_statistics"), + [("page", True), ("chunk", True), ("none", False), (None, True)], +) +def test_write_parquet_with_options_statistics_enabled( + df, tmp_path, statistics_enabled, has_statistics +): + """Test configuring the statistics in Parquet. In pyarrow we can only check for + column-level statistics, so "page" and "chunk" are tested in the same way.""" + df.write_parquet_with_options( + tmp_path, ParquetWriterOptions(statistics_enabled=statistics_enabled) + ) + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + + for row_group in metadata["row_groups"]: + for col in row_group["columns"]: + if has_statistics: + assert col["statistics"] is not None + else: + assert col["statistics"] is None + + +@pytest.mark.parametrize("max_row_group_size", [1000, 5000, 10000, 100000]) +def test_write_parquet_with_options_max_row_group_size( + large_df, tmp_path, max_row_group_size +): + """Test configuring the max number of rows per group in Parquet. 
These test cases + guarantee that the number of rows for each row group is max_row_group_size, given + the total number of rows is a multiple of max_row_group_size.""" + path = f"{tmp_path}/t.parquet" + large_df.write_parquet_with_options( + path, ParquetWriterOptions(max_row_group_size=max_row_group_size) + ) + + parquet = pq.ParquetFile(path) + metadata = parquet.metadata.to_dict() + for row_group in metadata["row_groups"]: + assert row_group["num_rows"] == max_row_group_size + + +@pytest.mark.parametrize("created_by", ["datafusion", "datafusion-python", "custom"]) +def test_write_parquet_with_options_created_by(df, tmp_path, created_by): + """Test configuring the created by metadata in Parquet.""" + df.write_parquet_with_options(tmp_path, ParquetWriterOptions(created_by=created_by)) + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + assert metadata["created_by"] == created_by + + +@pytest.mark.parametrize("statistics_truncate_length", [5, 25, 50]) +def test_write_parquet_with_options_statistics_truncate_length( + df, tmp_path, statistics_truncate_length +): + """Test configuring the truncate limit in Parquet's row-group-level statistics.""" + ctx = SessionContext() + data = { + "a": [ + "a_the_quick_brown_fox_jumps_over_the_lazy_dog", + "m_the_quick_brown_fox_jumps_over_the_lazy_dog", + "z_the_quick_brown_fox_jumps_over_the_lazy_dog", + ], + "b": ["a_smaller", "m_smaller", "z_smaller"], + } + df = ctx.from_arrow(pa.record_batch(data)) + df.write_parquet_with_options( + tmp_path, + ParquetWriterOptions(statistics_truncate_length=statistics_truncate_length), + ) + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + + for row_group in metadata["row_groups"]: + for col in row_group["columns"]: + statistics = col["statistics"] + assert len(statistics["min"]) <= statistics_truncate_length + assert len(statistics["max"]) <= statistics_truncate_length + + +def test_write_parquet_with_options_default_encoding(tmp_path): + """Test that, by default, Parquet files are written with dictionary encoding. 
+ Note that dictionary encoding is not used for boolean values, so it is not tested
+ here."""
+ ctx = SessionContext()
+ data = {
+ "a": [1, 2, 3],
+ "b": ["1", "2", "3"],
+ "c": [1.01, 2.02, 3.03],
+ }
+ df = ctx.from_arrow(pa.record_batch(data))
+ df.write_parquet_with_options(tmp_path, ParquetWriterOptions())
+
+ for file in tmp_path.rglob("*.parquet"):
+ parquet = pq.ParquetFile(file)
+ metadata = parquet.metadata.to_dict()
+
+ for row_group in metadata["row_groups"]:
+ for col in row_group["columns"]:
+ assert col["encodings"] == ("PLAIN", "RLE", "RLE_DICTIONARY")
+
+
+ @pytest.mark.parametrize(
+ ("encoding", "data_types", "result"),
+ [
+ ("plain", ["int", "float", "str", "bool"], ("PLAIN", "RLE")),
+ ("rle", ["bool"], ("RLE",)),
+ ("delta_binary_packed", ["int"], ("RLE", "DELTA_BINARY_PACKED")),
+ ("delta_length_byte_array", ["str"], ("RLE", "DELTA_LENGTH_BYTE_ARRAY")),
+ ("delta_byte_array", ["str"], ("RLE", "DELTA_BYTE_ARRAY")),
+ ("byte_stream_split", ["int", "float"], ("RLE", "BYTE_STREAM_SPLIT")),
+ ],
+ )
+ def test_write_parquet_with_options_encoding(tmp_path, encoding, data_types, result):
+ """Test different Parquet encodings with their respective supported column types."""
+ ctx = SessionContext()
+
+ data = {}
+ for data_type in data_types:
+ if data_type == "int":
+ data["int"] = [1, 2, 3]
+ elif data_type == "float":
+ data["float"] = [1.01, 2.02, 3.03]
+ elif data_type == "str":
+ data["str"] = ["a", "b", "c"]
+ elif data_type == "bool":
+ data["bool"] = [True, False, True]
+
+ df = ctx.from_arrow(pa.record_batch(data))
+ df.write_parquet_with_options(
+ tmp_path, ParquetWriterOptions(encoding=encoding, dictionary_enabled=False)
+ )
+
+ for file in tmp_path.rglob("*.parquet"):
+ parquet = pq.ParquetFile(file)
+ metadata = parquet.metadata.to_dict()
+
+ for row_group in metadata["row_groups"]:
+ for col in row_group["columns"]:
+ assert col["encodings"] == result
+
+
+ @pytest.mark.parametrize("encoding", ["bit_packed"])
+ def test_write_parquet_with_options_unsupported_encoding(df, tmp_path, encoding):
+ """Test that unsupported Parquet encodings do not work."""
+ # BaseException is used since this throws a Rust panic: https://github.com/PyO3/pyo3/issues/3519
+ with pytest.raises(BaseException, match=r"Encoding .*? is not supported"):
+ df.write_parquet_with_options(tmp_path, ParquetWriterOptions(encoding=encoding))
+
+
+ @pytest.mark.parametrize("encoding", ["non_existent", "unknown", "plain123"])
+ def test_write_parquet_with_options_invalid_encoding(df, tmp_path, encoding):
+ """Test that invalid Parquet encodings do not work."""
+ with pytest.raises(Exception, match="Unknown or unsupported parquet encoding"):
+ df.write_parquet_with_options(tmp_path, ParquetWriterOptions(encoding=encoding))
+
+
+ @pytest.mark.parametrize("encoding", ["plain_dictionary", "rle_dictionary"])
+ def test_write_parquet_with_options_dictionary_encoding_fallback(
+ df, tmp_path, encoding
+ ):
+ """Test that the dictionary encoding cannot be used as fallback in Parquet."""
+ # BaseException is used since this throws a Rust panic: https://github.com/PyO3/pyo3/issues/3519
+ with pytest.raises(
+ BaseException, match="Dictionary encoding can not be used as fallback encoding"
+ ):
+ df.write_parquet_with_options(tmp_path, ParquetWriterOptions(encoding=encoding))
+
+
+ def test_write_parquet_with_options_bloom_filter(df, tmp_path):
+ """Test Parquet files with and without (default) bloom filters.
Since pyarrow does + not expose any information about bloom filters, the easiest way to confirm that they + are actually written is to compare the file size.""" + path_no_bloom_filter = tmp_path / "1" + path_bloom_filter = tmp_path / "2" + + df.write_parquet_with_options(path_no_bloom_filter, ParquetWriterOptions()) + df.write_parquet_with_options( + path_bloom_filter, ParquetWriterOptions(bloom_filter_on_write=True) + ) + + size_no_bloom_filter = 0 + for file in path_no_bloom_filter.rglob("*.parquet"): + size_no_bloom_filter += Path(file).stat().st_size + + size_bloom_filter = 0 + for file in path_bloom_filter.rglob("*.parquet"): + size_bloom_filter += Path(file).stat().st_size + + assert size_no_bloom_filter < size_bloom_filter + + +def test_write_parquet_with_options_column_options(df, tmp_path): + """Test writing Parquet files with different options for each column, which replace + the global configs (when provided).""" + data = { + "a": [1, 2, 3], + "b": ["a", "b", "c"], + "c": [False, True, False], + "d": [1.01, 2.02, 3.03], + "e": [4, 5, 6], + } + + column_specific_options = { + "a": ParquetColumnOptions(statistics_enabled="none"), + "b": ParquetColumnOptions(encoding="plain", dictionary_enabled=False), + "c": ParquetColumnOptions( + compression="snappy", encoding="rle", dictionary_enabled=False + ), + "d": ParquetColumnOptions( + compression="zstd(6)", + encoding="byte_stream_split", + dictionary_enabled=False, + statistics_enabled="none", + ), + # column "e" will use the global configs + } + + results = { + "a": { + "statistics": False, + "compression": "brotli", + "encodings": ("PLAIN", "RLE", "RLE_DICTIONARY"), + }, + "b": { + "statistics": True, + "compression": "brotli", + "encodings": ("PLAIN", "RLE"), + }, + "c": { + "statistics": True, + "compression": "snappy", + "encodings": ("RLE",), + }, + "d": { + "statistics": False, + "compression": "zstd", + "encodings": ("RLE", "BYTE_STREAM_SPLIT"), + }, + "e": { + "statistics": True, + "compression": "brotli", + "encodings": ("PLAIN", "RLE", "RLE_DICTIONARY"), + }, + } + + ctx = SessionContext() + df = ctx.from_arrow(pa.record_batch(data)) + df.write_parquet_with_options( + tmp_path, + ParquetWriterOptions( + compression="brotli(8)", column_specific_options=column_specific_options + ), + ) + + for file in tmp_path.rglob("*.parquet"): + parquet = pq.ParquetFile(file) + metadata = parquet.metadata.to_dict() + + for row_group in metadata["row_groups"]: + for col in row_group["columns"]: + column_name = col["path_in_schema"] + result = results[column_name] + assert (col["statistics"] is not None) == result["statistics"] + assert col["compression"].lower() == result["compression"].lower() + assert col["encodings"] == result["encodings"] + + +def test_write_parquet_options(df, tmp_path): + options = ParquetWriterOptions(compression="gzip", compression_level=6) + df.write_parquet(str(tmp_path), options) + + result = pq.read_table(str(tmp_path)).to_pydict() + expected = df.to_pydict() + + assert result == expected + + +def test_write_parquet_options_error(df, tmp_path): + options = ParquetWriterOptions(compression="gzip", compression_level=6) + with pytest.raises(ValueError): + df.write_parquet(str(tmp_path), options, compression_level=1) + + +def test_write_table(ctx, df): + batch = pa.RecordBatch.from_arrays( + [pa.array([1, 2, 3])], + names=["a"], + ) + + ctx.register_record_batches("t", [[batch]]) + + df = ctx.table("t").with_column("a", column("a") * literal(-1)) + + ctx.table("t").show() + + df.write_table("t") + result = 
ctx.table("t").sort(column("a")).collect()[0][0].to_pylist() + expected = [-3, -2, -1, 1, 2, 3] + + assert result == expected + + +def test_dataframe_export(df) -> None: + # Guarantees that we have the canonical implementation + # reading our dataframe export + table = pa.table(df) + assert table.num_columns == 3 + assert table.num_rows == 3 + + desired_schema = pa.schema([("a", pa.int64())]) + + # Verify we can request a schema + table = pa.table(df, schema=desired_schema) + assert table.num_columns == 1 + assert table.num_rows == 3 + + # Expect a table of nulls if the schema don't overlap + desired_schema = pa.schema([("g", pa.string())]) + table = pa.table(df, schema=desired_schema) + assert table.num_columns == 1 + assert table.num_rows == 3 + for i in range(3): + assert table[0][i].as_py() is None + + # Expect an error when we cannot convert schema + desired_schema = pa.schema([("a", pa.float32())]) + failed_convert = False + try: + table = pa.table(df, schema=desired_schema) + except Exception: + failed_convert = True + assert failed_convert + + # Expect an error when we have a not set non-nullable + desired_schema = pa.schema([("g", pa.string(), False)]) + failed_convert = False + try: + table = pa.table(df, schema=desired_schema) + except Exception: + failed_convert = True + assert failed_convert + + +def test_dataframe_transform(df): + def add_string_col(df_internal) -> DataFrame: + return df_internal.with_column("string_col", literal("string data")) + + def add_with_parameter(df_internal, value: Any) -> DataFrame: + return df_internal.with_column("new_col", literal(value)) + + df = df.transform(add_string_col).transform(add_with_parameter, 3) + + result = df.to_pydict() + + assert result["a"] == [1, 2, 3] + assert result["string_col"] == ["string data" for _i in range(3)] + assert result["new_col"] == [3 for _i in range(3)] + + +def test_dataframe_repr_html_structure(df, clean_formatter_state) -> None: + """Test that DataFrame._repr_html_ produces expected HTML output structure.""" + + output = df._repr_html_() + + # Since we've added a fair bit of processing to the html output, lets just verify + # the values we are expecting in the table exist. Use regex and ignore everything + # between the and . We also don't want the closing > on the + # td and th segments because that is where the formatting data is written. + + headers = ["a", "b", "c"] + headers = [f"{v}" for v in headers] + header_pattern = "(.*?)".join(headers) + header_matches = re.findall(header_pattern, output, re.DOTALL) + assert len(header_matches) == 1 + + # Update the pattern to handle values that may be wrapped in spans + body_data = [[1, 4, 8], [2, 5, 5], [3, 6, 8]] + + body_lines = [ + f"(?:]*?>)?{v}(?:)?" + for inner in body_data + for v in inner + ] + body_pattern = "(.*?)".join(body_lines) + + body_matches = re.findall(body_pattern, output, re.DOTALL) + + assert len(body_matches) == 1, "Expected pattern of values not found in HTML output" + + +def test_dataframe_repr_html_values(df, clean_formatter_state): + """Test that DataFrame._repr_html_ contains the expected data values.""" + html = df._repr_html_() + assert html is not None + + # Create a more flexible pattern that handles values being wrapped in spans + # This pattern will match the sequence of values 1,4,8,2,5,5,3,6,8 regardless + # of formatting + pattern = re.compile( + r"]*?>(?:]*?>)?1(?:)?.*?" + r"]*?>(?:]*?>)?4(?:)?.*?" + r"]*?>(?:]*?>)?8(?:)?.*?" + r"]*?>(?:]*?>)?2(?:)?.*?" + r"]*?>(?:]*?>)?5(?:)?.*?" + r"]*?>(?:]*?>)?5(?:)?.*?" 
+ r"]*?>(?:]*?>)?3(?:)?.*?" + r"]*?>(?:]*?>)?6(?:)?.*?" + r"]*?>(?:]*?>)?8(?:)?", + re.DOTALL, + ) + + # Print debug info if the test fails + matches = re.findall(pattern, html) + if not matches: + print(f"HTML output snippet: {html[:500]}...") # noqa: T201 + + assert len(matches) > 0, "Expected pattern of values not found in HTML output" + + +def test_html_formatter_shared_styles(df, clean_formatter_state): + """Test that shared styles work correctly across multiple tables.""" + + # First, ensure we're using shared styles + configure_formatter(use_shared_styles=True) + + html_first = df._repr_html_() + html_second = df._repr_html_() + + assert "
" + f"{field.name}
" + f"
" + "" + "" + f"{formatted_value}" + f"" + f"
" + f"
+ def test_html_formatter_custom_cell_builder(df, clean_formatter_state):
+ """Test using a custom cell builder that tags cells by value range."""
+
+ def custom_cell_builder(value, row, col, table_id):
+ try:
+ num_value = int(value)
+ if num_value > 5:
+ return f"<td data-test='high'>{value}-high</td>"
+ if num_value < 3:
+ return f"<td data-test='low'>{value}-low</td>"
+ except (ValueError, TypeError):
+ pass
+
+ # Values in the middle of the range fall through to the default tag
+ return f"<td data-test='mid'>{value}-mid</td>"
+
+ formatter = get_formatter()
+ formatter.set_custom_cell_builder(custom_cell_builder)
+
+ html_output = df._repr_html_()
+
+ # Extract the tagged cell values with regex
+ low_cells = re.findall(r"<td[^>]*>(\d+)-low", html_output)
+ mid_cells = re.findall(r"<td[^>]*>(\d+)-mid", html_output)
+ high_cells = re.findall(r"<td[^>]*>(\d+)-high", html_output)
+
+ # Column values are a=[1, 2, 3], b=[4, 5, 6], c=[8, 5, 8]
+ assert len(low_cells) == 2
+ assert len(mid_cells) == 4
+ assert len(high_cells) == 3
+
+ assert "1-low" in html_output
+ assert "2-low" in html_output
+ assert "3-mid" in html_output
+ assert "4-mid" in html_output
+ assert "6-high" in html_output
+ assert "8-high" in html_output
+
+
+ def test_html_formatter_complex_customization(df, clean_formatter_state):
+ """Test combining a custom cell builder with a custom header builder."""
+
+ def custom_cell_builder(value, row, col, table_id):
+ try:
+ if int(value) > 5:
+ return f"<td class='high'>{value}</td>"
+ if int(value) < 3:
+ return f"<td class='low'>{value}</td>"
+ except (ValueError, TypeError):
+ pass
+ return f"<td>{value}</td>"
+
+ def custom_header_builder(field):
+ return f"<th class='header'>{field.name}</th>"
+
+ formatter = get_formatter()
+ formatter.set_custom_cell_builder(custom_cell_builder)
+ formatter.set_custom_header_builder(custom_header_builder)
+
+ html_output = df._repr_html_()
+
+ assert "class='high'" in html_output
+ assert "class='low'" in html_output
+ assert "class='header'" in html_output