diff --git a/.github/docs/pull_request_template.md b/.github/docs/pull_request_template.md
new file mode 100644
index 00000000000..4515ddf65b9
--- /dev/null
+++ b/.github/docs/pull_request_template.md
@@ -0,0 +1,16 @@
+## Description
+
+Describe the changes that this PR introduces and link any related issues or pull requests.
+
+## Types of changes
+
+- [ ] Bug fix
+- [ ] New feature
+- [ ] Breaking change
+- [ ] Documentation update
+
+## Checklist
+
+- [ ] I have changed the storage version if the on-disk format has changed.
+- [ ] I have requested a review from a maintainer.
+- [ ] I have updated the documentation (if needed).
diff --git a/.github/workflows/benchmark-workflow.yml b/.github/workflows/benchmark-workflow.yml
new file mode 100644
index 00000000000..427e0fd64e8
--- /dev/null
+++ b/.github/workflows/benchmark-workflow.yml
@@ -0,0 +1,20 @@
+name: Benchmark
+on:
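+ # workflow_dispatch allows manual runs; workflow_call lets ci-workflow.yml invoke this as a reusable workflow.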
+ workflow_dispatch:
+ workflow_call:
+
+jobs:
+ benchmark:
+ name: benchmark
+ env:
+ NUM_THREADS: 30
+ GEN: ninja
+ runs-on: kuzu-self-hosted-benchmarking
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Build
+ run: make benchmark LTO=1
+
+ - name: Benchmark
+ run: python3 benchmark/benchmark_runner.py --dataset ldbc-sf100 --thread 10
diff --git a/.github/workflows/build-extensions.yml b/.github/workflows/build-extensions.yml
index f8be4ed3de2..18d726ab6e6 100644
--- a/.github/workflows/build-extensions.yml
+++ b/.github/workflows/build-extensions.yml
@@ -74,7 +74,7 @@ jobs:
docker run -d --name kuzu-x86 \
-v $PWD:/kuzu -w /kuzu \
-e NUM_THREADS=2 -e GEN=ninja -e CC=gcc -e CXX=g++ \
- i386/debian:latest tail -f /dev/null
+ i386/debian:stable tail -f /dev/null
- name: Install dependencies
run: |
@@ -108,7 +108,7 @@ jobs:
- name: Install OpenSSL
run: |
- brew install openssl@3
+ brew install openssl@3 duckdb
OPENSSL_ROOT=$(readlink -f /opt/homebrew/Cellar/openssl@3/*/)
echo "OPENSSL_ROOT_DIR=$OPENSSL_ROOT" >> $GITHUB_ENV
diff --git a/.github/workflows/ci-workflow.yml b/.github/workflows/ci-workflow.yml
index b25ae646230..551ca7b4f46 100644
--- a/.github/workflows/ci-workflow.yml
+++ b/.github/workflows/ci-workflow.yml
@@ -11,6 +11,7 @@ on:
push:
branches:
- master
+ workflow_dispatch:
# Only allow one run in this group to run at a time, and cancel any runs in progress in this group.
# We use the workflow name and then add the pull request number, or (if it's a push to master), we use the name of the branch.
@@ -24,7 +25,7 @@ concurrency:
jobs:
gcc-build-test:
name: gcc build & test
- needs: [clang-formatting-check, sanity-checks]
+ needs: [clang-format, sanity-checks, python-lint-check]
runs-on: kuzu-self-hosted-testing
env:
NUM_THREADS: 32
@@ -54,11 +55,6 @@ jobs:
run: npm install --include=dev
working-directory: tools/nodejs_api
- - name: Extension test
- run: |
- cd scripts/ && python3 http-server.py &
- make extension-test && make clean
-
- name: Build
run: make all
@@ -87,7 +83,7 @@ jobs:
gcc-build-test-x86:
name: gcc build & test 32-bit
- needs: [clang-formatting-check, sanity-checks]
+ needs: [clang-format, sanity-checks]
runs-on: ubuntu-latest
steps:
@@ -95,10 +91,10 @@ jobs:
- name: Start Docker container
run: |
- docker run -d --name kuzu-x86 \
- -v $PWD:/kuzu -w /kuzu \
- -e NUM_THREADS=2 -e GEN=ninja -e CC=gcc -e CXX=g++ \
- i386/debian:latest tail -f /dev/null
+ docker run -d --name kuzu-x86 \
+ -v $PWD:/kuzu -w /kuzu \
+ -e NUM_THREADS=2 -e GEN=ninja -e CC=gcc -e CXX=g++ \
+ i386/debian:stable tail -f /dev/null
- name: Install dependencies
run: |
@@ -161,7 +157,7 @@ jobs:
clang-build-test:
name: clang build and test
- needs: [clang-formatting-check, sanity-checks]
+ needs: [clang-format, sanity-checks, python-lint-check]
runs-on: kuzu-self-hosted-testing
env:
NUM_THREADS: 32
@@ -187,11 +183,6 @@ jobs:
run: npm install --include=dev
working-directory: tools/nodejs_api
- - name: Extension test
- run: |
- cd scripts/ && python3 http-server.py &
- make extension-test && make clean
-
- name: Build
run: make all
@@ -209,7 +200,7 @@ jobs:
msvc-build-test:
name: msvc build & test
- needs: [clang-formatting-check, sanity-checks]
+ needs: [clang-format, sanity-checks, python-lint-check]
runs-on: self-hosted-windows
env:
# Shorten build path as much as possible
@@ -224,6 +215,7 @@ jobs:
AWS_S3_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ PG_HOST: ${{ secrets.PG_HOST }}
RUN_ID: "$(hostname)-$([Math]::Floor((Get-Date).TimeOfDay.TotalSeconds))"
steps:
- uses: actions/checkout@v3
@@ -237,13 +229,6 @@ jobs:
run: npm install --include=dev
working-directory: tools/nodejs_api
- - name: Extension test
- shell: cmd
- run: |
- call "C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvars64.bat"
- cd scripts/ && start /b python http-server.py && cd ..
- make extension-test && make clean
-
- name: Build
shell: cmd
run: |
@@ -286,7 +271,7 @@ jobs:
tidy-and-diagnostics:
name: clang tidy & clangd diagnostics check
- needs: [clang-formatting-check, sanity-checks]
+ needs: [clang-format, sanity-checks]
runs-on: kuzu-self-hosted-testing
env:
NUM_THREADS: 32
@@ -323,25 +308,58 @@ jobs:
- name: Ensure generated grammar files are up to date
run: cmp src/antlr4/Cypher.g4 scripts/antlr4/Cypher.g4.copy
- clang-formatting-check:
- name: clang-format check
+ clang-format:
+ name: clang format
runs-on: ubuntu-22.04
steps:
- name: Install clang-format
run: |
- sudo apt-get update
- sudo apt-get install -y clang-format-11
+ sudo apt-get install -y lsb-release wget software-properties-common gnupg
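+ # llvm.sh is LLVM's apt installer script; "18 all" installs the LLVM 18 toolchain, including the clang-format-18 binary used below.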
+ wget https://apt.llvm.org/llvm.sh
+ chmod +x llvm.sh
+ yes | sudo ./llvm.sh 18 all
- uses: actions/checkout@v3
+ with:
+ repository: ${{ github.event.pull_request.head.repo.full_name }}
+ ref: ${{ github.event.pull_request.head.ref }}
+
+ - name: Check and fix source format
+ run: python3 scripts/run-clang-format.py --in-place --clang-format-executable /usr/bin/clang-format-18 -r src/
+
+ - name: Check and fix test format
+ run: python3 scripts/run-clang-format.py --in-place --clang-format-executable /usr/bin/clang-format-18 -r test/
+
+ - name: Check and fix extension format
+ run: python3 scripts/run-clang-format.py --in-place --clang-format-executable /usr/bin/clang-format-18 -r extension/
- - name: Check source format
- run: python3 scripts/run-clang-format.py --clang-format-executable /usr/bin/clang-format-11 -r src/
+ - name: Fail if any change is made (master branch)
+ if: github.ref == 'refs/heads/master'
+ run: git diff --exit-code
+
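+ # On non-master branches, commit the formatting fixes back to the PR branch instead of failing.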
+ - name: Commit changes (non-master branch)
+ uses: EndBug/add-and-commit@v9
+ if: github.ref != 'refs/heads/master'
+ with:
+ author_name: "CI Bot"
+ message: "Run clang-format"
+
+ python-lint-check:
+ name: python lint check
+ runs-on: macos-14
+ steps:
+ - uses: actions/checkout@v3
- - name: Check test format
- run: python3 scripts/run-clang-format.py --clang-format-executable /usr/bin/clang-format-11 -r test/
+ - name: Setup Python
+ uses: actions/setup-python@v2
+ with:
+ python-version: "3.11"
- - name: Check extension format
- run: python3 scripts/run-clang-format.py --clang-format-executable /usr/bin/clang-format-11 -r extension/
+ - name: Run Python lint
+ working-directory: tools/python_api
+ run: |
+ make requirements
+ ./.venv/bin/ruff check src_py test --verbose
rustfmt-check:
name: rustfmt check
@@ -359,22 +377,11 @@ jobs:
benchmark:
name: benchmark
needs: [gcc-build-test, clang-build-test]
- env:
- NUM_THREADS: 30
- GEN: ninja
- runs-on: kuzu-self-hosted-benchmarking
- steps:
- - uses: actions/checkout@v3
-
- - name: Build
- run: make benchmark LTO=1
-
- - name: Benchmark
- run: python3 benchmark/benchmark_runner.py --dataset ldbc-sf100 --thread 10
+ uses: ./.github/workflows/benchmark-workflow.yml
macos-clang-tidy:
name: macos clang tidy & clangd diagnostics check
- needs: [clang-formatting-check, sanity-checks]
+ needs: [clang-format, sanity-checks]
runs-on: self-hosted-mac-x64
env:
NUM_THREADS: 32
@@ -399,7 +406,7 @@ jobs:
macos-build-test:
name: apple clang build & test
- needs: [clang-formatting-check, sanity-checks, rustfmt-check]
+ needs: [clang-format, sanity-checks, rustfmt-check, python-lint-check]
runs-on: self-hosted-mac-x64
env:
NUM_THREADS: 32
@@ -411,6 +418,7 @@ jobs:
AWS_S3_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ PG_HOST: ${{ secrets.PG_HOST }}
RUN_ID: "$(hostname)-$(date +%s)"
steps:
- uses: actions/checkout@v3
@@ -424,11 +432,6 @@ jobs:
run: npm install --include=dev
working-directory: tools/nodejs_api
- - name: Extension test
- run: |
- cd scripts/ && python3 http-server.py &
- make extension-test && make clean
-
- name: Build
run: make all
@@ -469,3 +472,129 @@ jobs:
ulimit -n 10240
source /Users/runner/.cargo/env
cargo build --locked --features arrow
+
+ shell-test:
+ name: shell test
+ runs-on: ubuntu-latest
+ needs: [clang-format, sanity-checks]
+ env:
+ WERROR: 0
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Build
+ run: make release NUM_THREADS=$(nproc)
+
+ - name: Test
+ working-directory: tools/shell/test
+ run: |
+ pip3 install pytest pexpect
+ python3 -m pytest -v .
+
+ linux-extension-test:
+ name: linux extension test
+ needs: [gcc-build-test, clang-build-test]
+ runs-on: kuzu-self-hosted-testing
+ env:
+ NUM_THREADS: 32
+ TEST_JOBS: 16
+ CLANGD_DIAGNOSTIC_JOBS: 32
+ CLANGD_DIAGNOSTIC_INSTANCES: 6
+ GEN: ninja
+ CC: gcc
+ CXX: g++
+ UW_S3_ACCESS_KEY_ID: ${{ secrets.UW_S3_ACCESS_KEY_ID }}
+ UW_S3_SECRET_ACCESS_KEY: ${{ secrets.UW_S3_SECRET_ACCESS_KEY }}
+ AWS_S3_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
+ AWS_S3_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ RUN_ID: "$(hostname)-$(date +%s)"
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Update PostgreSQL host
+ working-directory: extension/postgres_scanner/test/test_files
+ env:
+ FNAME: postgres_scanner.test
+ FIND: "localhost"
+ run: |
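+ # Rewrite "localhost" in postgres_scanner.test to the PG_HOST secret so the test targets the CI PostgreSQL server.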
+ node -e 'fs=require("fs");fs.readFile(process.env.FNAME,"utf8",(err,data)=>{if(err!=null)throw err;fs.writeFile(process.env.FNAME,data.replaceAll(process.env.FIND,process.env.PG_HOST),"utf8",e=>{if(e!=null)throw e;});});'
+ cat postgres_scanner.test
+
+ - name: Extension test
+ run: |
+ cd scripts/ && python3 http-server.py &
+ make extension-test && make clean
+
+ macos-extension-test:
+ name: macos extension test
+ needs: [macos-build-test]
+ runs-on: self-hosted-mac-x64
+ env:
+ NUM_THREADS: 32
+ TEST_JOBS: 16
+ GEN: ninja
+ UW_S3_ACCESS_KEY_ID: ${{ secrets.UW_S3_ACCESS_KEY_ID }}
+ UW_S3_SECRET_ACCESS_KEY: ${{ secrets.UW_S3_SECRET_ACCESS_KEY }}
+ AWS_S3_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
+ AWS_S3_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ PG_HOST: ${{ secrets.PG_HOST }}
+ RUN_ID: "$(hostname)-$(date +%s)"
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Update PostgreSQL host
+ working-directory: extension/postgres_scanner/test/test_files
+ env:
+ FNAME: postgres_scanner.test
+ FIND: "localhost"
+ run: |
+ node -e 'fs=require("fs");fs.readFile(process.env.FNAME,"utf8",(err,data)=>{if(err!=null)throw err;fs.writeFile(process.env.FNAME,data.replaceAll(process.env.FIND,process.env.PG_HOST),"utf8",e=>{if(e!=null)throw e;});});'
+ cat postgres_scanner.test
+
+ - name: Extension test
+ run: |
+ cd scripts/ && python3 http-server.py &
+ make extension-test && make clean
+
+ windows-extension-test:
+ name: windows extension test
+ needs: [msvc-build-test]
+ runs-on: self-hosted-windows
+ env:
+ # Shorten build path as much as possible
+ CARGO_TARGET_DIR: ${{ github.workspace }}/rs
+ CARGO_BUILD_JOBS: 18
+ NUM_THREADS: 18
+ TEST_JOBS: 9
+ WERROR: 0
+ UW_S3_ACCESS_KEY_ID: ${{ secrets.UW_S3_ACCESS_KEY_ID }}
+ UW_S3_SECRET_ACCESS_KEY: ${{ secrets.UW_S3_SECRET_ACCESS_KEY }}
+ AWS_S3_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
+ AWS_S3_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ AWS_ACCESS_KEY_ID: ${{ secrets.AWS_S3_ACCESS_KEY_ID }}
+ AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_S3_SECRET_ACCESS_KEY }}
+ PG_HOST: ${{ secrets.PG_HOST }}
+ RUN_ID: "$(hostname)-$([Math]::Floor((Get-Date).TimeOfDay.TotalSeconds))"
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Update PostgreSQL host
+ working-directory: extension/postgres_scanner/test/test_files
+ env:
+ FNAME: postgres_scanner.test
+ FIND: "localhost"
+ run: |
+ node -e 'fs=require("fs");fs.readFile(process.env.FNAME,"utf8",(err,data)=>{if(err!=null)throw err;fs.writeFile(process.env.FNAME,data.replaceAll(process.env.FIND,process.env.PG_HOST),"utf8",e=>{if(e!=null)throw e;});});'
+ cat postgres_scanner.test
+
+ - name: Extension test
+ shell: cmd
+ run: |
+ call "C:\Program Files\Microsoft Visual Studio\2022\Community\VC\Auxiliary\Build\vcvars64.bat"
+ cd scripts/ && start /b python http-server.py && cd ..
+ make extension-test && make clean
diff --git a/.github/workflows/multiplatform-build-test.yml b/.github/workflows/multiplatform-build-test.yml
index a4009cfc528..b730bd021d6 100644
--- a/.github/workflows/multiplatform-build-test.yml
+++ b/.github/workflows/multiplatform-build-test.yml
@@ -9,118 +9,206 @@ jobs:
ubuntu-2004-build-test:
name: ubuntu-20.04
runs-on: ubuntu-20.04
+ defaults:
+ run:
+ shell: bash
env:
CC: gcc-11
CXX: g++-11
steps:
- uses: actions/checkout@v3
+ continue-on-error: true
- name: Install packages
+ continue-on-error: true
run: |
sudo apt-get update
sudo apt-get install -y build-essential manpages-dev software-properties-common
sudo add-apt-repository -y ppa:ubuntu-toolchain-r/test
sudo apt-get update
- sudo apt-get install -y gcc-11 g++-11 python3.9
+ sudo apt-get install -y gcc-11 g++-11
+
+ - uses: actions/setup-python@v5
+ continue-on-error: true
+ with:
+ python-version: "3.11"
- name: Ensure Python dependencies
+ continue-on-error: true
run: |
pip install torch~=2.0.0 --extra-index-url https://download.pytorch.org/whl/cpu
pip install --user -r tools/python_api/requirements_dev.txt -f https://data.pyg.org/whl/torch-2.0.0+cpu.html
- pip install --user backports.zoneinfo
- name: Ensure Node.js dependencies
+ continue-on-error: true
working-directory: tools/nodejs_api
run: npm install --include=dev
- name: Build
- run: make release NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
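+ # Append this step's name and exit status to status.txt; each platform uploads it as a CSV for the collect-results job.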
+ make release NUM_THREADS=$(nproc)
+ echo "Build,$?" > status.txt
- name: Test
- run: make test NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make test NUM_THREADS=$(nproc)
+ echo "Test,$?" >> status.txt
- name: C and C++ examples
- run: make example NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make example NUM_THREADS=$(nproc)
+ echo "C and C++ examples,$?" >> status.txt
- name: Python test
- run: make pytest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make pytest NUM_THREADS=$(nproc)
+ echo "Python test,$?" >> status.txt
- name: Node.js test
- run: make nodejstest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make nodejstest NUM_THREADS=$(nproc)
+ echo "Node.js test,$?" >> status.txt
- name: Java test
- run: make javatest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make javatest NUM_THREADS=$(nproc)
+ echo "Java test,$?" >> status.txt
- name: Rust share build
+ continue-on-error: true
run: echo $'[workspace]\nmembers = ["tools/rust_api","examples/rust"]' > Cargo.toml
- name: Rust set env
+ continue-on-error: true
run: echo "CARGO_BUILD_JOBS=$(nproc)" >> $GITHUB_ENV
- name: Rust test
+ continue-on-error: true
working-directory: tools/rust_api
run: |
cargo test --release --features arrow -- --test-threads=1
+ echo "Rust test,$?" >> ../../status.txt
- name: Rust example
+ continue-on-error: true
working-directory: examples/rust
- run: cargo build --release --features arrow
+ run: |
+ cargo build --release --features arrow
+ echo "Rust example,$?" >> ../../status.txt
+
+ - name: Rename status.txt
+ continue-on-error: true
+ run: mv status.txt ubuntu-20.04.csv
+
+ - uses: actions/upload-artifact@v4
+ continue-on-error: true
+ with:
+ name: ubuntu-20.04
+ path: ubuntu-20.04.csv
macos-build-test:
strategy:
matrix:
- runner: [macos-12, macos-13]
+ runner: [macos-13, macos-14]
fail-fast: false
name: ${{ matrix.runner }}
runs-on: ${{ matrix.runner }}
+ defaults:
+ run:
+ shell: bash
steps:
- uses: actions/checkout@v3
+ continue-on-error: true
- uses: actions/setup-python@v4
+ continue-on-error: true
with:
python-version: "3.10"
- name: Ensure Python dependencies
+ continue-on-error: true
run: |
pip install torch~=2.0.0 --extra-index-url https://download.pytorch.org/whl/cpu
pip install --user -r tools/python_api/requirements_dev.txt -f https://data.pyg.org/whl/torch-2.0.0+cpu.html
- name: Ensure Node.js dependencies
+ continue-on-error: true
working-directory: tools/nodejs_api
run: npm install --include=dev
- name: Build
- run: make release NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ continue-on-error: true
+ run: |
+ make release NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ echo "Build,$?" > status.txt
- name: Test
- run: make test NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ continue-on-error: true
+ run: |
+ make test NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ echo "Test,$?" >> status.txt
- name: C and C++ examples
- run: make example NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ continue-on-error: true
+ run: |
+ make example NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ echo "C and C++ examples,$?" >> status.txt
- name: Python test
- run: make pytest NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ continue-on-error: true
+ run: |
+ make pytest NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ echo "Python test,$?" >> status.txt
- name: Node.js test
- run: make nodejstest NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ continue-on-error: true
+ run: |
+ make nodejstest NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ echo "Node.js test,$?" >> status.txt
- name: Java test
- run: make javatest NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ continue-on-error: true
+ run: |
+ make javatest NUM_THREADS=$(sysctl -n hw.physicalcpu)
+ echo "Java test,$?" >> status.txt
- name: Rust share build
+ continue-on-error: true
run: echo $'[workspace]\nmembers = ["tools/rust_api","examples/rust"]' > Cargo.toml
- name: Rust set env
+ continue-on-error: true
run: echo "CARGO_BUILD_JOBS=$(sysctl -n hw.physicalcpu)" >> $GITHUB_ENV
- name: Rust test
+ continue-on-error: true
working-directory: tools/rust_api
run: |
cargo test --release --features arrow -- --test-threads=1
+ echo "Rust test,$?" >> ../../status.txt
- name: Rust example
+ continue-on-error: true
working-directory: examples/rust
- run: cargo build --release --features arrow
+ run: |
+ cargo build --release --features arrow
+ echo "Rust example,$?" >> ../../status.txt
+
+ - name: Rename status.txt
+ continue-on-error: true
+ run: mv status.txt ${{ matrix.runner }}.csv
+
+ - uses: actions/upload-artifact@v4
+ continue-on-error: true
+ with:
+ name: ${{ matrix.runner }}
+ path: ${{ matrix.runner }}.csv
windows-build-test:
strategy:
@@ -131,6 +219,7 @@ jobs:
runs-on: ${{ matrix.runner }}
steps:
- name: Disable Windows Defender
+ continue-on-error: true
shell: powershell
run: |
Set-MpPreference -DisableRealtimeMonitoring $true
@@ -145,53 +234,78 @@ jobs:
Set-MpPreference -SubmitSamplesConsent NeverSend
- uses: actions/checkout@v3
+ continue-on-error: true
- uses: ilammy/msvc-dev-cmd@v1
+ continue-on-error: true
- uses: actions/setup-python@v4
+ continue-on-error: true
with:
python-version: "3.10"
- name: Ensure Python dependencies
+ continue-on-error: true
shell: cmd
run: |
pip install torch~=2.0.0 --extra-index-url https://download.pytorch.org/whl/cpu
pip install --user -r tools\python_api\requirements_dev.txt -f https://data.pyg.org/whl/torch-2.0.0+cpu.html
- name: Ensure Node.js dependencies
+ continue-on-error: true
shell: cmd
working-directory: .\tools\nodejs_api
run: npm install --include=dev
- name: Build
+ continue-on-error: true
shell: cmd
- run: make release NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ run: |
+ make release NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ echo Build,%ERRORLEVEL% > status.txt
- name: Test
+ continue-on-error: true
shell: cmd
- run: make test NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ run: |
+ make test NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ echo Test,%ERRORLEVEL% >> status.txt
- name: C and C++ examples
+ continue-on-error: true
shell: cmd
- run: make example NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ run: |
+ make example NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ echo C and C++ examples,%ERRORLEVEL% >> status.txt
- name: Python test
+ continue-on-error: true
shell: cmd
- run: make pytest NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ run: |
+ make pytest NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ echo Python test,%ERRORLEVEL% >> status.txt
- name: Node.js test
+ continue-on-error: true
shell: cmd
- run: make nodejstest NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ run: |
+ make nodejstest NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ echo Node.js test,%ERRORLEVEL% >> status.txt
- name: Java test
+ continue-on-error: true
shell: cmd
- run: make javatest NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ run: |
+ make javatest NUM_THREADS=%NUMBER_OF_PROCESSORS%
+ echo Java test,%ERRORLEVEL% >> status.txt
- name: Rust share build
+ continue-on-error: true
shell: cmd
run: (echo [workspace]& echo members = ["tools/rust_api","examples/rust"]) > Cargo.toml
- name: Rust test
+ continue-on-error: true
shell: cmd
run: |
make clean
@@ -199,12 +313,27 @@ jobs:
set CXXFLAGS=/std:c++20
set CARGO_BUILD_JOBS=%NUMBER_OF_PROCESSORS%
cargo test --release --features arrow -- --test-threads=1
+ echo Rust test,%ERRORLEVEL% >> status.txt
- name: Rust example
+ continue-on-error: true
shell: cmd
run: |
set CARGO_BUILD_JOBS=%NUMBER_OF_PROCESSORS%
cargo build --release --features arrow
+ echo Rust example,%ERRORLEVEL% >> status.txt
+
+ - name: Rename status.txt
+ continue-on-error: true
+ shell: cmd
+ run: |
+ rename status.txt ${{ matrix.runner }}.csv
+
+ - uses: actions/upload-artifact@v4
+ continue-on-error: true
+ with:
+ name: ${{ matrix.runner }}
+ path: ${{ matrix.runner }}.csv
debian-ubuntu-build-test:
strategy:
@@ -221,6 +350,7 @@ jobs:
JAVA_HOME: /usr/lib/jvm/java-17-openjdk-amd64
steps:
- name: Install packages
+ continue-on-error: true
run: |
apt-get update
apt-get install -y ca-certificates curl gnupg
@@ -231,59 +361,101 @@ jobs:
apt-get install -y git build-essential cmake gcc g++ python3 python3-dev python3-pip openjdk-17-jdk nodejs
- uses: actions/checkout@v3
+ continue-on-error: true
- name: Ensure Python dependencies
+ continue-on-error: true
run: |
pip install torch~=2.0.0 --extra-index-url https://download.pytorch.org/whl/cpu ${{ matrix.image != 'debian:11' && '--break-system-packages' || '' }}
pip install --user -r tools/python_api/requirements_dev.txt -f https://data.pyg.org/whl/torch-2.0.0+cpu.html ${{ matrix.image != 'debian:11' && '--break-system-packages' || '' }}
- name: Ensure Node.js dependencies
working-directory: tools/nodejs_api
+ continue-on-error: true
run: npm install --include=dev
- name: Install Rust
+ continue-on-error: true
run: |
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-update-default-toolchain
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- $HOME/.cargo/bin/rustup toolchain install 1.67
+ $HOME/.cargo/bin/rustup toolchain install 1.76
- name: Build
- run: make release NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make release NUM_THREADS=$(nproc)
+ echo "Build,$?" > status.txt
- name: Test
- run: make test NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make test NUM_THREADS=$(nproc)
+ echo "Test,$?" >> status.txt
- name: C and C++ examples
- run: make example NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make example NUM_THREADS=$(nproc)
+ echo "C and C++ examples,$?" >> status.txt
- name: Python test
- run: make pytest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make pytest NUM_THREADS=$(nproc)
+ echo "Python test,$?" >> status.txt
- name: Node.js test
- run: make nodejstest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make nodejstest NUM_THREADS=$(nproc)
+ echo "Node.js test,$?" >> status.txt
- name: Java test
- run: make javatest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make javatest NUM_THREADS=$(nproc)
+ echo "Java test,$?" >> status.txt
- name: Rust share build
+ continue-on-error: true
run: echo '[workspace]\nmembers = ["tools/rust_api","examples/rust"]' > Cargo.toml
- name: Rust set env
+ continue-on-error: true
run: echo "CARGO_BUILD_JOBS=$(nproc)" >> $GITHUB_ENV
- name: Rust test
+ continue-on-error: true
working-directory: tools/rust_api
run: |
cargo test --release --features arrow -- --test-threads=1
+ echo "Rust test,$?" >> ../../status.txt
- name: Rust example
+ continue-on-error: true
working-directory: examples/rust
- run: cargo build --release --features arrow
+ run: |
+ cargo build --release --features arrow
+ echo "Rust example,$?" >> ../../status.txt
+
+ - name: Rename status.txt
+ continue-on-error: true
+ run: |
+ PLATFORM=$(echo ${{ matrix.image }} | tr ':' '-')
+ echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
+ mv status.txt $PLATFORM.csv
+
+ - uses: actions/upload-artifact@v4
+ continue-on-error: true
+ with:
+ name: ${{env.PLATFORM}}
+ path: ${{env.PLATFORM}}.csv
- rhel-fedora-build-test:
+ rhel-fedora-build-test:
strategy:
matrix:
- image: ["rockylinux:8", "rockylinux:9", "fedora:38"]
+ image: ["rockylinux:8", "rockylinux:9", "fedora:38", "fedora:39"]
fail-fast: false
name: ${{ matrix.image }}
runs-on: ubuntu-latest
@@ -296,6 +468,7 @@ jobs:
HOME: /root
steps:
- name: Enable EPEL
+ continue-on-error: true
if: ${{ matrix.image == 'rockylinux:8' || matrix.image == 'rockylinux:9' }}
run: |
dnf update -y
@@ -303,150 +476,121 @@ jobs:
dnf update -y
- name: Enable SHA-1 on Rocky Linux 9
+ continue-on-error: true
if: matrix.image == 'rockylinux:9'
run: update-crypto-policies --set LEGACY
- name: Install packages
+ continue-on-error: true
run: |
curl -fsSL https://rpm.nodesource.com/setup_20.x | bash -
- dnf install -y git cmake ${{ matrix.image == 'rockylinux:8' && 'gcc-toolset-12 python39 python39-devel' || 'gcc gcc-c++ python3-devel' }} java-17-openjdk-devel nodejs
+ dnf install -y git cmake ${{ matrix.image == 'rockylinux:8' && 'gcc-toolset-12 python3.11 python3.11-devel' || 'gcc gcc-c++ python3-devel' }} java-17-openjdk-devel nodejs
- - name: Enable gcc-toolset-12 and python39 on Rocky Linux 8
+ - name: Enable gcc-toolset-12 and python3.11 on Rocky Linux 8
+ continue-on-error: true
if: matrix.image == 'rockylinux:8'
run: |
- alternatives --set python /usr/bin/python3.9
- alternatives --set python3 /usr/bin/python3.9
- echo "PYTHON_EXECUTABLE=/usr/bin/python3.9" >> $GITHUB_ENV
- echo "PYBIND11_PYTHON_VERSION=3.9" >> $GITHUB_ENV
+ alternatives --set python /usr/bin/python3.11
+ alternatives --set python3 /usr/bin/python3.11
+ echo "PYTHON_EXECUTABLE=/usr/bin/python3.11" >> $GITHUB_ENV
+ echo "PYBIND11_PYTHON_VERSION=3.11" >> $GITHUB_ENV
source /opt/rh/gcc-toolset-12/enable
echo $PATH >> $GITHUB_PATH
- uses: actions/checkout@v3
+ continue-on-error: true
- name: Ensure Python dependencies
+ continue-on-error: true
run: |
python3 -m venv /opt/venv
source /opt/venv/bin/activate
- pip3 install torch~=2.0.0 --extra-index-url https://download.pytorch.org/whl/cpu
- pip3 install -r tools/python_api/requirements_dev.txt -f https://data.pyg.org/whl/torch-2.0.0+cpu.html
+ pip3 install torch~=2.2.1 --extra-index-url https://download.pytorch.org/whl/cpu
+ pip3 install -r tools/python_api/requirements_dev.txt -f https://data.pyg.org/whl/torch-2.2.1+cpu.html
- name: Ensure Node.js dependencies
+ continue-on-error: true
working-directory: tools/nodejs_api
run: npm install --include=dev
- name: Install Rust
+ continue-on-error: true
run: |
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-update-default-toolchain
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- $HOME/.cargo/bin/rustup toolchain install 1.67
+ $HOME/.cargo/bin/rustup toolchain install 1.76
- name: Build
- run: make release NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make release NUM_THREADS=$(nproc)
+ echo "Build,$?" > status.txt
- name: Test
- run: make test NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make test NUM_THREADS=$(nproc)
+ echo "Test,$?" >> status.txt
- name: C and C++ examples
- run: make example NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make example NUM_THREADS=$(nproc)
+ echo "C and C++ examples,$?" >> status.txt
- name: Python test
+ continue-on-error: true
run: |
source /opt/venv/bin/activate
make pytest NUM_THREADS=$(nproc)
+ echo "Python test,$?" >> status.txt
- name: Node.js test
- run: make nodejstest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make nodejstest NUM_THREADS=$(nproc)
+ echo "Node.js test,$?" >> status.txt
- name: Java test
- run: make javatest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make javatest NUM_THREADS=$(nproc)
+ echo "Java test,$?" >> status.txt
- name: Rust share build
+ continue-on-error: true
run: echo $'[workspace]\nmembers = ["tools/rust_api","examples/rust"]' > Cargo.toml
- name: Rust set env
+ continue-on-error: true
run: echo "CARGO_BUILD_JOBS=$(nproc)" >> $GITHUB_ENV
- name: Rust test
+ continue-on-error: true
working-directory: tools/rust_api
run: |
cargo test --release --features arrow -- --test-threads=1
+ echo "Rust test,$?" >> ../../status.txt
- name: Rust example
+ continue-on-error: true
working-directory: examples/rust
- run: cargo build --release --features arrow
-
- centos-7-build-test:
- name: centos-7
- runs-on: ubuntu-latest
- container:
- image: quay.io/pypa/manylinux2014_x86_64
- env:
- CC: gcc
- CXX: g++
- PYBIND11_PYTHON_VERSION: 3.10
- PYTHON_EXECUTABLE: /usr/local/bin/python3.10
- JAVA_HOME: /usr/lib/jvm/java-11-openjdk
- steps:
- - name: Install packages
- run: |
- yum update -y
- yum install -y npm java-11-openjdk-devel devtoolset-11
-
- - name: Enable gcc-toolset-11
run: |
- source /opt/rh/devtoolset-11/enable
- echo $PATH >> $GITHUB_PATH
-
- - uses: actions/checkout@v3
-
- - name: Ensure Python dependencies
- run: |
- ln -s /usr/local/bin/python3.10 /usr/bin/python3
- python3 -m pip install torch~=2.0.0 --extra-index-url https://download.pytorch.org/whl/cpu
- python3 -m pip install --user -r tools/python_api/requirements_dev.txt -f https://data.pyg.org/whl/torch-2.0.0+cpu.html
-
- - name: Ensure Node.js dependencies
- working-directory: tools/nodejs_api
- run: npm install --include=dev
-
- - name: Install Rust
- run: |
- curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-update-default-toolchain
- echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- $HOME/.cargo/bin/rustup toolchain install 1.67
-
- - name: Build
- run: make release NUM_THREADS=$(nproc)
-
- - name: Test
- run: make test NUM_THREADS=$(nproc)
-
- - name: C and C++ examples
- run: make example NUM_THREADS=$(nproc)
-
- - name: Python test
- run: make pytest NUM_THREADS=$(nproc)
-
- - name: Node.js test
- run: make nodejstest NUM_THREADS=$(nproc)
-
- - name: Java test
- run: make javatest NUM_THREADS=$(nproc)
-
- - name: Rust share build
- run: echo $'[workspace]\nmembers = ["tools/rust_api","examples/rust"]' > Cargo.toml
-
- - name: Rust set env
- run: echo "CARGO_BUILD_JOBS=$(nproc)" >> $GITHUB_ENV
+ cargo build --release --features arrow
+ echo "Rust example,$?" >> ../../status.txt
- - name: Rust test
- working-directory: tools/rust_api
+ - name: Rename status.txt
+ continue-on-error: true
run: |
- cargo test --release --features arrow -- --test-threads=1
+ PLATFORM=$(echo ${{ matrix.image }} | tr ':' '-')
+ echo "PLATFORM=$PLATFORM" >> $GITHUB_ENV
+ mv status.txt $PLATFORM.csv
- - name: Rust example
- working-directory: examples/rust
- run: cargo build --release --features arrow
+ - uses: actions/upload-artifact@v4
+ continue-on-error: true
+ with:
+ name: ${{env.PLATFORM}}
+ path: ${{env.PLATFORM}}.csv
archlinux-build-test:
name: archlinux
@@ -459,56 +603,135 @@ jobs:
JAVA_HOME: /usr/lib/jvm/java-17-openjdk
steps:
- name: Install packages
+ continue-on-error: true
run: |
pacman -Syu --noconfirm
pacman -S --needed --noconfirm git base-devel cmake gcc python python-pip npm jdk17-openjdk
- uses: actions/checkout@v3
+ continue-on-error: true
- name: Ensure Python dependencies
+ continue-on-error: true
run: |
pip install torch~=2.0.0 --extra-index-url https://download.pytorch.org/whl/cpu --break-system-packages
pip install --user -r tools/python_api/requirements_dev.txt -f https://data.pyg.org/whl/torch-2.0.0+cpu.html --break-system-packages
- name: Ensure Node.js dependencies
+ continue-on-error: true
working-directory: tools/nodejs_api
run: npm install --include=dev
- name: Install Rust
+ continue-on-error: true
run: |
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y --no-update-default-toolchain
echo "$HOME/.cargo/bin" >> $GITHUB_PATH
- $HOME/.cargo/bin/rustup toolchain install 1.67
+ $HOME/.cargo/bin/rustup toolchain install 1.76
- name: Build
- run: make release NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make release NUM_THREADS=$(nproc)
+ echo "Build,$?" > status.txt
- name: Test
- run: make test NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make test NUM_THREADS=$(nproc)
+ echo "Test,$?" >> status.txt
- name: C and C++ examples
- run: make example NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make example NUM_THREADS=$(nproc)
+ echo "C and C++ examples,$?" >> status.txt
- name: Python test
- run: make pytest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make pytest NUM_THREADS=$(nproc)
+ echo "Python test,$?" >> status.txt
- name: Node.js test
- run: make nodejstest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make nodejstest NUM_THREADS=$(nproc)
+ echo "Node.js test,$?" >> status.txt
- name: Java test
- run: make javatest NUM_THREADS=$(nproc)
+ continue-on-error: true
+ run: |
+ make javatest NUM_THREADS=$(nproc)
+ echo "Java test,$?" >> status.txt
- name: Rust share build
+ continue-on-error: true
run: echo $'[workspace]\nmembers = ["tools/rust_api","examples/rust"]' > Cargo.toml
- name: Rust set env
+ continue-on-error: true
run: echo "CARGO_BUILD_JOBS=$(nproc)" >> $GITHUB_ENV
- name: Rust test
working-directory: tools/rust_api
+ continue-on-error: true
run: |
cargo test --release --features arrow -- --test-threads=1
+ echo "Rust test,$?" >> ../../status.txt
- name: Rust example
working-directory: examples/rust
- run: cargo build --release --features arrow
+ continue-on-error: true
+ run: |
+ cargo build --release --features arrow
+ echo "Rust example,$?" >> ../../status.txt
+
+ - name: Rename status.txt
+ continue-on-error: true
+ run: |
+ mv status.txt archlinux.csv
+
+ - uses: actions/upload-artifact@v4
+ continue-on-error: true
+ with:
+ name: archlinux
+ path: archlinux.csv
+
+ collect-results:
+ runs-on: ubuntu-latest
+ env:
+ DISCORD_CHANNEL_ID: ${{ secrets.DISCORD_CHANNEL_ID }}
+ DISCORD_BOT_TOKEN: ${{ secrets.DISCORD_BOT_TOKEN }}
+ GITHUB_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}
+ needs:
+ [
+ ubuntu-2004-build-test,
+ macos-build-test,
+ windows-build-test,
+ debian-ubuntu-build-test,
+ rhel-fedora-build-test,
+ archlinux-build-test,
+ ]
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Download artifacts
+ uses: actions/download-artifact@v4
+ with:
+ path: results
+
+ - name: List artifacts
+ run: ls -R results
+
+ - name: Create summary
+ run: |
+ pip3 install pandas tabulate discord.py
+ python3 scripts/multiplatform-test-helper/collect-results.py results
+ python3 scripts/multiplatform-test-helper/notify-discord.py results.json
+
+ - name: Upload summary
+ uses: actions/upload-artifact@v4
+ with:
+ name: results
+ path: results.md
diff --git a/.gitignore b/.gitignore
index a073684b3d1..6c496cb8e21 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,6 +17,7 @@ compile_commands.json
### Python
# Byte-compiled / optimized / DLL files
__pycache__/
+.venv/
*.py[cod]
*$py.class
cmake-build-debug/
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 8520f18aaa3..88aae897602 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,6 +1,6 @@
cmake_minimum_required(VERSION 3.15)
-project(Kuzu VERSION 0.3.1.1 LANGUAGES CXX C)
+project(Kuzu VERSION 0.3.2.5 LANGUAGES CXX C)
find_package(Threads REQUIRED)
@@ -46,6 +46,7 @@ if(CMAKE_SIZEOF_VOID_P EQUAL 8)
elseif(CMAKE_SIZEOF_VOID_P EQUAL 4)
message(STATUS "32-bit architecture detected")
add_compile_definitions(__32BIT__)
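+ # Also expose the flag as a CMake variable so extension/CMakeLists.txt can skip duckdb_scanner on 32-bit builds.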
+ set(__32BIT__ TRUE)
endif()
if(NOT CMAKE_BUILD_TYPE)
@@ -103,8 +104,8 @@ if(MSVC)
# Enables support for custom hardware exception handling
add_compile_options("/EHa")
# Remove the default to avoid warnings
- STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
- STRING(REPLACE "/EHs" "" CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
+ STRING(REPLACE "/EHsc" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ STRING(REPLACE "/EHs" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
# Store all libraries and binaries in the same directory so that kuzu_shared.dll is found at runtime
set(LIBRARY_OUTPUT_PATH "${CMAKE_BINARY_DIR}/src")
set(EXECUTABLE_OUTPUT_PATH "${CMAKE_BINARY_DIR}/src")
@@ -196,7 +197,7 @@ endif ()
if(${BUILD_KUZU})
add_definitions(-DKUZU_ROOT_DIRECTORY="${PROJECT_SOURCE_DIR}")
add_definitions(-DKUZU_CMAKE_VERSION="${CMAKE_PROJECT_VERSION}")
-add_definitions(-DKUZU_EXTENSION_VERSION="0.1.0")
+add_definitions(-DKUZU_EXTENSION_VERSION="0.2.5")
include_directories(src/include)
include_directories(third_party/antlr4_cypher/include)
@@ -226,7 +227,9 @@ elseif (${BUILD_BENCHMARK})
endif ()
add_subdirectory(tools)
endif ()
-add_subdirectory(extension)
+if (EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/extension/CMakeLists.txt")
+ add_subdirectory(extension)
+endif ()
if (${BUILD_EXAMPLES})
add_subdirectory(examples/c)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 23bd98c367b..4d6ed6826d8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -3,7 +3,7 @@
Welcome to Kùzu! We are excited that you are interested in contributing to Kùzu.
Before submitting your contribution though, please make sure to take a moment and read through the following guidelines.
-Join our project's [Slack workspace](https://join.slack.com/t/kuzudb/shared_invite/zt-1w0thj6s7-0bLaU8Sb~4fDMKJ~oejG_g) for real-time communication with the core team and other contributors.
+Join our project's [Discord community](https://discord.gg/VtX2gw9Rug) for real-time communication with the core team and other contributors.
If you have a question or need help, feel free to ask in the appropriate channel or create an issue.
## Code of Conduct
diff --git a/LICENSE b/LICENSE
index 2eac1b77815..60179b0a3ec 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2022 Kùzu Contributors
+Copyright (c) 2022-2024 Kùzu Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/Makefile b/Makefile
index 3b541b55845..1eeae88d6a6 100644
--- a/Makefile
+++ b/Makefile
@@ -64,7 +64,7 @@ allconfig:
$(call config-cmake-release, \
-DBUILD_BENCHMARK=TRUE \
-DBUILD_EXAMPLES=TRUE \
- -DBUILD_EXTENSIONS=httpfs \
+ -DBUILD_EXTENSIONS="httpfs;duckdb_scanner;postgres_scanner" \
-DBUILD_JAVA=TRUE \
-DBUILD_NODEJS=TRUE \
-DBUILD_PYTHON=TRUE \
@@ -79,7 +79,7 @@ alldebug:
$(call run-cmake-debug, \
-DBUILD_BENCHMARK=TRUE \
-DBUILD_EXAMPLES=TRUE \
- -DBUILD_EXTENSIONS=httpfs \
+ -DBUILD_EXTENSIONS="httpfs;duckdb_scanner;postgres_scanner" \
-DBUILD_JAVA=TRUE \
-DBUILD_NODEJS=TRUE \
-DBUILD_PYTHON=TRUE \
@@ -156,21 +156,21 @@ example:
extension-test:
$(call run-cmake-release, \
- -DBUILD_EXTENSIONS=httpfs \
+ -DBUILD_EXTENSIONS="httpfs;duckdb_scanner;postgres_scanner" \
-DBUILD_EXTENSION_TESTS=TRUE \
)
- ctest --test-dir build/release/extension/httpfs/test --output-on-failure -j ${TEST_JOBS}
+ ctest --test-dir build/release/extension --output-on-failure -j ${TEST_JOBS}
aws s3 rm s3://kuzu-dataset-us/${RUN_ID}/ --recursive
extension-debug:
$(call run-cmake-debug, \
- -DBUILD_EXTENSIONS=httpfs \
+ -DBUILD_EXTENSIONS="httpfs;duckdb_scanner;postgres_scanner" \
-DBUILD_KUZU=FALSE \
)
extension-release:
$(call run-cmake-release, \
- -DBUILD_EXTENSIONS=httpfs \
+ -DBUILD_EXTENSIONS="httpfs;duckdb_scanner;postgres_scanner" \
-DBUILD_KUZU=FALSE \
)
diff --git a/README.md b/README.md
index f229aab0690..00b0fbe4f64 100644
--- a/README.md
+++ b/README.md
@@ -6,17 +6,13 @@
-
-
+
-
-
+
-
-
+
-
-
+
# Kùzu
@@ -46,20 +42,15 @@ Kùzu is available under a permissible license. So try it out and help us make i
| C/C++ | [precompiled binaries](https://github.com/kuzudb/kuzu/releases/latest) |
| CLI | [precompiled binaries](https://github.com/kuzudb/kuzu/releases/latest) |
-To learn more about installation, see our [Installation](https://kuzudb.com/docusaurus/installation/) page.
+To learn more about installation, see our [Installation](https://docs.kuzudb.com/installation) page.
## Getting Started
-Refer to our [Getting Started](https://kuzudb.com/docusaurus/getting-started/) page for your first example.
-
-More information can be found at
-- [Data Import](https://kuzudb.com/docusaurus/data-import/)
-- [Cypher Reference](https://kuzudb.com/docusaurus/cypher/)
-- [Client APIs](https://kuzudb.com/docusaurus/client-apis/)
+Refer to our [Getting Started](https://docs.kuzudb.com/get-started/) page for your first example.
## Build from Source
-Instructions can be found at [Build Kùzu from Source](https://kuzudb.com/docusaurus/development/building-kuzu).
+You can build from source using the instructions provided in the [developer guide](https://docs.kuzudb.com/developer-guide).
## Contributing
We welcome contributions to Kùzu. If you are interested in contributing to Kùzu, please read our [Contributing Guide](CONTRIBUTING.md).
diff --git a/dataset/load-from-test/fixed_list/fixed_list_correct.csv b/dataset/load-from-test/array/array_correct.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/fixed_list_correct.csv
rename to dataset/load-from-test/array/array_correct.csv
diff --git a/dataset/load-from-test/fixed_list/fixed_list_int64.csv b/dataset/load-from-test/array/array_int64.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/fixed_list_int64.csv
rename to dataset/load-from-test/array/array_int64.csv
diff --git a/dataset/load-from-test/fixed_list/fixed_list_null.csv b/dataset/load-from-test/array/array_null.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/fixed_list_null.csv
rename to dataset/load-from-test/array/array_null.csv
diff --git a/dataset/load-from-test/fixed_list/fixed_list_null2.csv b/dataset/load-from-test/array/array_null2.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/fixed_list_null2.csv
rename to dataset/load-from-test/array/array_null2.csv
diff --git a/dataset/load-from-test/fixed_list/unsupported_type.csv b/dataset/load-from-test/array/array_string.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/unsupported_type.csv
rename to dataset/load-from-test/array/array_string.csv
diff --git a/dataset/load-from-test/fixed_list/unsupported_type2.csv b/dataset/load-from-test/array/array_uint8.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/unsupported_type2.csv
rename to dataset/load-from-test/array/array_uint8.csv
diff --git a/dataset/load-from-test/fixed_list/fixed_list_with_null.csv b/dataset/load-from-test/array/array_with_null.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/fixed_list_with_null.csv
rename to dataset/load-from-test/array/array_with_null.csv
diff --git a/dataset/load-from-test/fixed_list/error_conversion.csv b/dataset/load-from-test/array/error_conversion.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/error_conversion.csv
rename to dataset/load-from-test/array/error_conversion.csv
diff --git a/dataset/load-from-test/fixed_list/error_conversion2.csv b/dataset/load-from-test/array/error_conversion2.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/error_conversion2.csv
rename to dataset/load-from-test/array/error_conversion2.csv
diff --git a/dataset/load-from-test/fixed_list/incorrect_num.csv b/dataset/load-from-test/array/incorrect_num.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/incorrect_num.csv
rename to dataset/load-from-test/array/incorrect_num.csv
diff --git a/dataset/load-from-test/fixed_list/long_fixed_list.csv b/dataset/load-from-test/array/long_array.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/long_fixed_list.csv
rename to dataset/load-from-test/array/long_array.csv
diff --git a/dataset/load-from-test/fixed_list/long_fixed_list2.csv b/dataset/load-from-test/array/long_array2.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/long_fixed_list2.csv
rename to dataset/load-from-test/array/long_array2.csv
diff --git a/dataset/load-from-test/fixed_list/zero_ele.csv b/dataset/load-from-test/array/zero_ele.csv
similarity index 100%
rename from dataset/load-from-test/fixed_list/zero_ele.csv
rename to dataset/load-from-test/array/zero_ele.csv
diff --git a/dataset/load-from-test/var_list/bracket_error.csv b/dataset/load-from-test/list/bracket_error.csv
similarity index 100%
rename from dataset/load-from-test/var_list/bracket_error.csv
rename to dataset/load-from-test/list/bracket_error.csv
diff --git a/dataset/load-from-test/var_list/change_config.csv b/dataset/load-from-test/list/change_config.csv
similarity index 100%
rename from dataset/load-from-test/var_list/change_config.csv
rename to dataset/load-from-test/list/change_config.csv
diff --git a/dataset/load-from-test/var_list/change_config_error.csv b/dataset/load-from-test/list/change_config_error.csv
similarity index 100%
rename from dataset/load-from-test/var_list/change_config_error.csv
rename to dataset/load-from-test/list/change_config_error.csv
diff --git a/dataset/load-from-test/var_list/conversion_error.csv b/dataset/load-from-test/list/conversion_error.csv
similarity index 100%
rename from dataset/load-from-test/var_list/conversion_error.csv
rename to dataset/load-from-test/list/conversion_error.csv
diff --git a/dataset/load-from-test/var_list/delim_error.csv b/dataset/load-from-test/list/delim_error.csv
similarity index 100%
rename from dataset/load-from-test/var_list/delim_error.csv
rename to dataset/load-from-test/list/delim_error.csv
diff --git a/dataset/load-from-test/var_list/quote_error.csv b/dataset/load-from-test/list/quote_error.csv
similarity index 100%
rename from dataset/load-from-test/var_list/quote_error.csv
rename to dataset/load-from-test/list/quote_error.csv
diff --git a/dataset/load-from-test/var_list/should_pass.csv b/dataset/load-from-test/list/should_pass.csv
similarity index 100%
rename from dataset/load-from-test/var_list/should_pass.csv
rename to dataset/load-from-test/list/should_pass.csv
diff --git a/dataset/load-from-test/var_list/single_quote.csv b/dataset/load-from-test/list/single_quote.csv
similarity index 100%
rename from dataset/load-from-test/var_list/single_quote.csv
rename to dataset/load-from-test/list/single_quote.csv
diff --git a/dataset/load-from-test/var_list/single_quote2.csv b/dataset/load-from-test/list/single_quote2.csv
similarity index 100%
rename from dataset/load-from-test/var_list/single_quote2.csv
rename to dataset/load-from-test/list/single_quote2.csv
diff --git a/dataset/load-from-test/var_list/single_struct_bracket.csv b/dataset/load-from-test/list/single_struct_bracket.csv
similarity index 100%
rename from dataset/load-from-test/var_list/single_struct_bracket.csv
rename to dataset/load-from-test/list/single_struct_bracket.csv
diff --git a/dataset/load-from-test/struct/struct_with_fixed_list.csv b/dataset/load-from-test/struct/struct_with_array.csv
similarity index 100%
rename from dataset/load-from-test/struct/struct_with_fixed_list.csv
rename to dataset/load-from-test/struct/struct_with_array.csv
diff --git a/dataset/long-string-pk-tests/schema.cypher b/dataset/long-string-pk-tests/schema.cypher
index c2d3476cb82..014a774e006 100644
--- a/dataset/long-string-pk-tests/schema.cypher
+++ b/dataset/long-string-pk-tests/schema.cypher
@@ -1,2 +1,2 @@
-CREATE NODE TABLE Person(name STRING, spouse STRING, PRIMARY KEY (name))
+CREATE NODE TABLE Person (name STRING, spouse STRING, PRIMARY KEY (name))
create REL TABLE Knows (FROM Person TO Person);
diff --git a/dataset/tinysnb/copy.cypher b/dataset/tinysnb/copy.cypher
index f918361f5cb..1e852119b3c 100644
--- a/dataset/tinysnb/copy.cypher
+++ b/dataset/tinysnb/copy.cypher
@@ -1,7 +1,9 @@
+CALL ENABLE_MULTI_COPY=true;
COPY person FROM "dataset/tinysnb/vPerson.csv" (HeaDER=true, deLim=',');
COPY organisation FROM "dataset/tinysnb/vOrganisation.csv";
COPY movies FROM "dataset/tinysnb/vMovies.csv";
COPY knows FROM "dataset/tinysnb/eKnows.csv";
+COPY knows FROM "dataset/tinysnb/eKnows_2.csv";
COPY studyAt FROM "dataset/tinysnb/eStudyAt.csv" (HeaDER=true);
COPY workAt FROM "dataset/tinysnb/eWorkAt.csv";
COPY meets FROM "dataset/tinysnb/eMeets.csv";
diff --git a/dataset/tinysnb/eKnows.csv b/dataset/tinysnb/eKnows.csv
index 471e63a0c2d..4d9278b3061 100644
--- a/dataset/tinysnb/eKnows.csv
+++ b/dataset/tinysnb/eKnows.csv
@@ -4,11 +4,3 @@
2,0,2021-06-30,1946-08-25 19:07:22,10 years 5 months 13 hours 24 us,"[2huh9y89fsfw23,23nsihufhw723]","{locations:['paris'], transfer: {day: 2000-01-01, amount: [20, 5000]}}",4,
2,3,1950-05-14,1946-08-25 19:07:22,23 minutes,"[fwehu9h9832wewew,23u9h989sdfsss]","{locations:['paris'], transfer: {day: 2011-05-01, amount: [2000, 5340]}}","cool stuff found",
2,5,1950-05-14,2012-12-11 20:07:22,20 years 30 days 48 hours,"[fwh9y81232uisuiehuf,ewnuihxy8dyf232]","{locations:['vancouver'], transfer: {day: 2020-01-01, amount: [120, 50]}}","matthew perry",
-3,0,2021-06-30,2002-07-31 11:42:53.12342,30 hours 40 days,"[fnioh8323aeweae34d,osd89e2ejshuih12]","{locations:['london','toronto'], transfer: {day: 2012-11-21, amount: [223, 5230]}}",10,
-3,2,1950-05-14,2007-02-12 12:11:42.123,28 minutes 30 milliseconds,"[fwh983-sdjisdfji,ioh89y32r2huir]","{locations:['paris','beijing'], transfer: {day: 2011-03-11, amount: [2323, 50]}}",1,
-3,5,2000-01-01,1998-10-02 13:09:22.423,300 milliseconds,"[psh989823oaaioe,nuiuah1nosndfisf]","{locations:[], transfer: {day: 1980-11-21, amount: [20, 5]}}",2,
-5,0,2021-06-30,1936-11-02 11:02:01,480us,"[fwewe]","{locations:['shanghai','nanjing'], transfer: {day: 1998-11-12, amount: [22, 53240]}}",15,
-5,2,1950-05-14,1982-11-11 13:12:05.123,23 minutes,"[fewh9182912e3,h9y8y89soidfsf,nuhudf78w78efw,hioshe0f9023sdsd]","{locations:['paris'], transfer: {day: 2000-01-01, amount: [20, 5000]}}","happy new year",
-5,3,2000-01-01,1999-04-21 15:12:11.42,48 hours 52 milliseconds,"[23h9sdslnfowhu2932,shuhf98922323sf]","{locations:['paris'], transfer: {day: 2000-01-01, amount: [20, 5000]}}",4,
-7,8,1905-12-12,2025-01-01 11:22:33.52,47 minutes 58 seconds,"[ahu2333333333333,12weeeeeeeeeeeeeeeeee]","{locations:['toronto','thisisalongcityname'], transfer: {day: 1930-11-22, amount: [18, 323]}}",8,
-7,9,1905-12-12,2020-03-01 12:11:41.6552,47 minutes 58 seconds,"[peweeeeeeeeeeeeeeeee,kowje9w0eweeeeeeeee]","{locations:['waterloo'], transfer: {day: 2000-01-01, amount: [1000, 5000]}}",10,
diff --git a/dataset/tinysnb/eKnows_2.csv b/dataset/tinysnb/eKnows_2.csv
new file mode 100644
index 00000000000..5b023bcf814
--- /dev/null
+++ b/dataset/tinysnb/eKnows_2.csv
@@ -0,0 +1,8 @@
+3,0,2021-06-30,2002-07-31 11:42:53.12342,30 hours 40 days,"[fnioh8323aeweae34d,osd89e2ejshuih12]","{locations:['london','toronto'], transfer: {day: 2012-11-21, amount: [223, 5230]}}",10,
+3,2,1950-05-14,2007-02-12 12:11:42.123,28 minutes 30 milliseconds,"[fwh983-sdjisdfji,ioh89y32r2huir]","{locations:['paris','beijing'], transfer: {day: 2011-03-11, amount: [2323, 50]}}",1,
+3,5,2000-01-01,1998-10-02 13:09:22.423,300 milliseconds,"[psh989823oaaioe,nuiuah1nosndfisf]","{locations:[], transfer: {day: 1980-11-21, amount: [20, 5]}}",2,
+5,0,2021-06-30,1936-11-02 11:02:01,480us,"[fwewe]","{locations:['shanghai','nanjing'], transfer: {day: 1998-11-12, amount: [22, 53240]}}",15,
+5,2,1950-05-14,1982-11-11 13:12:05.123,23 minutes,"[fewh9182912e3,h9y8y89soidfsf,nuhudf78w78efw,hioshe0f9023sdsd]","{locations:['paris'], transfer: {day: 2000-01-01, amount: [20, 5000]}}","happy new year",
+5,3,2000-01-01,1999-04-21 15:12:11.42,48 hours 52 milliseconds,"[23h9sdslnfowhu2932,shuhf98922323sf]","{locations:['paris'], transfer: {day: 2000-01-01, amount: [20, 5000]}}",4,
+7,8,1905-12-12,2025-01-01 11:22:33.52,47 minutes 58 seconds,"[ahu2333333333333,12weeeeeeeeeeeeeeeeee]","{locations:['toronto','thisisalongcityname'], transfer: {day: 1930-11-22, amount: [18, 323]}}",8,
+7,9,1905-12-12,2020-03-01 12:11:41.6552,47 minutes 58 seconds,"[peweeeeeeeeeeeeeeeee,kowje9w0eweeeeeeeee]","{locations:['waterloo'], transfer: {day: 2000-01-01, amount: [1000, 5000]}}",10,
diff --git a/examples/rust/Cargo.lock b/examples/rust/Cargo.lock
index f6e2e480e29..d47685b733c 100644
--- a/examples/rust/Cargo.lock
+++ b/examples/rust/Cargo.lock
@@ -404,7 +404,7 @@ dependencies = [
[[package]]
name = "kuzu"
-version = "0.3.1"
+version = "0.3.2"
dependencies = [
"arrow",
"cmake",
diff --git a/extension/CMakeLists.txt b/extension/CMakeLists.txt
index 162294bf26b..0f362842346 100644
--- a/extension/CMakeLists.txt
+++ b/extension/CMakeLists.txt
@@ -1,3 +1,29 @@
if("httpfs" IN_LIST BUILD_EXTENSIONS)
add_subdirectory(httpfs)
endif()
+
+if ("duckdb_scanner" IN_LIST BUILD_EXTENSIONS)
+ if(NOT __32BIT__)
+ # DuckDB does not officially support 32-bit builds, so we disable the
+ # extension for 32-bit builds
+ add_subdirectory(duckdb_scanner)
+ endif()
+endif()
+
+if ("postgres_scanner" IN_LIST BUILD_EXTENSIONS)
+ add_subdirectory(postgres_scanner)
+endif()
+
+if (${BUILD_EXTENSION_TESTS})
+ add_definitions(-DTEST_FILES_DIR="extension")
+ add_subdirectory(${CMAKE_SOURCE_DIR}/test/gtest ${CMAKE_CURRENT_BINARY_DIR}/test/gtest EXCLUDE_FROM_ALL)
+ # Make gtest available to subdirectories.
+ add_library(GTest::GTest INTERFACE IMPORTED)
+ target_link_libraries(GTest::GTest INTERFACE gtest_main)
+ target_link_libraries(GTest::GTest INTERFACE gmock_main)
+ enable_testing()
+ add_subdirectory(${CMAKE_SOURCE_DIR}/test/test_helper ${CMAKE_CURRENT_BINARY_DIR}/test/test_helper)
+ add_subdirectory(${CMAKE_SOURCE_DIR}/test/test_runner ${CMAKE_CURRENT_BINARY_DIR}/test/test_runner)
+ add_subdirectory(${CMAKE_SOURCE_DIR}/test/graph_test ${CMAKE_CURRENT_BINARY_DIR}/test/graph_test)
+ add_subdirectory(${CMAKE_SOURCE_DIR}/test/runner ${CMAKE_CURRENT_BINARY_DIR}/test/runner)
+endif()
diff --git a/extension/duckdb_scanner/CMakeLists.txt b/extension/duckdb_scanner/CMakeLists.txt
new file mode 100644
index 00000000000..0d1279f5996
--- /dev/null
+++ b/extension/duckdb_scanner/CMakeLists.txt
@@ -0,0 +1,56 @@
+find_package(DuckDB REQUIRED)
+
+include_directories(
+ ${PROJECT_SOURCE_DIR}/src/include
+ src/include
+ ${DuckDB_INCLUDE_DIRS})
+
+add_library(duckdb_scanner
+ SHARED
+ src/duckdb_scanner_extension.cpp
+ src/duckdb_storage.cpp
+ src/duckdb_scan.cpp
+ src/duckdb_type_converter.cpp
+ src/duckdb_catalog.cpp
+ src/duckdb_table_catalog_entry.cpp)
+
+set_target_properties(duckdb_scanner
+ PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build"
+ LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build"
+ RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build"
+)
+
+target_link_libraries(duckdb_scanner
+ PRIVATE
+ ${DuckDB_LIBRARIES})
+
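+# Name the output lib<name>.kuzu_extension so Kùzu can load it as an extension.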
+set_target_properties(duckdb_scanner PROPERTIES
+ OUTPUT_NAME duckdb_scanner
+ PREFIX "lib"
+ SUFFIX ".kuzu_extension"
+)
+
+if (WIN32)
+ # On Windows, there is no dynamic lookup available, so it's not
+ # possible to generically look for symbols on library load. There are
+ # two main alternatives to statically linking kuzu, neither of which is
+ # appealing:
+ # 1. Link against the shared library. This works well assuming
+ # the DLL is locatable, but this assumption isn't valid for users
+ # of kuzu_shell.exe.
+ # 2. Link against the executable (kuzu_shell.exe). This is
+ # strange but works well for kuzu_shell.exe. However, it forces
+ # users who are embedding kuzu in their application to recompile
+ # the extension _and_ export the symbols for the extension to
+ # locate on load.
+ # We choose the simplest option. Windows isn't known
+ # for its small libraries anyway...
+ # Future work could make it possible to embed extensions into kuzu,
+ # which would help fix this problem.
+ target_link_libraries(duckdb_scanner PRIVATE kuzu)
+endif()
+
+if (APPLE)
+ set_target_properties(duckdb_scanner PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
+endif ()
diff --git a/extension/duckdb_scanner/src/duckdb_catalog.cpp b/extension/duckdb_scanner/src/duckdb_catalog.cpp
new file mode 100644
index 00000000000..cf11fea6ac8
--- /dev/null
+++ b/extension/duckdb_scanner/src/duckdb_catalog.cpp
@@ -0,0 +1,134 @@
+#include "duckdb_catalog.h"
+
+#include "common/exception/binder.h"
+#include "duckdb_table_catalog_entry.h"
+#include "duckdb_type_converter.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+void DuckDBCatalogContent::init(const std::string& dbPath, const std::string& catalogName,
+ main::ClientContext* context) {
+ auto [db, con] = getConnection(dbPath);
+ auto query = common::stringFormat(
+ "select table_name from information_schema.tables where table_catalog = '{}' and "
+ "table_schema = '{}';",
+ catalogName, getDefaultSchemaName());
+ auto result = con.Query(query);
+    std::unique_ptr<duckdb::DataChunk> resultChunk;
+ try {
+ resultChunk = result->Fetch();
+ } catch (std::exception& e) {
+ throw common::BinderException(e.what());
+ }
+ if (resultChunk->size() == 0) {
+ return;
+ }
+ common::ValueVector tableNamesVector{*common::LogicalType::STRING(),
+ context->getMemoryManager()};
+ duckdb_scanner::duckdb_conversion_func_t conversionFunc;
+ duckdb_scanner::getDuckDBVectorConversionFunc(common::PhysicalTypeID::STRING, conversionFunc);
+ conversionFunc(resultChunk->data[0], tableNamesVector, resultChunk->size());
+ for (auto i = 0u; i < resultChunk->size(); i++) {
+        auto tableName = tableNamesVector.getValue<common::ku_string_t>(i).getAsString();
+ createForeignTable(con, tableName, dbPath, catalogName);
+ }
+}
+
+static std::string getQuery(const binder::BoundCreateTableInfo& info) {
+    auto extraInfo = common::ku_dynamic_cast<binder::BoundExtraCreateTableInfo*, BoundExtraCreateDuckDBTableInfo*>(info.extraInfo.get());
+ return common::stringFormat("SELECT * FROM {}.{}.{}", extraInfo->catalogName,
+ extraInfo->schemaName, info.tableName);
+}
+
+void DuckDBCatalogContent::createForeignTable(duckdb::Connection& con, const std::string& tableName,
+ const std::string& dbPath, const std::string& catalogName) {
+ auto tableID = assignNextTableID();
+ auto info = bindCreateTableInfo(con, tableName, dbPath, catalogName);
+ if (info == nullptr) {
+ return;
+ }
+    auto extraInfo = common::ku_dynamic_cast<binder::BoundExtraCreateTableInfo*, BoundExtraCreateDuckDBTableInfo*>(info->extraInfo.get());
+    std::vector<common::LogicalType> columnTypes;
+    std::vector<std::string> columnNames;
+ for (auto& propertyInfo : extraInfo->propertyInfos) {
+ columnNames.push_back(propertyInfo.name);
+ columnTypes.push_back(propertyInfo.type);
+ }
+ DuckDBScanBindData bindData(getQuery(*info), std::move(columnTypes), std::move(columnNames),
+ std::bind(&DuckDBCatalogContent::getConnection, this, dbPath));
+    auto tableEntry = std::make_unique<catalog::DuckDBTableCatalogEntry>(info->tableName, tableID,
+ getScanFunction(std::move(bindData)));
+ for (auto& propertyInfo : extraInfo->propertyInfos) {
+ tableEntry->addProperty(propertyInfo.name, propertyInfo.type.copy());
+ }
+ tables->createEntry(std::move(tableEntry));
+}
+
+static bool getTableInfo(duckdb::Connection& con, const std::string& tableName,
+ const std::string& schemaName, const std::string& catalogName,
+    std::vector<common::LogicalType>& columnTypes, std::vector<std::string>& columnNames) {
+ auto query =
+ common::stringFormat("select data_type,column_name from information_schema.columns where "
+ "table_name = '{}' and table_schema = '{}' and table_catalog = '{}';",
+ tableName, schemaName, catalogName);
+ auto result = con.Query(query);
+ if (result->RowCount() == 0) {
+ return false;
+ }
+ columnTypes.reserve(result->RowCount());
+ columnNames.reserve(result->RowCount());
+ for (auto i = 0u; i < result->RowCount(); i++) {
+ try {
+ columnTypes.push_back(DuckDBTypeConverter::convertDuckDBType(
+                result->GetValue(0, i).GetValue<std::string>()));
+ } catch (common::BinderException& e) {
+ return false;
+ }
+        columnNames.push_back(result->GetValue(1, i).GetValue<std::string>());
+ }
+ return true;
+}
+
+bool DuckDBCatalogContent::bindPropertyInfos(duckdb::Connection& con, const std::string& tableName,
+    const std::string& catalogName, std::vector<binder::PropertyInfo>& propertyInfos) {
+    std::vector<common::LogicalType> columnTypes;
+    std::vector<std::string> columnNames;
+ if (!getTableInfo(con, tableName, getDefaultSchemaName(), catalogName, columnTypes,
+ columnNames)) {
+ return false;
+ }
+ for (auto i = 0u; i < columnNames.size(); i++) {
+ auto propertyInfo = binder::PropertyInfo(columnNames[i], columnTypes[i]);
+ propertyInfos.push_back(std::move(propertyInfo));
+ }
+ return true;
+}
+
+std::unique_ptr<binder::BoundCreateTableInfo> DuckDBCatalogContent::bindCreateTableInfo(
+ duckdb::Connection& con, const std::string& tableName, const std::string& dbPath,
+ const std::string& catalogName) {
+    std::vector<binder::PropertyInfo> propertyInfos;
+ if (!bindPropertyInfos(con, tableName, catalogName, propertyInfos)) {
+ return nullptr;
+ }
+    return std::make_unique<binder::BoundCreateTableInfo>(common::TableType::FOREIGN, tableName,
+        std::make_unique<BoundExtraCreateDuckDBTableInfo>(dbPath, catalogName,
+ getDefaultSchemaName(), std::move(propertyInfos)));
+}
+
+std::string DuckDBCatalogContent::getDefaultSchemaName() const {
+ return "main";
+}
+
+std::pair<duckdb::DuckDB, duckdb::Connection> DuckDBCatalogContent::getConnection(
+ const std::string& dbPath) const {
+ duckdb::DuckDB db(dbPath);
+ duckdb::Connection con(db);
+ return std::make_pair(std::move(db), std::move(con));
+}
+
+} // namespace duckdb_scanner
+} // namespace kuzu
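
The catalog bootstrap above is plain DuckDB introspection: open the attached file, list the tables in the default schema, then read each table's column names and types from information_schema. A minimal standalone sketch of that introspection using only the public DuckDB C++ API follows; the "tinysnb.db" path is a placeholder and the catalog filter is dropped for brevity.

```cpp
// Sketch: list the tables and columns of a DuckDB file the way
// DuckDBCatalogContent::init and getTableInfo do, via information_schema.
// "tinysnb.db" is a placeholder path; link against libduckdb.
#include <iostream>
#include "duckdb.hpp"

int main() {
    duckdb::DuckDB db("tinysnb.db");
    duckdb::Connection con(db);
    auto tables = con.Query(
        "select table_name from information_schema.tables where table_schema = 'main';");
    for (duckdb::idx_t i = 0; i < tables->RowCount(); i++) {
        auto tableName = tables->GetValue(0, i).GetValue<std::string>();
        auto columns = con.Query(
            "select column_name, data_type from information_schema.columns "
            "where table_name = '" + tableName + "';");
        std::cout << tableName << "\n";
        for (duckdb::idx_t j = 0; j < columns->RowCount(); j++) {
            std::cout << "  " << columns->GetValue(0, j).GetValue<std::string>() << " : "
                      << columns->GetValue(1, j).GetValue<std::string>() << "\n";
        }
    }
    return 0;
}
```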
diff --git a/extension/duckdb_scanner/src/duckdb_scan.cpp b/extension/duckdb_scanner/src/duckdb_scan.cpp
new file mode 100644
index 00000000000..e1ac8454e00
--- /dev/null
+++ b/extension/duckdb_scanner/src/duckdb_scan.cpp
@@ -0,0 +1,233 @@
+#include "duckdb_scan.h"
+
+#include "common/types/types.h"
+#include "function/table/bind_input.h"
+
+using namespace kuzu::function;
+using namespace kuzu::common;
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+void getDuckDBVectorConversionFunc(PhysicalTypeID physicalTypeID,
+ duckdb_conversion_func_t& conversion_func);
+
+DuckDBScanBindData::DuckDBScanBindData(std::string query,
+    std::vector<common::LogicalType> columnTypes, std::vector<std::string> columnNames,
+ init_duckdb_conn_t initDuckDBConn)
+ : TableFuncBindData{std::move(columnTypes), std::move(columnNames)}, query{std::move(query)},
+ initDuckDBConn{std::move(initDuckDBConn)} {
+ conversionFunctions.resize(this->columnTypes.size());
+ for (auto i = 0u; i < this->columnTypes.size(); i++) {
+ getDuckDBVectorConversionFunc(this->columnTypes[i].getPhysicalType(),
+ conversionFunctions[i]);
+ }
+}
+
+std::unique_ptr<function::TableFuncBindData> DuckDBScanBindData::copy() const {
+    return std::make_unique<DuckDBScanBindData>(query, columnTypes, columnNames, initDuckDBConn);
+}
+
+DuckDBScanSharedState::DuckDBScanSharedState(std::unique_ptr<duckdb::QueryResult> queryResult)
+ : TableFuncSharedState(), queryResult{std::move(queryResult)} {}
+
+struct DuckDBScanFunction {
+ static constexpr char DUCKDB_SCAN_FUNC_NAME[] = "duckdb_scan";
+
+ static common::offset_t tableFunc(function::TableFuncInput& input,
+ function::TableFuncOutput& output);
+
+    static std::unique_ptr<function::TableFuncBindData> bindFunc(DuckDBScanBindData bindData,
+ main::ClientContext* /*context*/, function::TableFuncBindInput* input);
+
+    static std::unique_ptr<function::TableFuncSharedState> initSharedState(
+ function::TableFunctionInitInput& input);
+
+    static std::unique_ptr<function::TableFuncLocalState> initLocalState(
+ function::TableFunctionInitInput& input, function::TableFuncSharedState* state,
+ storage::MemoryManager*
+ /*mm*/);
+};
+
+std::unique_ptr<function::TableFuncSharedState> DuckDBScanFunction::initSharedState(
+    function::TableFunctionInitInput& input) {
+    auto scanBindData = reinterpret_cast<DuckDBScanBindData*>(input.bindData);
+ auto [db, conn] = scanBindData->initDuckDBConn();
+ auto result = conn.SendQuery(scanBindData->query);
+ if (result->HasError()) {
+ throw common::RuntimeException(
+ common::stringFormat("Failed to execute query: {} in duckdb.", result->GetError()));
+ }
+    return std::make_unique<DuckDBScanSharedState>(std::move(result));
+}
+
+std::unique_ptr<function::TableFuncLocalState> DuckDBScanFunction::initLocalState(
+ function::TableFunctionInitInput& /*input*/, function::TableFuncSharedState* /*state*/,
+ storage::MemoryManager* /*mm*/) {
+    return std::make_unique<function::TableFuncLocalState>();
+}
+
+template<typename T>
+void convertDuckDBVectorToVector(duckdb::Vector& duckDBVector, ValueVector& result,
+ uint64_t numValuesToCopy) {
+ auto duckDBData = (T*)duckDBVector.GetData();
+ auto validityMasks = duckdb::FlatVector::Validity(duckDBVector);
+ memcpy(result.getData(), duckDBData, numValuesToCopy * result.getNumBytesPerValue());
+ for (auto i = 0u; i < numValuesToCopy; i++) {
+ result.setNull(i, !validityMasks.RowIsValid(i));
+ }
+}
+
+template<>
+void convertDuckDBVectorToVector<list_entry_t>(duckdb::Vector& duckDBVector, ValueVector& result,
+    uint64_t numValuesToCopy);
+template<>
+void convertDuckDBVectorToVector<struct_entry_t>(duckdb::Vector& duckDBVector, ValueVector& result,
+    uint64_t numValuesToCopy);
+
+template<>
+void convertDuckDBVectorToVector<ku_string_t>(duckdb::Vector& duckDBVector, ValueVector& result,
+ uint64_t numValuesToCopy) {
+    auto strs = reinterpret_cast<duckdb::string_t*>(duckDBVector.GetData());
+ auto validityMasks = duckdb::FlatVector::Validity(duckDBVector);
+ for (auto i = 0u; i < numValuesToCopy; i++) {
+ result.setNull(i, !validityMasks.RowIsValid(i));
+ if (!result.isNull(i)) {
+ result.setValue(i, strs[i].GetString());
+ }
+ }
+}
+
+void getDuckDBVectorConversionFunc(PhysicalTypeID physicalTypeID,
+ duckdb_conversion_func_t& conversion_func) {
+ switch (physicalTypeID) {
+    case PhysicalTypeID::BOOL: {
+        conversion_func = convertDuckDBVectorToVector<bool>;
+    } break;
+    case PhysicalTypeID::INT128: {
+        conversion_func = convertDuckDBVectorToVector<int128_t>;
+    } break;
+    case PhysicalTypeID::INT64: {
+        conversion_func = convertDuckDBVectorToVector<int64_t>;
+    } break;
+    case PhysicalTypeID::INT32: {
+        conversion_func = convertDuckDBVectorToVector<int32_t>;
+    } break;
+    case PhysicalTypeID::INT16: {
+        conversion_func = convertDuckDBVectorToVector<int16_t>;
+    } break;
+    case PhysicalTypeID::INT8: {
+        conversion_func = convertDuckDBVectorToVector<int8_t>;
+    } break;
+    case PhysicalTypeID::UINT64: {
+        conversion_func = convertDuckDBVectorToVector<uint64_t>;
+    } break;
+    case PhysicalTypeID::UINT32: {
+        conversion_func = convertDuckDBVectorToVector<uint32_t>;
+    } break;
+    case PhysicalTypeID::UINT16: {
+        conversion_func = convertDuckDBVectorToVector<uint16_t>;
+    } break;
+    case PhysicalTypeID::UINT8: {
+        conversion_func = convertDuckDBVectorToVector<uint8_t>;
+    } break;
+    case PhysicalTypeID::DOUBLE: {
+        conversion_func = convertDuckDBVectorToVector<double>;
+    } break;
+    case PhysicalTypeID::FLOAT: {
+        conversion_func = convertDuckDBVectorToVector<float>;
+    } break;
+    case PhysicalTypeID::STRING: {
+        conversion_func = convertDuckDBVectorToVector<ku_string_t>;
+    } break;
+    case PhysicalTypeID::INTERVAL: {
+        conversion_func = convertDuckDBVectorToVector<interval_t>;
+    } break;
+    case PhysicalTypeID::LIST: {
+        conversion_func = convertDuckDBVectorToVector<list_entry_t>;
+    } break;
+    case PhysicalTypeID::STRUCT: {
+        conversion_func = convertDuckDBVectorToVector<struct_entry_t>;
+    } break;
+ default:
+ KU_UNREACHABLE;
+ }
+}
+
+template<>
+void convertDuckDBVectorToVector<list_entry_t>(duckdb::Vector& duckDBVector, ValueVector& result,
+ uint64_t numValuesToCopy) {
+ memcpy(result.getData(), duckDBVector.GetData(),
+ numValuesToCopy * result.getNumBytesPerValue());
+ auto numValuesInDataVec = 0;
+    auto listEntries = reinterpret_cast<duckdb::list_entry_t*>(duckDBVector.GetData());
+ auto validityMasks = duckdb::FlatVector::Validity(duckDBVector);
+ for (auto i = 0u; i < numValuesToCopy; i++) {
+ result.setNull(i, !validityMasks.RowIsValid(i));
+ if (!result.isNull(i)) {
+ numValuesInDataVec += listEntries[i].length;
+ }
+ }
+ ListVector::resizeDataVector(&result, numValuesInDataVec);
+ auto dataVec = ListVector::getDataVector(&result);
+ duckdb_conversion_func_t conversion_func;
+ getDuckDBVectorConversionFunc(dataVec->dataType.getPhysicalType(), conversion_func);
+ conversion_func(duckdb::ListVector::GetEntry(duckDBVector), *dataVec, numValuesInDataVec);
+}
+
+template<>
+void convertDuckDBVectorToVector<struct_entry_t>(duckdb::Vector& duckDBVector, ValueVector& result,
+ uint64_t numValuesToCopy) {
+ auto& duckdbChildrenVectors = duckdb::StructVector::GetEntries(duckDBVector);
+ for (auto i = 0u; i < duckdbChildrenVectors.size(); i++) {
+ duckdb_conversion_func_t conversionFunc;
+ auto& duckdbChildVector = duckdbChildrenVectors[i];
+ auto fieldVector = StructVector::getFieldVector(&result, i);
+ getDuckDBVectorConversionFunc(fieldVector->dataType.getPhysicalType(), conversionFunc);
+ conversionFunc(*duckdbChildVector, *fieldVector, numValuesToCopy);
+ }
+}
+
+static void convertDuckDBResultToVector(duckdb::DataChunk& duckDBResult, DataChunk& result,
+    std::vector<duckdb_conversion_func_t> conversionFuncs) {
+ for (auto i = 0u; i < conversionFuncs.size(); i++) {
+ result.state->selVector->selectedSize = duckDBResult.size();
+ assert(duckDBResult.data[i].GetVectorType() == duckdb::VectorType::FLAT_VECTOR);
+ conversionFuncs[i](duckDBResult.data[i], *result.getValueVector(i),
+ result.state->selVector->selectedSize);
+ }
+}
+
+common::offset_t DuckDBScanFunction::tableFunc(function::TableFuncInput& input,
+ function::TableFuncOutput& output) {
+    auto duckdbScanSharedState = reinterpret_cast<DuckDBScanSharedState*>(input.sharedState);
+    auto duckdbScanBindData = reinterpret_cast<DuckDBScanBindData*>(input.bindData);
+    std::unique_ptr<duckdb::DataChunk> result;
+ try {
+ result = duckdbScanSharedState->queryResult->Fetch();
+ } catch (std::exception& e) {
+ return 0;
+ }
+ if (result == nullptr) {
+ return 0;
+ }
+ convertDuckDBResultToVector(*result, output.dataChunk, duckdbScanBindData->conversionFunctions);
+ return output.dataChunk.state->selVector->selectedSize;
+}
+
+std::unique_ptr<function::TableFuncBindData> DuckDBScanFunction::bindFunc(
+ DuckDBScanBindData bindData, main::ClientContext* /*clientContext*/,
+ function::TableFuncBindInput* /*input*/) {
+ return bindData.copy();
+}
+
+TableFunction getScanFunction(DuckDBScanBindData bindData) {
+ return TableFunction(DuckDBScanFunction::DUCKDB_SCAN_FUNC_NAME, DuckDBScanFunction::tableFunc,
+ std::bind(DuckDBScanFunction::bindFunc, std::move(bindData), std::placeholders::_1,
+ std::placeholders::_2),
+ DuckDBScanFunction::initSharedState, DuckDBScanFunction::initLocalState,
+        std::vector<common::LogicalTypeID>{});
+}
+
+} // namespace duckdb_scanner
+} // namespace kuzu
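
The scan path above streams the DuckDB result chunk by chunk and copies each flat column plus its validity mask into kuzu vectors. A reduced sketch of that loop against an in-memory DuckDB database, without any kuzu types, is below; the table, column, and INT64-only handling are assumptions made to keep it self-contained.

```cpp
// Sketch: stream a DuckDB result chunk by chunk and copy one flat BIGINT
// column plus its null mask, mirroring DuckDBScanFunction::tableFunc and the
// numeric convertDuckDBVectorToVector path. Uses an in-memory database; link
// against libduckdb.
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>
#include "duckdb.hpp"

int main() {
    duckdb::DuckDB db(nullptr); // in-memory database for the example
    duckdb::Connection con(db);
    con.Query("CREATE TABLE t AS SELECT CASE WHEN i % 3 = 0 THEN NULL ELSE i END AS v "
              "FROM range(10) tbl(i);");
    auto result = con.SendQuery("SELECT v FROM t;");
    std::vector<int64_t> values;
    std::vector<bool> isNull;
    while (auto chunk = result->Fetch()) { // nullptr once the result is exhausted
        auto& vec = chunk->data[0];        // assumed flat, as the scanner asserts
        auto data = duckdb::FlatVector::GetData<int64_t>(vec);
        auto& validity = duckdb::FlatVector::Validity(vec);
        for (duckdb::idx_t i = 0; i < chunk->size(); i++) {
            values.push_back(data[i]);
            isNull.push_back(!validity.RowIsValid(i));
        }
    }
    for (size_t i = 0; i < values.size(); i++) {
        std::cout << (isNull[i] ? std::string("NULL") : std::to_string(values[i])) << "\n";
    }
    return 0;
}
```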
diff --git a/extension/duckdb_scanner/src/duckdb_scanner_extension.cpp b/extension/duckdb_scanner/src/duckdb_scanner_extension.cpp
new file mode 100644
index 00000000000..a215592aae6
--- /dev/null
+++ b/extension/duckdb_scanner/src/duckdb_scanner_extension.cpp
@@ -0,0 +1,28 @@
+#include "duckdb_scanner_extension.h"
+
+#include "duckdb_scan.h"
+#include "duckdb_storage.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+void DuckDBScannerExtension::load(main::ClientContext* context) {
+ auto db = context->getDatabase();
+    db->registerStorageExtension("duckdb", std::make_unique<DuckDBStorageExtension>());
+}
+
+} // namespace duckdb_scanner
+} // namespace kuzu
+
+extern "C" {
+// Because we link against the static library on windows, we implicitly inherit KUZU_STATIC_DEFINE,
+// which cancels out any exporting, so we can't use KUZU_API.
+#if defined(_WIN32)
+#define INIT_EXPORT __declspec(dllexport)
+#else
+#define INIT_EXPORT __attribute__((visibility("default")))
+#endif
+INIT_EXPORT void init(kuzu::main::ClientContext* context) {
+ kuzu::duckdb_scanner::DuckDBScannerExtension::load(context);
+}
+}
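
The extension exposes a single extern "C" init symbol so a loader can resolve it at runtime. A minimal POSIX sketch of that resolution with dlopen/dlsym follows; the relative path and the void* context signature are placeholder assumptions, since the real loader is kuzu's extension framework and passes a main::ClientContext*.

```cpp
// Sketch: resolve the extern "C" init symbol from the built extension with
// dlopen/dlsym (POSIX). The real loader passes a kuzu::main::ClientContext*;
// the void* signature and the relative path are placeholder assumptions.
#include <cstdio>
#include <dlfcn.h>

using init_fn = void (*)(void* /* ClientContext* */);

int main() {
    void* handle = dlopen("build/libduckdb_scanner.kuzu_extension", RTLD_NOW | RTLD_LOCAL);
    if (!handle) {
        std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
        return 1;
    }
    auto init = reinterpret_cast<init_fn>(dlsym(handle, "init"));
    if (!init) {
        std::fprintf(stderr, "dlsym failed: %s\n", dlerror());
        dlclose(handle);
        return 1;
    }
    // init(context) would be invoked here with a live client context.
    dlclose(handle);
    return 0;
}
```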
diff --git a/extension/duckdb_scanner/src/duckdb_storage.cpp b/extension/duckdb_scanner/src/duckdb_storage.cpp
new file mode 100644
index 00000000000..78dc791bc3c
--- /dev/null
+++ b/extension/duckdb_scanner/src/duckdb_storage.cpp
@@ -0,0 +1,33 @@
+#include "duckdb_storage.h"
+
+#include "catalog/catalog_entry/table_catalog_entry.h"
+#include "common/exception/binder.h"
+#include "duckdb_catalog.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+std::unique_ptr<main::AttachedDatabase> attachDuckDB(std::string dbName, std::string dbPath,
+ main::ClientContext* clientContext) {
+ if (dbName == "") {
+ if (dbPath.find('.') != std::string::npos) {
+ auto fileNamePos = dbPath.find_last_of('/') + 1;
+ dbName = dbPath.substr(fileNamePos, dbPath.find_last_of('.') - fileNamePos);
+ } else {
+ dbName = dbPath;
+ }
+ }
+    auto duckdbCatalog = std::make_unique<DuckDBCatalogContent>();
+    duckdbCatalog->init(dbPath, dbName, clientContext);
+    return std::make_unique<main::AttachedDatabase>(dbName, std::move(duckdbCatalog));
+}
+
+DuckDBStorageExtension::DuckDBStorageExtension() : StorageExtension{attachDuckDB} {}
+
+bool DuckDBStorageExtension::canHandleDB(std::string dbType) const {
+ common::StringUtils::toUpper(dbType);
+ return dbType == "DUCKDB";
+}
+
+} // namespace duckdb_scanner
+} // namespace kuzu
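
attachDuckDB falls back to deriving the database alias from the file path when no explicit name is given. The rule is small enough to lift out on its own; a standalone sketch with a couple of asserted examples:

```cpp
// Sketch: the default-alias rule from attachDuckDB. If the path has no '.',
// the path itself is the name; otherwise the name is the file stem.
#include <cassert>
#include <string>

static std::string defaultDBName(const std::string& dbPath) {
    if (dbPath.find('.') == std::string::npos) {
        return dbPath;
    }
    auto fileNamePos = dbPath.find_last_of('/') + 1;
    return dbPath.substr(fileNamePos, dbPath.find_last_of('.') - fileNamePos);
}

int main() {
    assert(defaultDBName("extension/duckdb_scanner/test/duckdb_database/tinysnb.db") == "tinysnb");
    assert(defaultDBName("other") == "other");
    return 0;
}
```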
diff --git a/extension/duckdb_scanner/src/duckdb_table_catalog_entry.cpp b/extension/duckdb_scanner/src/duckdb_table_catalog_entry.cpp
new file mode 100644
index 00000000000..fcc1c5c522a
--- /dev/null
+++ b/extension/duckdb_scanner/src/duckdb_table_catalog_entry.cpp
@@ -0,0 +1,20 @@
+#include "duckdb_table_catalog_entry.h"
+
+namespace kuzu {
+namespace catalog {
+
+DuckDBTableCatalogEntry::DuckDBTableCatalogEntry(std::string name, common::table_id_t tableID,
+ function::TableFunction scanFunction)
+ : TableCatalogEntry{CatalogEntryType::FOREIGN_TABLE_ENTRY, std::move(name), tableID},
+ scanFunction{std::move(scanFunction)} {}
+
+common::TableType DuckDBTableCatalogEntry::getTableType() const {
+ return common::TableType::FOREIGN;
+}
+
+std::unique_ptr<CatalogEntry> DuckDBTableCatalogEntry::copy() const {
+    return std::make_unique<DuckDBTableCatalogEntry>(*this);
+}
+
+} // namespace catalog
+} // namespace kuzu
diff --git a/extension/duckdb_scanner/src/duckdb_type_converter.cpp b/extension/duckdb_scanner/src/duckdb_type_converter.cpp
new file mode 100644
index 00000000000..f7212080145
--- /dev/null
+++ b/extension/duckdb_scanner/src/duckdb_type_converter.cpp
@@ -0,0 +1,134 @@
+#include "duckdb_type_converter.h"
+
+#include "common/exception/binder.h"
+#include "common/string_utils.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+using namespace kuzu::common;
+
+common::LogicalType DuckDBTypeConverter::convertDuckDBType(std::string typeStr) {
+ typeStr = common::StringUtils::ltrim(typeStr);
+ typeStr = common::StringUtils::rtrim(typeStr);
+ if (typeStr == "BIGINT" || typeStr == "INT8" || typeStr == "LONG") {
+ return LogicalType{LogicalTypeID::INT64};
+ } else if (typeStr == "BLOB" || typeStr == "BYTEA" || typeStr == "BINARY" ||
+ typeStr == "VARBINARY") {
+ return LogicalType{LogicalTypeID::BLOB};
+ } else if (typeStr == "BOOLEAN" || typeStr == "BOOL" || typeStr == "LOGICAL") {
+ return LogicalType{LogicalTypeID::BOOL};
+ } else if (typeStr == "DATE") {
+ return LogicalType{LogicalTypeID::DATE};
+ } else if (typeStr == "DOUBLE" || typeStr == "FLOAT8") {
+ return LogicalType{LogicalTypeID::DOUBLE};
+ } else if (typeStr == "HUGEINT") {
+ return LogicalType{LogicalTypeID::INT128};
+ } else if (typeStr == "INTEGER" || typeStr == "INT4" || typeStr == "INT" ||
+ typeStr == "SIGNED") {
+ return LogicalType{LogicalTypeID::INT32};
+ } else if (typeStr == "INTERVAL") {
+ return LogicalType{LogicalTypeID::INTERVAL};
+ } else if (typeStr == "REAL" || typeStr == "FLOAT" || typeStr == "FLOAT4") {
+ return LogicalType{LogicalTypeID::FLOAT};
+ } else if (typeStr == "SMALLINT" || typeStr == "INT2" || typeStr == "SHORT") {
+ return LogicalType{LogicalTypeID::INT16};
+ } else if (typeStr == "TIMESTAMP" || typeStr == "DATETIME") {
+ return LogicalType{LogicalTypeID::TIMESTAMP};
+ } else if (typeStr == "TIMESTAMP_NS") {
+ return LogicalType{LogicalTypeID::TIMESTAMP_NS};
+ } else if (typeStr == "TIMESTAMP_MS") {
+ return LogicalType{LogicalTypeID::TIMESTAMP_MS};
+ } else if (typeStr == "TIMESTAMP_S") {
+ return LogicalType{LogicalTypeID::TIMESTAMP_SEC};
+ } else if (typeStr == "TIMESTAMP WITH TIME ZONE" || typeStr == "TIMESTAMPTZ") {
+ return LogicalType{LogicalTypeID::TIMESTAMP_TZ};
+ } else if (typeStr == "TINYINT" || typeStr == "INT1") {
+ return LogicalType{LogicalTypeID::INT8};
+ } else if (typeStr == "UBIGINT") {
+ return LogicalType{LogicalTypeID::UINT64};
+ } else if (typeStr == "UINTEGER") {
+ return LogicalType{LogicalTypeID::UINT32};
+ } else if (typeStr == "USMALLINT") {
+ return LogicalType{LogicalTypeID::UINT16};
+ } else if (typeStr == "UTINYINT") {
+ return LogicalType{LogicalTypeID::UINT8};
+ } else if (typeStr == "UUID") {
+ return LogicalType{LogicalTypeID::UUID};
+ } else if (typeStr == "VARCHAR" || typeStr == "CHAR" || typeStr == "BPCHAR" ||
+ typeStr == "TEXT" || typeStr == "STRING") {
+ return LogicalType{LogicalTypeID::STRING};
+ } else if (typeStr.ends_with("[]")) {
+ auto innerType = convertDuckDBType(typeStr.substr(0, typeStr.size() - 2));
+ return *LogicalType::LIST(innerType.copy());
+ } else if (typeStr.starts_with("STRUCT")) {
+ return *LogicalType::STRUCT(parseStructTypeInfo(typeStr));
+ } else if (typeStr.starts_with("UNION")) {
+ auto unionFields = parseStructTypeInfo(typeStr);
+ auto unionTagField = StructField(UnionType::TAG_FIELD_NAME,
+            std::make_unique<LogicalType>(UnionType::TAG_FIELD_TYPE));
+ unionFields.insert(unionFields.begin(), std::move(unionTagField));
+ return *LogicalType::UNION(std::move(unionFields));
+ } else if (typeStr.starts_with("MAP")) {
+ auto leftBracketPos = typeStr.find('(');
+ auto rightBracketPos = typeStr.find_last_of(')');
+ auto mapTypeStr = typeStr.substr(leftBracketPos + 1, rightBracketPos - leftBracketPos - 1);
+ auto keyValueTypes = StringUtils::splitComma(mapTypeStr);
+ return *LogicalType::MAP(convertDuckDBType(keyValueTypes[0]),
+ convertDuckDBType(keyValueTypes[1]));
+ }
+ throw BinderException{stringFormat("Unsupported duckdb type: {}.", typeStr)};
+}
+
+std::vector<std::string> DuckDBTypeConverter::parseStructFields(const std::string& structTypeStr) {
+    std::vector<std::string> structFieldsStr;
+ auto startPos = 0u;
+ auto curPos = 0u;
+ auto numOpenBrackets = 0u;
+ while (curPos < structTypeStr.length()) {
+ switch (structTypeStr[curPos]) {
+ case '(': {
+ numOpenBrackets++;
+ } break;
+ case ')': {
+ numOpenBrackets--;
+ } break;
+ case ',': {
+ if (numOpenBrackets == 0) {
+ structFieldsStr.push_back(
+ StringUtils::ltrim(structTypeStr.substr(startPos, curPos - startPos)));
+ startPos = curPos + 1;
+ }
+ } break;
+ default: {
+ // Normal character, continue.
+ }
+ }
+ curPos++;
+ }
+ structFieldsStr.push_back(
+ StringUtils::ltrim(structTypeStr.substr(startPos, curPos - startPos)));
+ return structFieldsStr;
+}
+
+std::vector<StructField> DuckDBTypeConverter::parseStructTypeInfo(
+ const std::string& structTypeStr) {
+ auto leftBracketPos = structTypeStr.find('(');
+ auto rightBracketPos = structTypeStr.find_last_of(')');
+ // Remove the leading and trailing brackets.
+ auto structFieldsStr =
+ structTypeStr.substr(leftBracketPos + 1, rightBracketPos - leftBracketPos - 1);
+    std::vector<StructField> structFields;
+ auto structFieldStrs = parseStructFields(structFieldsStr);
+ for (auto& structFieldStr : structFieldStrs) {
+ auto pos = structFieldStr.find(' ');
+ auto fieldName = structFieldStr.substr(0, pos);
+ auto fieldTypeString = structFieldStr.substr(pos + 1);
+ structFields.emplace_back(fieldName,
+            std::make_unique<LogicalType>(DuckDBTypeConverter::convertDuckDBType(fieldTypeString)));
+ }
+ return structFields;
+}
+
+} // namespace duckdb_scanner
+} // namespace kuzu
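
parseStructFields splits a DuckDB STRUCT/UNION type string on top-level commas only, tracking bracket depth so nested composite types stay intact. A standalone sketch of that splitter, with a hand-written example type string:

```cpp
// Sketch: split "name TYPE, name TYPE, ..." on top-level commas only, the way
// parseStructFields tracks bracket depth so nested STRUCT(...)/MAP(...) stay
// whole. The example type string is hand-written.
#include <iostream>
#include <string>
#include <vector>

static std::vector<std::string> splitTopLevel(const std::string& s) {
    auto ltrim = [](std::string piece) {
        auto pos = piece.find_first_not_of(' ');
        return pos == std::string::npos ? std::string{} : piece.substr(pos);
    };
    std::vector<std::string> fields;
    size_t start = 0;
    int depth = 0;
    for (size_t i = 0; i < s.size(); i++) {
        if (s[i] == '(') {
            depth++;
        } else if (s[i] == ')') {
            depth--;
        } else if (s[i] == ',' && depth == 0) {
            fields.push_back(ltrim(s.substr(start, i - start)));
            start = i + 1;
        }
    }
    fields.push_back(ltrim(s.substr(start)));
    return fields;
}

int main() {
    for (auto& field : splitTopLevel("price DOUBLE[], volume BIGINT, meta STRUCT(a INT, b TEXT)")) {
        std::cout << field << "\n";
    }
    return 0;
}
```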
diff --git a/extension/duckdb_scanner/src/include/duckdb_catalog.h b/extension/duckdb_scanner/src/include/duckdb_catalog.h
new file mode 100644
index 00000000000..2ecb068003c
--- /dev/null
+++ b/extension/duckdb_scanner/src/include/duckdb_catalog.h
@@ -0,0 +1,55 @@
+#pragma once
+
+#include "binder/ddl/bound_create_table_info.h"
+#include "catalog/catalog_content.h"
+#include "duckdb_scan.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+struct BoundExtraCreateDuckDBTableInfo : public binder::BoundExtraCreateTableInfo {
+ std::string dbPath;
+ std::string catalogName;
+ std::string schemaName;
+
+ BoundExtraCreateDuckDBTableInfo(std::string dbPath, std::string catalogName,
+        std::string schemaName, std::vector<binder::PropertyInfo> propertyInfos)
+ : BoundExtraCreateTableInfo{std::move(propertyInfos)}, dbPath{std::move(dbPath)},
+ catalogName{std::move(catalogName)}, schemaName{std::move(schemaName)} {}
+ BoundExtraCreateDuckDBTableInfo(const BoundExtraCreateDuckDBTableInfo& other)
+ : BoundExtraCreateTableInfo{copyVector(other.propertyInfos)}, dbPath{other.dbPath},
+ catalogName{other.catalogName}, schemaName{other.schemaName} {}
+
+    std::unique_ptr<binder::BoundExtraCreateTableInfo> copy() const override {
+        return std::make_unique<BoundExtraCreateDuckDBTableInfo>(*this);
+ }
+};
+
+class DuckDBCatalogContent : public catalog::CatalogContent {
+public:
+ DuckDBCatalogContent() : catalog::CatalogContent{nullptr /* vfs */} {}
+
+ virtual void init(const std::string& dbPath, const std::string& catalogName,
+ main::ClientContext* context);
+
+protected:
+ bool bindPropertyInfos(duckdb::Connection& con, const std::string& tableName,
+        const std::string& catalogName, std::vector<binder::PropertyInfo>& propertyInfos);
+
+private:
+    virtual std::unique_ptr<binder::BoundCreateTableInfo> bindCreateTableInfo(
+ duckdb::Connection& con, const std::string& tableName, const std::string& dbPath,
+ const std::string& catalogName);
+
+ virtual std::string getDefaultSchemaName() const;
+
+    virtual std::pair<duckdb::DuckDB, duckdb::Connection> getConnection(
+ const std::string& dbPath) const;
+
+private:
+ void createForeignTable(duckdb::Connection& con, const std::string& tableName,
+ const std::string& dbPath, const std::string& catalogName);
+};
+
+} // namespace duckdb_scanner
+} // namespace kuzu
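
The catalog hands each foreign table a zero-argument connection factory (init_duckdb_conn_t), built with std::bind in duckdb_catalog.cpp, so every scan can open its own DuckDB connection lazily. A minimal sketch of that pattern against the public DuckDB API; the "tinysnb.db" path is a placeholder.

```cpp
// Sketch: the deferred-connection pattern behind init_duckdb_conn_t. The
// catalog binds the database path into a zero-argument factory; each scan
// later calls the factory to get its own DuckDB connection. "tinysnb.db" is a
// placeholder path; link against libduckdb.
#include <functional>
#include <string>
#include <utility>
#include "duckdb.hpp"

using init_conn_t = std::function<std::pair<duckdb::DuckDB, duckdb::Connection>()>;

static std::pair<duckdb::DuckDB, duckdb::Connection> openConnection(const std::string& dbPath) {
    duckdb::DuckDB db(dbPath);
    duckdb::Connection con(db);
    return std::make_pair(std::move(db), std::move(con));
}

int main() {
    init_conn_t factory = std::bind(openConnection, std::string("tinysnb.db"));
    auto [db, con] = factory(); // every caller gets a fresh connection
    auto result = con.Query("select count(*) from information_schema.tables;");
    result->Print();
    return 0;
}
```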
diff --git a/extension/duckdb_scanner/src/include/duckdb_scan.h b/extension/duckdb_scanner/src/include/duckdb_scan.h
new file mode 100644
index 00000000000..28c2c600247
--- /dev/null
+++ b/extension/duckdb_scanner/src/include/duckdb_scan.h
@@ -0,0 +1,46 @@
+#pragma once
+
+#include "common/vector/value_vector.h"
+#include "function/scalar_function.h"
+#include "function/table/bind_data.h"
+#include "function/table_functions.h"
+
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wunused-parameter"
+// Suppress warnings from duckdb.hpp
+#undef ARROW_FLAG_DICTIONARY_ORDERED
+#include "common/types/types.h"
+#include "duckdb.hpp"
+#pragma GCC diagnostic pop
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+using duckdb_conversion_func_t = std::function<void(duckdb::Vector&, common::ValueVector&, uint64_t)>;
+using init_duckdb_conn_t = std::function<std::pair<duckdb::DuckDB, duckdb::Connection>()>;
+
+struct DuckDBScanBindData : public function::TableFuncBindData {
+    explicit DuckDBScanBindData(std::string query, std::vector<common::LogicalType> columnTypes,
+        std::vector<std::string> columnNames, init_duckdb_conn_t initDuckDBConn);
+
+    std::unique_ptr<function::TableFuncBindData> copy() const override;
+
+ std::string query;
+    std::vector<duckdb_conversion_func_t> conversionFunctions;
+ init_duckdb_conn_t initDuckDBConn;
+};
+
+struct DuckDBScanSharedState : public function::TableFuncSharedState {
+    explicit DuckDBScanSharedState(std::unique_ptr<duckdb::QueryResult> queryResult);
+
+    std::unique_ptr<duckdb::QueryResult> queryResult;
+};
+
+void getDuckDBVectorConversionFunc(common::PhysicalTypeID physicalTypeID,
+ duckdb_conversion_func_t& conversion_func);
+
+function::TableFunction getScanFunction(DuckDBScanBindData bindData);
+
+} // namespace duckdb_scanner
+} // namespace kuzu
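
duckdb_conversion_func_t lets the bind data resolve one type-erased copy routine per column up front and reuse it for every chunk. The idea, reduced to plain buffers and a two-entry type enum (all names here are illustrative, not kuzu API):

```cpp
// Sketch: the per-column dispatch idea behind duckdb_conversion_func_t,
// reduced to plain buffers. A copy routine is looked up once per column type
// and then reused for every chunk. All names here are illustrative, not kuzu
// API.
#include <cstdint>
#include <cstring>
#include <functional>
#include <iostream>
#include <vector>

enum class PhysType { INT64, DOUBLE };
using conversion_func_t = std::function<void(const void* src, void* dst, uint64_t count)>;

template<typename T>
static void copyFlat(const void* src, void* dst, uint64_t count) {
    std::memcpy(dst, src, count * sizeof(T));
}

static conversion_func_t getConversionFunc(PhysType type) {
    switch (type) {
    case PhysType::INT64:
        return copyFlat<int64_t>;
    case PhysType::DOUBLE:
        return copyFlat<double>;
    }
    return {};
}

int main() {
    std::vector<int64_t> src{1, 2, 3}, dst(3);
    auto func = getConversionFunc(PhysType::INT64); // resolved once at bind time
    func(src.data(), dst.data(), src.size());       // called once per chunk
    std::cout << dst[0] + dst[1] + dst[2] << "\n";  // prints 6
    return 0;
}
```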
diff --git a/extension/duckdb_scanner/src/include/duckdb_scanner_extension.h b/extension/duckdb_scanner/src/include/duckdb_scanner_extension.h
new file mode 100644
index 00000000000..2911768f0cc
--- /dev/null
+++ b/extension/duckdb_scanner/src/include/duckdb_scanner_extension.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include "extension/extension.h"
+#include "main/database.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+class DuckDBScannerExtension final : public extension::Extension {
+public:
+ static void load(main::ClientContext* context);
+};
+
+} // namespace duckdb_scanner
+} // namespace kuzu
diff --git a/extension/duckdb_scanner/src/include/duckdb_storage.h b/extension/duckdb_scanner/src/include/duckdb_storage.h
new file mode 100644
index 00000000000..aed030ba93f
--- /dev/null
+++ b/extension/duckdb_scanner/src/include/duckdb_storage.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include "common/string_utils.h"
+#include "storage/storage_extension.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+class DuckDBStorageExtension final : public storage::StorageExtension {
+public:
+ DuckDBStorageExtension();
+
+ bool canHandleDB(std::string dbType) const override;
+};
+
+} // namespace duckdb_scanner
+} // namespace kuzu
diff --git a/extension/duckdb_scanner/src/include/duckdb_table_catalog_entry.h b/extension/duckdb_scanner/src/include/duckdb_table_catalog_entry.h
new file mode 100644
index 00000000000..8ba4b5bdb92
--- /dev/null
+++ b/extension/duckdb_scanner/src/include/duckdb_table_catalog_entry.h
@@ -0,0 +1,33 @@
+#pragma once
+
+#include "catalog/catalog_entry/table_catalog_entry.h"
+#include "function/table_functions.h"
+
+namespace kuzu {
+namespace catalog {
+
+class DuckDBTableCatalogEntry final : public TableCatalogEntry {
+public:
+ //===--------------------------------------------------------------------===//
+ // constructors
+ //===--------------------------------------------------------------------===//
+ DuckDBTableCatalogEntry(std::string name, common::table_id_t tableID,
+ function::TableFunction scanFunction);
+
+ //===--------------------------------------------------------------------===//
+ // getter & setter
+ //===--------------------------------------------------------------------===//
+ common::TableType getTableType() const override;
+ function::TableFunction getScanFunction() override { return scanFunction; }
+
+ //===--------------------------------------------------------------------===//
+ // serialization & deserialization
+ //===--------------------------------------------------------------------===//
+ std::unique_ptr copy() const override;
+
+private:
+ function::TableFunction scanFunction;
+};
+
+} // namespace catalog
+} // namespace kuzu
diff --git a/extension/duckdb_scanner/src/include/duckdb_type_converter.h b/extension/duckdb_scanner/src/include/duckdb_type_converter.h
new file mode 100644
index 00000000000..5c4de9e9925
--- /dev/null
+++ b/extension/duckdb_scanner/src/include/duckdb_type_converter.h
@@ -0,0 +1,18 @@
+#pragma once
+
+#include "common/types/types.h"
+
+namespace kuzu {
+namespace duckdb_scanner {
+
+class DuckDBTypeConverter {
+public:
+ static common::LogicalType convertDuckDBType(std::string typeStr);
+
+private:
+    static std::vector<std::string> parseStructFields(const std::string& structTypeStr);
+    static std::vector<common::StructField> parseStructTypeInfo(const std::string& structTypeStr);
+};
+
+} // namespace duckdb_scanner
+} // namespace kuzu
diff --git a/extension/duckdb_scanner/test/duckdb_database/other.db b/extension/duckdb_scanner/test/duckdb_database/other.db
new file mode 100644
index 00000000000..7cbc6ecccf9
Binary files /dev/null and b/extension/duckdb_scanner/test/duckdb_database/other.db differ
diff --git a/extension/duckdb_scanner/test/duckdb_database/tinysnb.db b/extension/duckdb_scanner/test/duckdb_database/tinysnb.db
new file mode 100644
index 00000000000..5a14eee06ad
Binary files /dev/null and b/extension/duckdb_scanner/test/duckdb_database/tinysnb.db differ
diff --git a/extension/duckdb_scanner/test/test_files/duckdb_scanner.test b/extension/duckdb_scanner/test/test_files/duckdb_scanner.test
new file mode 100644
index 00000000000..aa5a891cd63
--- /dev/null
+++ b/extension/duckdb_scanner/test/test_files/duckdb_scanner.test
@@ -0,0 +1,64 @@
+-GROUP DuckDBScanner
+-DATASET CSV empty
+
+--
+
+-CASE ScanDuckDBTable
+-STATEMENT load extension "${KUZU_ROOT_DIRECTORY}/extension/duckdb_scanner/build/libduckdb_scanner.kuzu_extension"
+---- ok
+-STATEMENT ATTACH '${KUZU_ROOT_DIRECTORY}/extension/duckdb_scanner/test/duckdb_database/tinysnb.db' as tinysnb (dbtype 'duckdb');
+---- ok
+-STATEMENT LOAD FROM tinysnb_person RETURN *;
+---- 8
+0|Alice|1|True|False|35|5.000000|1900-01-01|2011-08-20 11:25:30|3 years 2 days 13:02:00|[10,5]|[Aida]|[[10,8],[6,7,8]]|1.731000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+10|Hubert Blaine Wolfeschlegelsteinhausenbergerdorff|2|False|True|83|4.900000|1990-11-27|2023-02-21 13:25:30|3 years 2 days 13:02:00|[10,11,12,3,4,5,6,7]|[Ad,De,Hi,Kye,Orlan]|[[7],[10],[6,7]]|1.323000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a18
+2|Bob|2|True|False|30|5.100000|1900-01-01|2008-11-03 15:25:30.000526|10 years 5 months 13:00:00.000024|[12,8]|[Bobby]|[[8,9],[9,10]]|0.990000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a12
+3|Carol|1|False|True|45|5.000000|1940-06-22|1911-08-20 02:32:21|48:24:11|[4,5]|[Carmen,Fred]|[[8,10]]|1.000000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a13
+5|Dan|2|False|True|20|4.800000|1950-07-23|2031-11-30 12:25:30|10 years 5 months 13:00:00.000024|[1,9]|[Wolfeschlegelstein,Daniel]|[[7,4],[8,8],[9]]|1.300000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14
+7|Elizabeth|1|False|True|20|4.700000|1980-10-26|1976-12-23 11:21:42|48:24:11|[2]|[Ein]|[[6],[7],[8]]|1.463000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a15
+8|Farooq|2|True|False|25|4.500000|1980-10-26|1972-07-31 13:22:30.678559|00:18:00.024|[3,4,5,6,7]|[Fesdwe]|[[8]]|1.510000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a16
+9|Greg|2|False|False|40|4.900000|1980-10-26|1976-12-23 11:21:42|10 years 5 months 13:00:00.000024|[1]|[Grad]|[[10]]|1.600000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17
+-STATEMENT LOAD FROM tinysnb_organisation RETURN *;
+---- 3
+1|ABFsUni|325|3.700000|-2|10 years 5 months 13 hours 24 us|3 years 5 days|1.000000|{revenue: 138, "location": ['toronto','montr,eal'], stock: {price: [96,56], volume: 1000}}|3.12
+4|CsWork|934|4.100000|-100|2 years 4 days 10 hours|26 years 52 days 48:00:00|0.780000|{revenue: 152, "location": ["vanco,uver north area"], stock: {price: [15,78,671], volume: 432}}|abcd
+6|DEsWork|824|4.100000|7|2 years 4 hours 22 us 34 minutes|82:00:00.1|0.520000|{revenue: 558, "location": ['very long city name','new york'], stock: {price: [22], volume: 99}}|2023-12-15
+-STATEMENT LOAD FROM tinysnb_movies RETURN *;
+---- 3
+Roma|298|the movie is very interesting and funny|{rating: 1223.000000, stars: 100, "views": 10003, "release": 2011-02-11 16:44:22, release_ns: 2011-02-11 16:44:22.123456, release_ms: 2011-02-11 16:44:22.123, release_sec: 2011-02-11 16:44:22, release_tz: 2011-02-11 16:44:22.123456+00, film: 2013-02-22, u8: 1, u16: 15, u32: 200, u64: 4, hugedata: -15}|pure ascii characters|{}
+Sóló cón tu párejâ|126| this is a very very good movie|{rating: 5.300000, stars: 2, "views": 152, "release": 2011-08-20 11:25:30, release_ns: 2011-08-20 11:25:30.123456, release_ms: 2011-08-20 11:25:30.123, release_sec: 2011-08-20 11:25:30, release_tz: 2011-08-20 11:25:30.123456+00, film: 2012-05-11, u8: 220, u16: 20, u32: 1, u64: 180, hugedata: 1844674407370955161811111111}|\xAA\xABinteresting\x0B|{audience1=52, audience53=42}
+The 😂😃🧘🏻♂️🌍🌦️🍞🚗 movie|2544| the movie is very very good|{rating: 7.000000, stars: 10, "views": 982, "release": 2018-11-13 13:33:11, release_ns: 2018-11-13 13:33:11.123456, release_ms: 2018-11-13 13:33:11.123, release_sec: 2018-11-13 13:33:11, release_tz: 2018-11-13 13:33:11.123456+00, film: 2014-09-12, u8: 12, u16: 120, u32: 55, u64: 1, hugedata: -1844674407370955161511}|\xAB\xCD|{audience1=33}
+-STATEMENT LOAD FROM tinysnb_tableOfTypes RETURN count(*);
+---- 1
+49999
+-STATEMENT LOAD FROM tinysnb_tableOfTypes WHERE id = 49992 or id = 28532 RETURN *;
+---- 2
+28532|74|72.472423|True|1977-08-16|TKn|[94,92]|[AUSrJTUWVOESDor,ODOS6RfqMhsFO9aFUa,ziauQj]|[[123,55,181],[32]]|{ID: 666, "name": DiqSQ5u5UhS8aZi}
+49992|50|31.582059|False|2056-05-02||[62,24,94]|[LpQO8OT3x45a]|[[268,281,166],[144,16,126,208,298],[22,287]]|{ID: 936, "name": sGPSafxMAhKiP}
+-STATEMENT LOAD FROM tinysnb_person1 RETURN *;
+---- error
+Catalog exception: Table: person1 does not exist.
+-STATEMENT DETACH tinysnb;
+---- ok
+-STATEMENT LOAD FROM tinysnb_person RETURN *;
+---- error
+Binder exception: No database named tinysnb has been attached.
+-LOG AttachMultipleDuckDB
+-STATEMENT ATTACH '${KUZU_ROOT_DIRECTORY}/extension/duckdb_scanner/test/duckdb_database/tinysnb.db' (dbtype 'duckdb');
+---- ok
+-STATEMENT ATTACH '${KUZU_ROOT_DIRECTORY}/extension/duckdb_scanner/test/duckdb_database/other.db' as other (dbtype 'duckdb');
+---- ok
+-STATEMENT LOAD FROM other_person RETURN *;
+---- 4
+1
+2
+3
+5
+-STATEMENT LOAD FROM tinysnb_person RETURN count(*);
+---- 1
+8
+
+-CASE InvalidDuckDBDatabase
+-STATEMENT LOAD FROM tinysnb1_person RETURN *;
+---- error
+Binder exception: No database named tinysnb1 has been attached.
diff --git a/extension/httpfs/CMakeLists.txt b/extension/httpfs/CMakeLists.txt
index 813386e6a70..8c6b0f1d4f5 100644
--- a/extension/httpfs/CMakeLists.txt
+++ b/extension/httpfs/CMakeLists.txt
@@ -31,6 +31,7 @@ set_target_properties(httpfs PROPERTIES
PREFIX "lib"
SUFFIX ".kuzu_extension"
)
+
set_target_properties(httpfs
PROPERTIES
ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build"
diff --git a/extension/httpfs/src/crypto.cpp b/extension/httpfs/src/crypto.cpp
index 4d345170e80..637f79754ec 100644
--- a/extension/httpfs/src/crypto.cpp
+++ b/extension/httpfs/src/crypto.cpp
@@ -26,10 +26,10 @@ void hmac256(const std::string& message, const char* secret, size_t secretLen, h
}
if (mbedtls_md_setup(&hmacCtx, mdType, 1) ||
- mbedtls_md_hmac_starts(
-            &hmacCtx, reinterpret_cast<const unsigned char*>(secret), secretLen) ||
- mbedtls_md_hmac_update(
-            &hmacCtx, reinterpret_cast<const unsigned char*>(message.c_str()), message.length()) ||
+        mbedtls_md_hmac_starts(&hmacCtx, reinterpret_cast<const unsigned char*>(secret),
+ secretLen) ||
+        mbedtls_md_hmac_update(&hmacCtx, reinterpret_cast<const unsigned char*>(message.c_str()),
+ message.length()) ||
mbedtls_md_hmac_finish(&hmacCtx, reinterpret_cast<unsigned char*>(out))) {
throw common::RuntimeException("HMAC256 Error");
}
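
For reference, the HMAC helper this hunk reformats follows the usual mbedtls_md sequence: setup with HMAC enabled, starts with the key, update with the message, finish into a 32-byte digest. A standalone sketch under the assumption of SHA-256 and throwaway key/message literals:

```cpp
// Sketch: HMAC-SHA256 with the mbedtls_md calls reformatted above. The key
// and message literals are throwaway examples; link against mbedtls.
#include <cstdio>
#include <cstring>
#include "mbedtls/md.h"

int main() {
    const char* key = "secret";
    const char* message = "payload";
    unsigned char out[32]; // SHA-256 digest length

    mbedtls_md_context_t ctx;
    mbedtls_md_init(&ctx);
    const mbedtls_md_info_t* mdInfo = mbedtls_md_info_from_type(MBEDTLS_MD_SHA256);
    int failed = mbedtls_md_setup(&ctx, mdInfo, 1 /* enable HMAC */) ||
                 mbedtls_md_hmac_starts(&ctx, reinterpret_cast<const unsigned char*>(key),
                     std::strlen(key)) ||
                 mbedtls_md_hmac_update(&ctx, reinterpret_cast<const unsigned char*>(message),
                     std::strlen(message)) ||
                 mbedtls_md_hmac_finish(&ctx, out);
    mbedtls_md_free(&ctx);
    if (failed) {
        std::fprintf(stderr, "HMAC256 error\n");
        return 1;
    }
    for (unsigned char byte : out) {
        std::printf("%02x", byte);
    }
    std::printf("\n");
    return 0;
}
```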
diff --git a/extension/httpfs/src/httpfs.cpp b/extension/httpfs/src/httpfs.cpp
index 3da57dedf63..e0e1d9ac000 100644
--- a/extension/httpfs/src/httpfs.cpp
+++ b/extension/httpfs/src/httpfs.cpp
@@ -16,8 +16,8 @@ HTTPResponse::HTTPResponse(httplib::Response& res, const std::string& url)
}
HTTPFileInfo::HTTPFileInfo(std::string path, FileSystem* fileSystem, int flags)
- : FileInfo{std::move(path), fileSystem}, flags{flags}, length{0},
- availableBuffer{0}, bufferIdx{0}, fileOffset{0}, bufferStartPos{0}, bufferEndPos{0} {}
+ : FileInfo{std::move(path), fileSystem}, flags{flags}, length{0}, availableBuffer{0},
+ bufferIdx{0}, fileOffset{0}, bufferStartPos{0}, bufferEndPos{0} {}
void HTTPFileInfo::initialize() {
initializeClient();
@@ -40,8 +40,8 @@ void HTTPFileInfo::initialize() {
hfs->getRangeRequest(this, this->path, {}, 0, nullptr /* buffer */, 2);
if (rangeRequest->code != 206) {
// LCOV_EXCL_START
- throw IOException(stringFormat(
- "Unable to connect to URL \"{}\": {} ({})", this->path, res->code, res->error));
+ throw IOException(stringFormat("Unable to connect to URL \"{}\": {} ({})",
+ this->path, res->code, res->error));
// LCOV_EXCL_STOP
}
auto rangeFound = rangeRequest->headers["Content-Range"].find("/");
@@ -92,13 +92,13 @@ void HTTPFileInfo::initialize() {
}
} catch (std::invalid_argument& e) {
// LCOV_EXCL_START
- throw IOException(stringFormat(
- "Invalid Content-Length header received: {}", res->headers["Content-Length"]));
+ throw IOException(stringFormat("Invalid Content-Length header received: {}",
+ res->headers["Content-Length"]));
// LCOV_EXCL_STOP
} catch (std::out_of_range& e) {
// LCOV_EXCL_START
- throw IOException(stringFormat(
- "Invalid Content-Length header received: {}", res->headers["Content-Length"]));
+ throw IOException(stringFormat("Invalid Content-Length header received: {}",
+ res->headers["Content-Length"]));
// LCOV_EXCL_STOP
}
}
@@ -116,8 +116,8 @@ std::unique_ptr HTTPFileSystem::openFile(const std::string& pa
return std::move(httpFileInfo);
}
-std::vector<std::string> HTTPFileSystem::glob(
- main::ClientContext* /*context*/, const std::string& path) const {
+std::vector<std::string> HTTPFileSystem::glob(main::ClientContext* /*context*/,
+ const std::string& path) const {
// Glob is not supported on HTTPFS, simply return the path itself.
return {path};
}
@@ -126,8 +126,8 @@ bool HTTPFileSystem::canHandleFile(const std::string& path) const {
return path.rfind("https://", 0) == 0 || path.rfind("http://", 0) == 0;
}
-void HTTPFileSystem::readFromFile(
- common::FileInfo* fileInfo, void* buffer, uint64_t numBytes, uint64_t position) const {
+void HTTPFileSystem::readFromFile(common::FileInfo* fileInfo, void* buffer, uint64_t numBytes,
+ uint64_t position) const {
auto httpFileInfo = ku_dynamic_cast<FileInfo*, HTTPFileInfo*>(fileInfo);
auto numBytesToRead = numBytes;
auto bufferOffset = 0;
@@ -158,8 +158,8 @@ void HTTPFileSystem::readFromFile(
}
if (numBytesToRead > 0 && httpFileInfo->availableBuffer == 0) {
- auto newBufferAvailableSize = std::min(
- httpFileInfo->READ_BUFFER_LEN, httpFileInfo->length - httpFileInfo->fileOffset);
+ auto newBufferAvailableSize = std::min(httpFileInfo->READ_BUFFER_LEN,
+ httpFileInfo->length - httpFileInfo->fileOffset);
// Bypass buffer if we read more than buffer size.
if (numBytesToRead > newBufferAvailableSize) {
@@ -261,7 +261,9 @@ std::unique_ptr HTTPFileSystem::runRequestWithRetry(
status = res->status;
response = res.value();
}
- } catch (IOException& e) { exception = std::current_exception(); }
+ } catch (IOException& e) {
+ exception = std::current_exception();
+ }
if (err == httplib::Error::Success) {
switch (status) {
@@ -292,8 +294,8 @@ std::unique_ptr HTTPFileSystem::runRequestWithRetry(
std::rethrow_exception(exception);
} else if (err == httplib::Error::Success) {
// LCOV_EXCL_START
- throw IOException(stringFormat(
- "Request returned HTTP {} for HTTP {} to '{}'", status, method, url));
+ throw IOException(stringFormat("Request returned HTTP {} for HTTP {} to '{}'",
+ status, method, url));
// LCOV_EXCL_STOP
} else {
// LCOV_EXCL_START
@@ -305,8 +307,8 @@ std::unique_ptr HTTPFileSystem::runRequestWithRetry(
}
}
-std::unique_ptr<HTTPResponse> HTTPFileSystem::headRequest(
- FileInfo* fileInfo, const std::string& url, HeaderMap headerMap) const {
+std::unique_ptr<HTTPResponse> HTTPFileSystem::headRequest(FileInfo* fileInfo,
+ const std::string& url, HeaderMap headerMap) const {
auto httpFileInfo = ku_dynamic_cast<FileInfo*, HTTPFileInfo*>(fileInfo);
auto parsedURL = parseUrl(url);
auto host = parsedURL.first;
@@ -330,8 +332,8 @@ std::unique_ptr HTTPFileSystem::getRangeRequest(FileInfo* fileInfo
auto hostPath = parsedURL.second;
auto headers = getHTTPHeaders(headerMap);
- headers->insert(std::make_pair(
- "Range", stringFormat("bytes={}-{}", fileOffset, fileOffset + bufferLen - 1)));
+ headers->insert(std::make_pair("Range",
+ stringFormat("bytes={}-{}", fileOffset, fileOffset + bufferLen - 1)));
uint64_t bufferOffset = 0;
diff --git a/extension/httpfs/src/httpfs_extension.cpp b/extension/httpfs/src/httpfs_extension.cpp
index 61b358d777f..1814a55a815 100644
--- a/extension/httpfs/src/httpfs_extension.cpp
+++ b/extension/httpfs/src/httpfs_extension.cpp
@@ -12,18 +12,18 @@ void HttpfsExtension::load(main::ClientContext* context) {
db->registerFileSystem(std::make_unique<HTTPFileSystem>());
db->registerFileSystem(std::make_unique<S3FileSystem>());
db->addExtensionOption("s3_access_key_id", common::LogicalTypeID::STRING, common::Value{""});
- db->addExtensionOption(
- "s3_secret_access_key", common::LogicalTypeID::STRING, common::Value{""});
- db->addExtensionOption(
- "s3_endpoint", common::LogicalTypeID::STRING, common::Value{"s3.amazonaws.com"});
+ db->addExtensionOption("s3_secret_access_key", common::LogicalTypeID::STRING,
+ common::Value{""});
+ db->addExtensionOption("s3_endpoint", common::LogicalTypeID::STRING,
+ common::Value{"s3.amazonaws.com"});
db->addExtensionOption("s3_url_style", common::LogicalTypeID::STRING, common::Value{"vhost"});
db->addExtensionOption("s3_region", common::LogicalTypeID::STRING, common::Value{"us-east-1"});
db->addExtensionOption("s3_uploader_max_num_parts_per_file", common::LogicalTypeID::INT64,
common::Value{(int64_t)800000000000});
- db->addExtensionOption(
- "s3_uploader_max_filesize", common::LogicalTypeID::INT64, common::Value{(int64_t)10000});
- db->addExtensionOption(
- "s3_uploader_threads_limit", common::LogicalTypeID::INT64, common::Value{(int64_t)50});
+ db->addExtensionOption("s3_uploader_max_filesize", common::LogicalTypeID::INT64,
+ common::Value{(int64_t)10000});
+ db->addExtensionOption("s3_uploader_threads_limit", common::LogicalTypeID::INT64,
+ common::Value{(int64_t)50});
AWSEnvironmentCredentialsProvider::setOptionValue(context);
}
diff --git a/extension/httpfs/src/include/httpfs.h b/extension/httpfs/src/include/httpfs.h
index 5aaa6fc3507..47d377c8db3 100644
--- a/extension/httpfs/src/include/httpfs.h
+++ b/extension/httpfs/src/include/httpfs.h
@@ -63,8 +63,8 @@ class HTTPFileSystem : public common::FileSystem {
main::ClientContext* context = nullptr,
common::FileLockType lock_type = common::FileLockType::NO_LOCK) override;
-    std::vector<std::string> glob(
- main::ClientContext* context, const std::string& path) const override;
+    std::vector<std::string> glob(main::ClientContext* context,
+ const std::string& path) const override;
bool canHandleFile(const std::string& path) const override;
@@ -88,8 +88,8 @@ class HTTPFileSystem : public common::FileSystem {
const std::function& request, const std::string& url,
std::string method, const std::function& retry = {});
-    virtual std::unique_ptr<HTTPResponse> headRequest(
- common::FileInfo* fileInfo, const std::string& url, HeaderMap headerMap) const;
+    virtual std::unique_ptr<HTTPResponse> headRequest(common::FileInfo* fileInfo,
+ const std::string& url, HeaderMap headerMap) const;
virtual std::unique_ptr<HTTPResponse> getRangeRequest(common::FileInfo* fileInfo,
const std::string& url, HeaderMap headerMap, uint64_t fileOffset, char* buffer,
diff --git a/extension/httpfs/src/include/s3fs.h b/extension/httpfs/src/include/s3fs.h
index b7ee438acf9..925a14db232 100644
--- a/extension/httpfs/src/include/s3fs.h
+++ b/extension/httpfs/src/include/s3fs.h
@@ -112,8 +112,8 @@ class S3FileSystem final : public HTTPFileSystem {
main::ClientContext* context = nullptr,
common::FileLockType lock_type = common::FileLockType::NO_LOCK) override;
-    std::vector<std::string> glob(
- main::ClientContext* context, const std::string& path) const override;
+    std::vector<std::string> glob(main::ClientContext* context,
+ const std::string& path) const override;
bool canHandleFile(const std::string& path) const override;
@@ -128,8 +128,8 @@ class S3FileSystem final : public HTTPFileSystem {
void writeFile(common::FileInfo* fileInfo, const uint8_t* buffer, uint64_t numBytes,
uint64_t offset) const override;
-    std::shared_ptr<S3WriteBuffer> allocateWriteBuffer(
- uint16_t writeBufferIdx, uint64_t partSize, uint16_t maxThreads);
+    std::shared_ptr<S3WriteBuffer> allocateWriteBuffer(uint16_t writeBufferIdx, uint64_t partSize,
+ uint16_t maxThreads);
void flushAllBuffers(S3FileInfo* fileInfo);
@@ -140,8 +140,8 @@ class S3FileSystem final : public HTTPFileSystem {
std::string payloadHash = "", std::string contentType = "");
protected:
-    std::unique_ptr<HTTPResponse> headRequest(
- common::FileInfo* fileInfo, const std::string& url, HeaderMap headerMap) const override;
+    std::unique_ptr<HTTPResponse> headRequest(common::FileInfo* fileInfo, const std::string& url,
+ HeaderMap headerMap) const override;
std::unique_ptr<HTTPResponse> getRangeRequest(common::FileInfo* fileInfo,
const std::string& url, HeaderMap headerMap, uint64_t fileOffset, char* buffer,
@@ -179,8 +179,8 @@ struct AWSListObjectV2 {
static constexpr char OPEN_PREFIX_TAG[] = "<Prefix>";
static constexpr char CLOSE_PREFIX_TAG[] = "</Prefix>";
- static std::string request(
- std::string& path, S3AuthParams& authParams, std::string& continuationToken);
+ static std::string request(std::string& path, S3AuthParams& authParams,
+ std::string& continuationToken);
static void parseKey(std::string& awsResponse, std::vector<std::string>& result);
static std::vector<std::string> parseCommonPrefix(std::string& awsResponse);
static std::string parseContinuationToken(std::string& awsResponse);
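
AWSListObjectV2::parseCommonPrefix scans the ListObjectsV2 XML body for the text between its open and close prefix tags. A standalone sketch of that extraction over a hand-written response snippet:

```cpp
// Sketch: pull every value between the ListObjectsV2 prefix tags out of an
// XML body, as AWSListObjectV2::parseCommonPrefix does. The response string
// is a trimmed, hand-written example.
#include <iostream>
#include <string>
#include <vector>

static std::vector<std::string> parsePrefixes(const std::string& xml) {
    static const std::string openTag = "<Prefix>";
    static const std::string closeTag = "</Prefix>";
    std::vector<std::string> prefixes;
    size_t pos = 0;
    while ((pos = xml.find(openTag, pos)) != std::string::npos) {
        auto start = pos + openTag.size();
        auto end = xml.find(closeTag, start);
        if (end == std::string::npos) {
            break;
        }
        prefixes.push_back(xml.substr(start, end - start));
        pos = end + closeTag.size();
    }
    return prefixes;
}

int main() {
    std::string body = "<ListBucketResult>"
                       "<CommonPrefixes><Prefix>data/2023/</Prefix></CommonPrefixes>"
                       "<CommonPrefixes><Prefix>data/2024/</Prefix></CommonPrefixes>"
                       "</ListBucketResult>";
    for (auto& prefix : parsePrefixes(body)) {
        std::cout << prefix << "\n";
    }
    return 0;
}
```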
diff --git a/extension/httpfs/src/s3fs.cpp b/extension/httpfs/src/s3fs.cpp
index 8b70e88ab29..fc3e0578bf7 100644
--- a/extension/httpfs/src/s3fs.cpp
+++ b/extension/httpfs/src/s3fs.cpp
@@ -130,7 +130,7 @@ std::unique_ptr S3FileSystem::openFile(const std::string& path
bool likes(const char* string, uint64_t slen, const char* pattern, uint64_t plen) {
uint64_t sidx = 0;
uint64_t pidx = 0;
-main_loop : {
+main_loop: {
// main matching loop
while (sidx < slen && pidx < plen) {
char s = string[sidx];
@@ -188,7 +188,7 @@ main_loop : {
// we are finished only if we have consumed the full pattern
return pidx == plen && sidx == slen;
}
-parse_bracket : {
+parse_bracket: {
// inside a bracket
if (pidx == plen) {
return false;
@@ -287,8 +287,8 @@ static bool match(std::vector::const_iterator key,
return key == key_end && pattern == pattern_end;
}
-std::vector<std::string> S3FileSystem::glob(
- main::ClientContext* context, const std::string& path) const {
+std::vector<std::string> S3FileSystem::glob(main::ClientContext* context,
+ const std::string& path) const {
auto s3AuthParams = getS3AuthParams(context);
auto parsedS3URL = parseS3URL(path, s3AuthParams);
auto parsedGlobURL = parsedS3URL.trimmedS3URL;
@@ -311,8 +311,8 @@ std::vector S3FileSystem::glob(
commonPrefixes.pop_back();
std::string commonPrefixContinuationToken = "";
do {
- auto prefixRequest = AWSListObjectV2::request(
- prefixPath, s3AuthParams, commonPrefixContinuationToken);
+ auto prefixRequest = AWSListObjectV2::request(prefixPath, s3AuthParams,
+ commonPrefixContinuationToken);
AWSListObjectV2::parseKey(prefixRequest, s3Keys);
auto commonPrefixesToInsert = AWSListObjectV2::parseCommonPrefix(prefixRequest);
commonPrefixes.insert(commonPrefixes.end(), commonPrefixesToInsert.begin(),
@@ -455,8 +455,8 @@ std::string S3FileSystem::initializeMultiPartUpload(S3FileInfo* fileInfo) const
return getUploadID(result);
}
-void S3FileSystem::writeFile(
- common::FileInfo* fileInfo, const uint8_t* buffer, uint64_t numBytes, uint64_t offset) const {
+void S3FileSystem::writeFile(common::FileInfo* fileInfo, const uint8_t* buffer, uint64_t numBytes,
+ uint64_t offset) const {
auto s3FileInfo = ku_dynamic_cast<FileInfo*, S3FileInfo*>(fileInfo);
if (!((s3FileInfo->flags & O_ACCMODE) & O_WRONLY)) {
throw IOException("Write called on a file which is not open in write mode.");
@@ -485,8 +485,8 @@ void S3FileSystem::writeFile(
}
}
-std::shared_ptr<S3WriteBuffer> S3FileSystem::allocateWriteBuffer(
- uint16_t writeBufferIdx, uint64_t partSize, uint16_t maxThreads) {
+std::shared_ptr<S3WriteBuffer> S3FileSystem::allocateWriteBuffer(uint16_t writeBufferIdx,
+ uint64_t partSize, uint16_t maxThreads) {
std::unique_lock lck(bufferInfoLock);
if (numUsedBuffers >= maxThreads) {
bufferInfoCV.wait(lck, [&] { return numUsedBuffers < maxThreads; });
@@ -509,8 +509,8 @@ void S3FileSystem::flushAllBuffers(S3FileInfo* fileInfo) {
}
}
std::unique_lock lck(fileInfo->uploadsInProgressLock);
- fileInfo->uploadsInProgressCV.wait(
- lck, [fileInfo] { return fileInfo->uploadsInProgress == 0; });
+ fileInfo->uploadsInProgressCV.wait(lck,
+ [fileInfo] { return fileInfo->uploadsInProgress == 0; });
fileInfo->rethrowIOError();
}
@@ -650,8 +650,8 @@ HeaderMap S3FileSystem::createS3Header(std::string url, std::string query, std::
return res;
}
-std::unique_ptr<HTTPResponse> S3FileSystem::headRequest(
- common::FileInfo* fileInfo, const std::string& url, HeaderMap /*headerMap*/) const {
+std::unique_ptr<HTTPResponse> S3FileSystem::headRequest(common::FileInfo* fileInfo,
+ const std::string& url, HeaderMap /*headerMap*/) const {
auto& authParams = ku_dynamic_cast<FileInfo*, S3FileInfo*>(fileInfo)->authParams;
auto parsedS3URL = parseS3URL(url, authParams);
auto httpURL = parsedS3URL.getHTTPURL();
@@ -666,8 +666,8 @@ std::unique_ptr S3FileSystem::getRangeRequest(common::FileInfo* fi
auto parsedS3URL = parseS3URL(url, authParams);
auto s3HTTPUrl = parsedS3URL.getHTTPURL();
auto headers = createS3Header(parsedS3URL.path, "", parsedS3URL.host, "s3", "GET", authParams);
- return HTTPFileSystem::getRangeRequest(
- fileInfo, s3HTTPUrl, headers, fileOffset, buffer, bufferLen);
+ return HTTPFileSystem::getRangeRequest(fileInfo, s3HTTPUrl, headers, fileOffset, buffer,
+ bufferLen);
}
std::unique_ptr<HTTPResponse> S3FileSystem::postRequest(common::FileInfo* fileInfo,
@@ -680,8 +680,8 @@ std::unique_ptr S3FileSystem::postRequest(common::FileInfo* fileIn
auto payloadHash = getPayloadHash(inputBuffer, inputBufferLen);
auto headers = createS3Header(parsedS3URL.path, httpParams, parsedS3URL.host, "s3", "POST",
authParams, payloadHash, "application/octet-stream");
- return HTTPFileSystem::postRequest(
- fileInfo, httpURL, headers, outputBuffer, outputBufferLen, inputBuffer, inputBufferLen);
+ return HTTPFileSystem::postRequest(fileInfo, httpURL, headers, outputBuffer, outputBufferLen,
+ inputBuffer, inputBufferLen);
}
std::unique_ptr<HTTPResponse> S3FileSystem::putRequest(common::FileInfo* fileInfo,
@@ -708,8 +708,8 @@ std::string S3FileSystem::getPayloadHash(const uint8_t* buffer, uint64_t bufferL
}
}
-void S3FileSystem::flushBuffer(
-    S3FileInfo* fileInfo, std::shared_ptr<S3WriteBuffer> bufferToFlush) const {
+void S3FileSystem::flushBuffer(S3FileInfo* fileInfo,
+    std::shared_ptr<S3WriteBuffer> bufferToFlush) const {
if (bufferToFlush->numBytesWritten == 0) {
return;
}
@@ -734,8 +734,8 @@ void S3FileSystem::flushBuffer(
uploadThread.detach();
}
-void S3FileSystem::uploadBuffer(
-    S3FileInfo* fileInfo, std::shared_ptr<S3WriteBuffer> bufferToUpload) {
+void S3FileSystem::uploadBuffer(S3FileInfo* fileInfo,
+    std::shared_ptr<S3WriteBuffer> bufferToUpload) {
auto s3FileSystem = ku_dynamic_cast<common::FileSystem*, S3FileSystem*>(fileInfo->fileSystem);
std::string queryParam =
"partNumber=" + std::to_string(bufferToUpload->partID + 1) + "&" +
@@ -797,8 +797,8 @@ std::string S3FileSystem::getUploadID(const std::string& response) {
return response.substr(openTagPos, closeTagPos - openTagPos);
}
-std::string AWSListObjectV2::request(
- std::string& path, S3AuthParams& authParams, std::string& continuationToken) {
+std::string AWSListObjectV2::request(std::string& path, S3AuthParams& authParams,
+ std::string& continuationToken) {
auto parsedURL = S3FileSystem::parseS3URL(path, authParams);
std::string requestPath;
if (authParams.urlStyle == "path") {
@@ -830,8 +830,8 @@ std::string AWSListObjectV2::request(
listObjectV2URL.c_str(), *headers,
[&](const httplib::Response& response) {
if (response.status >= 400) {
- throw IOException{common::stringFormat(
- "HTTP GET error on '{}' (HTTP {})", listObjectV2URL, response.status)};
+ throw IOException{common::stringFormat("HTTP GET error on '{}' (HTTP {})",
+ listObjectV2URL, response.status)};
}
return true;
},
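
The upload path relies on a counter guarded by a mutex plus a condition variable: detached upload threads decrement uploadsInProgress and notify, while flushAllBuffers waits until the counter reaches zero. A reduced sketch with plain std threads (joined at the end only to keep the standalone example well-defined):

```cpp
// Sketch: the counter-plus-condition-variable handshake used by
// allocateWriteBuffer/flushAllBuffers, reduced to plain std threads. The real
// code detaches its upload threads; the joins here only keep the standalone
// example well-defined.
#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>
#include <vector>

int main() {
    std::mutex mtx;
    std::condition_variable cv;
    int uploadsInProgress = 3;

    std::vector<std::thread> uploads;
    for (int i = 0; i < 3; i++) {
        uploads.emplace_back([&, i] {
            // Simulate uploading one part.
            std::this_thread::sleep_for(std::chrono::milliseconds(50 * (i + 1)));
            {
                std::lock_guard<std::mutex> lck(mtx);
                uploadsInProgress--;
            }
            cv.notify_all();
        });
    }

    // Equivalent of flushAllBuffers: block until every in-flight upload is done.
    {
        std::unique_lock<std::mutex> lck(mtx);
        cv.wait(lck, [&] { return uploadsInProgress == 0; });
    }
    std::cout << "all uploads finished" << std::endl;

    for (auto& t : uploads) {
        t.join();
    }
    return 0;
}
```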
diff --git a/extension/httpfs/test/CMakeLists.txt b/extension/httpfs/test/CMakeLists.txt
index 08b6fe8acfb..e69de29bb2d 100644
--- a/extension/httpfs/test/CMakeLists.txt
+++ b/extension/httpfs/test/CMakeLists.txt
@@ -1,11 +0,0 @@
-add_definitions(-DTEST_FILES_DIR="extension/httpfs/test/test_files")
-add_subdirectory(${CMAKE_SOURCE_DIR}/test/gtest ${CMAKE_CURRENT_BINARY_DIR}/test/gtest EXCLUDE_FROM_ALL)
-# Make gtest available to subdirectories.
-add_library(GTest::GTest INTERFACE IMPORTED)
-target_link_libraries(GTest::GTest INTERFACE gtest_main)
-target_link_libraries(GTest::GTest INTERFACE gmock_main)
-enable_testing()
-add_subdirectory(${CMAKE_SOURCE_DIR}/test/test_helper ${CMAKE_CURRENT_BINARY_DIR}/test/test_helper)
-add_subdirectory(${CMAKE_SOURCE_DIR}/test/test_runner ${CMAKE_CURRENT_BINARY_DIR}/test/test_runner)
-add_subdirectory(${CMAKE_SOURCE_DIR}/test/graph_test ${CMAKE_CURRENT_BINARY_DIR}/test/graph_test)
-add_subdirectory(${CMAKE_SOURCE_DIR}/test/runner ${CMAKE_CURRENT_BINARY_DIR}/test/runner)
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/aes.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/aes.h
index e9466f8ad21..49bf0c97cb2 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/aes.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/aes.h
@@ -160,8 +160,8 @@ void mbedtls_aes_xts_free(mbedtls_aes_xts_context* ctx);
* \return #MBEDTLS_ERR_AES_INVALID_KEY_LENGTH on failure.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_aes_setkey_enc(
- mbedtls_aes_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_aes_setkey_enc(mbedtls_aes_context* ctx, const unsigned char* key,
+ unsigned int keybits);
/**
* \brief This function sets the decryption key.
@@ -179,8 +179,8 @@ int mbedtls_aes_setkey_enc(
* \return #MBEDTLS_ERR_AES_INVALID_KEY_LENGTH on failure.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_aes_setkey_dec(
- mbedtls_aes_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_aes_setkey_dec(mbedtls_aes_context* ctx, const unsigned char* key,
+ unsigned int keybits);
#if defined(MBEDTLS_CIPHER_MODE_XTS)
/**
@@ -200,8 +200,8 @@ int mbedtls_aes_setkey_dec(
* \return #MBEDTLS_ERR_AES_INVALID_KEY_LENGTH on failure.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_aes_xts_setkey_enc(
- mbedtls_aes_xts_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_aes_xts_setkey_enc(mbedtls_aes_xts_context* ctx, const unsigned char* key,
+ unsigned int keybits);
/**
* \brief This function prepares an XTS context for decryption and
@@ -220,8 +220,8 @@ int mbedtls_aes_xts_setkey_enc(
* \return #MBEDTLS_ERR_AES_INVALID_KEY_LENGTH on failure.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_aes_xts_setkey_dec(
- mbedtls_aes_xts_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_aes_xts_setkey_dec(mbedtls_aes_xts_context* ctx, const unsigned char* key,
+ unsigned int keybits);
#endif /* MBEDTLS_CIPHER_MODE_XTS */
/**
@@ -248,8 +248,8 @@ int mbedtls_aes_xts_setkey_dec(
* \return \c 0 on success.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_aes_crypt_ecb(
- mbedtls_aes_context* ctx, int mode, const unsigned char input[16], unsigned char output[16]);
+int mbedtls_aes_crypt_ecb(mbedtls_aes_context* ctx, int mode, const unsigned char input[16],
+ unsigned char output[16]);
#if defined(MBEDTLS_CIPHER_MODE_CBC)
/**
@@ -567,8 +567,8 @@ int mbedtls_aes_crypt_ctr(mbedtls_aes_context* ctx, size_t length, size_t* nc_of
* \return \c 0 on success.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_internal_aes_encrypt(
- mbedtls_aes_context* ctx, const unsigned char input[16], unsigned char output[16]);
+int mbedtls_internal_aes_encrypt(mbedtls_aes_context* ctx, const unsigned char input[16],
+ unsigned char output[16]);
/**
* \brief Internal AES block decryption function. This is only
@@ -582,8 +582,8 @@ int mbedtls_internal_aes_encrypt(
* \return \c 0 on success.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_internal_aes_decrypt(
- mbedtls_aes_context* ctx, const unsigned char input[16], unsigned char output[16]);
+int mbedtls_internal_aes_decrypt(mbedtls_aes_context* ctx, const unsigned char input[16],
+ unsigned char output[16]);
#if defined(MBEDTLS_SELF_TEST)
/**
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/aria.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/aria.h
index c1bed14d24e..93b62ab30ea 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/aria.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/aria.h
@@ -103,8 +103,8 @@ void mbedtls_aria_free(mbedtls_aria_context* ctx);
* \return \c 0 on success.
* \return A negative error code on failure.
*/
-int mbedtls_aria_setkey_enc(
- mbedtls_aria_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_aria_setkey_enc(mbedtls_aria_context* ctx, const unsigned char* key,
+ unsigned int keybits);
/**
* \brief This function sets the decryption key.
@@ -121,8 +121,8 @@ int mbedtls_aria_setkey_enc(
* \return \c 0 on success.
* \return A negative error code on failure.
*/
-int mbedtls_aria_setkey_dec(
- mbedtls_aria_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_aria_setkey_dec(mbedtls_aria_context* ctx, const unsigned char* key,
+ unsigned int keybits);
/**
* \brief This function performs an ARIA single-block encryption or
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/asn1.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/asn1.h
index 4d0ec240956..69f0c0cc2b5 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/asn1.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/asn1.h
@@ -317,8 +317,8 @@ int mbedtls_asn1_get_enum(unsigned char** p, const unsigned char* end, int* val)
* \return An ASN.1 error code if the input does not start with
* a valid ASN.1 BIT STRING.
*/
-int mbedtls_asn1_get_bitstring(
- unsigned char** p, const unsigned char* end, mbedtls_asn1_bitstring* bs);
+int mbedtls_asn1_get_bitstring(unsigned char** p, const unsigned char* end,
+ mbedtls_asn1_bitstring* bs);
/**
* \brief Retrieve a bitstring ASN.1 tag without unused bits and its
@@ -387,8 +387,8 @@ int mbedtls_asn1_get_bitstring_null(unsigned char** p, const unsigned char* end,
* \return An ASN.1 error code if the input does not start with
* a valid ASN.1 SEQUENCE.
*/
-int mbedtls_asn1_get_sequence_of(
- unsigned char** p, const unsigned char* end, mbedtls_asn1_sequence* cur, int tag);
+int mbedtls_asn1_get_sequence_of(unsigned char** p, const unsigned char* end,
+ mbedtls_asn1_sequence* cur, int tag);
/**
* \brief Free a heap-allocated linked list presentation of
* an ASN.1 sequence, including the first element.
@@ -540,8 +540,8 @@ int mbedtls_asn1_get_mpi(unsigned char** p, const unsigned char* end, mbedtls_mp
*
* \return 0 if successful or a specific ASN.1 or MPI error code.
*/
-int mbedtls_asn1_get_alg(
- unsigned char** p, const unsigned char* end, mbedtls_asn1_buf* alg, mbedtls_asn1_buf* params);
+int mbedtls_asn1_get_alg(unsigned char** p, const unsigned char* end, mbedtls_asn1_buf* alg,
+ mbedtls_asn1_buf* params);
/**
* \brief Retrieve an AlgorithmIdentifier ASN.1 sequence with NULL or no
@@ -570,8 +570,8 @@ int mbedtls_asn1_get_alg_null(unsigned char** p, const unsigned char* end, mbedt
*
* \return NULL if not found, or a pointer to the existing entry.
*/
-const mbedtls_asn1_named_data* mbedtls_asn1_find_named_data(
- const mbedtls_asn1_named_data* list, const char* oid, size_t len);
+const mbedtls_asn1_named_data* mbedtls_asn1_find_named_data(const mbedtls_asn1_named_data* list,
+ const char* oid, size_t len);
/**
* \brief Free a mbedtls_asn1_named_data entry
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/base64.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/base64.h
index 7fca8a8f4d6..89c5ff25526 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/base64.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/base64.h
@@ -54,8 +54,8 @@ extern "C" {
* \note Call this function with dlen = 0 to obtain the
* required buffer size in *olen
*/
-int mbedtls_base64_encode(
- unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src, size_t slen);
+int mbedtls_base64_encode(unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src,
+ size_t slen);
/**
* \brief Decode a base64-formatted buffer
@@ -74,8 +74,8 @@ int mbedtls_base64_encode(
* \note Call this function with *dst = NULL or dlen = 0 to obtain
* the required buffer size in *olen
*/
-int mbedtls_base64_decode(
- unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src, size_t slen);
+int mbedtls_base64_decode(unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src,
+ size_t slen);
#if defined(MBEDTLS_SELF_TEST)
/**
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/bignum.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/bignum.h
index 630dd0c6f08..8c87780e175 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/bignum.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/bignum.h
@@ -434,8 +434,8 @@ int mbedtls_mpi_read_string(mbedtls_mpi* X, int radix, const char* s);
* size of \p buf required for a successful call.
* \return Another negative error code on different kinds of failure.
*/
-int mbedtls_mpi_write_string(
- const mbedtls_mpi* X, int radix, char* buf, size_t buflen, size_t* olen);
+int mbedtls_mpi_write_string(const mbedtls_mpi* X, int radix, char* buf, size_t buflen,
+ size_t* olen);
#if defined(MBEDTLS_FS_IO)
/**
@@ -850,8 +850,8 @@ int mbedtls_mpi_exp_mod(mbedtls_mpi* X, const mbedtls_mpi* A, const mbedtls_mpi*
* as a big-endian representation of an MPI; this can
* be relevant in applications like deterministic ECDSA.
*/
-int mbedtls_mpi_fill_random(
- mbedtls_mpi* X, size_t size, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng);
+int mbedtls_mpi_fill_random(mbedtls_mpi* X, size_t size,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng);
/** Generate a random number uniformly in a range.
*
@@ -946,8 +946,8 @@ int mbedtls_mpi_inv_mod(mbedtls_mpi* X, const mbedtls_mpi* A, const mbedtls_mpi*
* \return #MBEDTLS_ERR_MPI_NOT_ACCEPTABLE if \p X is not prime.
* \return Another negative error code on other kinds of failure.
*/
-int mbedtls_mpi_is_prime_ext(
- const mbedtls_mpi* X, int rounds, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng);
+int mbedtls_mpi_is_prime_ext(const mbedtls_mpi* X, int rounds,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng);
/**
* \brief Flags for mbedtls_mpi_gen_prime()
*
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/camellia.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/camellia.h
index 30e7f25672b..f409c12374b 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/camellia.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/camellia.h
@@ -86,8 +86,8 @@ void mbedtls_camellia_free(mbedtls_camellia_context* ctx);
* \return \c 0 if successful.
* \return A negative error code on failure.
*/
-int mbedtls_camellia_setkey_enc(
- mbedtls_camellia_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_camellia_setkey_enc(mbedtls_camellia_context* ctx, const unsigned char* key,
+ unsigned int keybits);
/**
* \brief Perform a CAMELLIA key schedule operation for decryption.
@@ -101,8 +101,8 @@ int mbedtls_camellia_setkey_enc(
* \return \c 0 if successful.
* \return A negative error code on failure.
*/
-int mbedtls_camellia_setkey_dec(
- mbedtls_camellia_context* ctx, const unsigned char* key, unsigned int keybits);
+int mbedtls_camellia_setkey_dec(mbedtls_camellia_context* ctx, const unsigned char* key,
+ unsigned int keybits);
/**
* \brief Perform a CAMELLIA-ECB block encryption/decryption operation.
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/ccm.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/ccm.h
index d51849b1352..c8880f6883b 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/ccm.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/ccm.h
@@ -357,8 +357,8 @@ int mbedtls_ccm_starts(mbedtls_ccm_context* ctx, int mode, const unsigned char*
* \p ctx is in an invalid state,
* \p total_ad_len is greater than \c 0xFF00.
*/
-int mbedtls_ccm_set_lengths(
- mbedtls_ccm_context* ctx, size_t total_ad_len, size_t plaintext_len, size_t tag_len);
+int mbedtls_ccm_set_lengths(mbedtls_ccm_context* ctx, size_t total_ad_len, size_t plaintext_len,
+ size_t tag_len);
/**
* \brief This function feeds an input buffer as associated data
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/cipher.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/cipher.h
index ff68fce6b71..27bbd307a8f 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/cipher.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/cipher.h
@@ -428,8 +428,8 @@ const mbedtls_cipher_info_t* mbedtls_cipher_info_from_type(const mbedtls_cipher_
* given \p cipher_id.
* \return \c NULL if the associated cipher information is not found.
*/
-const mbedtls_cipher_info_t* mbedtls_cipher_info_from_values(
- const mbedtls_cipher_id_t cipher_id, int key_bitlen, const mbedtls_cipher_mode_t mode);
+const mbedtls_cipher_info_t* mbedtls_cipher_info_from_values(const mbedtls_cipher_id_t cipher_id,
+ int key_bitlen, const mbedtls_cipher_mode_t mode);
/**
* \brief Retrieve the identifier for a cipher info structure.
@@ -649,8 +649,8 @@ int mbedtls_cipher_setup(mbedtls_cipher_context_t* ctx, const mbedtls_cipher_inf
* \return #MBEDTLS_ERR_CIPHER_ALLOC_FAILED if allocation of the
* cipher-specific context fails.
*/
-int mbedtls_cipher_setup_psa(
- mbedtls_cipher_context_t* ctx, const mbedtls_cipher_info_t* cipher_info, size_t taglen);
+int mbedtls_cipher_setup_psa(mbedtls_cipher_context_t* ctx,
+ const mbedtls_cipher_info_t* cipher_info, size_t taglen);
#endif /* MBEDTLS_USE_PSA_CRYPTO */
/**
@@ -987,8 +987,8 @@ int mbedtls_cipher_write_tag(mbedtls_cipher_context_t* ctx, unsigned char* tag,
* \return \c 0 on success.
* \return A specific error code on failure.
*/
-int mbedtls_cipher_check_tag(
- mbedtls_cipher_context_t* ctx, const unsigned char* tag, size_t tag_len);
+int mbedtls_cipher_check_tag(mbedtls_cipher_context_t* ctx, const unsigned char* tag,
+ size_t tag_len);
#endif /* MBEDTLS_GCM_C || MBEDTLS_CHACHAPOLY_C */
/**
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/des.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/des.h
index e5734d05852..656af738ba1 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/des.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/des.h
@@ -191,8 +191,8 @@ int mbedtls_des_setkey_dec(mbedtls_des_context* ctx, const unsigned char key[MBE
* \return 0
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_des3_set2key_enc(
- mbedtls_des3_context* ctx, const unsigned char key[MBEDTLS_DES_KEY_SIZE * 2]);
+int mbedtls_des3_set2key_enc(mbedtls_des3_context* ctx,
+ const unsigned char key[MBEDTLS_DES_KEY_SIZE * 2]);
/**
* \brief Triple-DES key schedule (112-bit, decryption)
@@ -203,8 +203,8 @@ int mbedtls_des3_set2key_enc(
* \return 0
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_des3_set2key_dec(
- mbedtls_des3_context* ctx, const unsigned char key[MBEDTLS_DES_KEY_SIZE * 2]);
+int mbedtls_des3_set2key_dec(mbedtls_des3_context* ctx,
+ const unsigned char key[MBEDTLS_DES_KEY_SIZE * 2]);
/**
* \brief Triple-DES key schedule (168-bit, encryption)
@@ -215,8 +215,8 @@ int mbedtls_des3_set2key_dec(
* \return 0
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_des3_set3key_enc(
- mbedtls_des3_context* ctx, const unsigned char key[MBEDTLS_DES_KEY_SIZE * 3]);
+int mbedtls_des3_set3key_enc(mbedtls_des3_context* ctx,
+ const unsigned char key[MBEDTLS_DES_KEY_SIZE * 3]);
/**
* \brief Triple-DES key schedule (168-bit, decryption)
@@ -227,8 +227,8 @@ int mbedtls_des3_set3key_enc(
* \return 0
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_des3_set3key_dec(
- mbedtls_des3_context* ctx, const unsigned char key[MBEDTLS_DES_KEY_SIZE * 3]);
+int mbedtls_des3_set3key_dec(mbedtls_des3_context* ctx,
+ const unsigned char key[MBEDTLS_DES_KEY_SIZE * 3]);
/**
* \brief DES-ECB block encryption/decryption
@@ -244,8 +244,8 @@ int mbedtls_des3_set3key_dec(
* instead.
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_des_crypt_ecb(
- mbedtls_des_context* ctx, const unsigned char input[8], unsigned char output[8]);
+int mbedtls_des_crypt_ecb(mbedtls_des_context* ctx, const unsigned char input[8],
+ unsigned char output[8]);
#if defined(MBEDTLS_CIPHER_MODE_CBC)
/**
@@ -285,8 +285,8 @@ int mbedtls_des_crypt_cbc(mbedtls_des_context* ctx, int mode, size_t length, uns
* \return 0 if successful
*/
MBEDTLS_CHECK_RETURN_TYPICAL
-int mbedtls_des3_crypt_ecb(
- mbedtls_des3_context* ctx, const unsigned char input[8], unsigned char output[8]);
+int mbedtls_des3_crypt_ecb(mbedtls_des3_context* ctx, const unsigned char input[8],
+ unsigned char output[8]);
#if defined(MBEDTLS_CIPHER_MODE_CBC)
/**
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/entropy.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/entropy.h
index e440444c00d..a1cc701fd6e 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/entropy.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/entropy.h
@@ -96,8 +96,8 @@ extern "C" {
* \return 0 if no critical failures occurred,
* MBEDTLS_ERR_ENTROPY_SOURCE_FAILED otherwise
*/
-typedef int (*mbedtls_entropy_f_source_ptr)(
- void* data, unsigned char* output, size_t len, size_t* olen);
+typedef int (
+ *mbedtls_entropy_f_source_ptr)(void* data, unsigned char* output, size_t len, size_t* olen);
/**
* \brief Entropy source state
@@ -206,8 +206,8 @@ int mbedtls_entropy_func(void* data, unsigned char* output, size_t len);
*
* \return 0 if successful
*/
-int mbedtls_entropy_update_manual(
- mbedtls_entropy_context* ctx, const unsigned char* data, size_t len);
+int mbedtls_entropy_update_manual(mbedtls_entropy_context* ctx, const unsigned char* data,
+ size_t len);
#if defined(MBEDTLS_ENTROPY_NV_SEED)
/**
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/oid.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/oid.h
index 7bcd50f2099..de20988a156 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/oid.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/oid.h
@@ -677,8 +677,8 @@ int mbedtls_oid_get_oid_by_ec_grp(mbedtls_ecp_group_id grp_id, const char** oid,
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
-int mbedtls_oid_get_sig_alg(
- const mbedtls_asn1_buf* oid, mbedtls_md_type_t* md_alg, mbedtls_pk_type_t* pk_alg);
+int mbedtls_oid_get_sig_alg(const mbedtls_asn1_buf* oid, mbedtls_md_type_t* md_alg,
+ mbedtls_pk_type_t* pk_alg);
/**
* \brief Translate SignatureAlgorithm OID into description
@@ -700,8 +700,8 @@ int mbedtls_oid_get_sig_alg_desc(const mbedtls_asn1_buf* oid, const char** desc)
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
-int mbedtls_oid_get_oid_by_sig_alg(
- mbedtls_pk_type_t pk_alg, mbedtls_md_type_t md_alg, const char** oid, size_t* olen);
+int mbedtls_oid_get_oid_by_sig_alg(mbedtls_pk_type_t pk_alg, mbedtls_md_type_t md_alg,
+ const char** oid, size_t* olen);
/**
* \brief Translate hash algorithm OID into md_type
@@ -780,8 +780,8 @@ int mbedtls_oid_get_cipher_alg(const mbedtls_asn1_buf* oid, mbedtls_cipher_type_
*
* \return 0 if successful, or MBEDTLS_ERR_OID_NOT_FOUND
*/
-int mbedtls_oid_get_pkcs12_pbe_alg(
- const mbedtls_asn1_buf* oid, mbedtls_md_type_t* md_alg, mbedtls_cipher_type_t* cipher_alg);
+int mbedtls_oid_get_pkcs12_pbe_alg(const mbedtls_asn1_buf* oid, mbedtls_md_type_t* md_alg,
+ mbedtls_cipher_type_t* cipher_alg);
#endif /* MBEDTLS_PKCS12_C */
#ifdef __cplusplus
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/platform.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/platform.h
index 15d22ab92c1..3f4a5650b52 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/platform.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/platform.h
@@ -143,8 +143,8 @@ extern void mbedtls_free(void* ptr);
*
* \return \c 0.
*/
-int mbedtls_platform_set_calloc_free(
- void* (*calloc_func)(size_t, size_t), void (*free_func)(void*));
+int mbedtls_platform_set_calloc_free(void* (*calloc_func)(size_t, size_t),
+ void (*free_func)(void*));
#endif /* MBEDTLS_PLATFORM_FREE_MACRO && MBEDTLS_PLATFORM_CALLOC_MACRO */
#else /* !MBEDTLS_PLATFORM_MEMORY */
#define mbedtls_free free
diff --git a/extension/httpfs/third_party/mbedtls/include/mbedtls/rsa.h b/extension/httpfs/third_party/mbedtls/include/mbedtls/rsa.h
index a9669d5447d..e2fa3e7db74 100644
--- a/extension/httpfs/third_party/mbedtls/include/mbedtls/rsa.h
+++ b/extension/httpfs/third_party/mbedtls/include/mbedtls/rsa.h
@@ -394,8 +394,8 @@ int mbedtls_rsa_export_raw(const mbedtls_rsa_context* ctx, unsigned char* N, siz
* \return A non-zero error code on failure.
*
*/
-int mbedtls_rsa_export_crt(
- const mbedtls_rsa_context* ctx, mbedtls_mpi* DP, mbedtls_mpi* DQ, mbedtls_mpi* QP);
+int mbedtls_rsa_export_crt(const mbedtls_rsa_context* ctx, mbedtls_mpi* DP, mbedtls_mpi* DQ,
+ mbedtls_mpi* QP);
/**
* \brief This function retrieves the length of RSA modulus in Bytes.
diff --git a/extension/httpfs/third_party/mbedtls/library/aes.cpp b/extension/httpfs/third_party/mbedtls/library/aes.cpp
index 677594d6bda..f5879491401 100644
--- a/extension/httpfs/third_party/mbedtls/library/aes.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/aes.cpp
@@ -41,12 +41,12 @@
#endif
#if defined(MBEDTLS_SELF_TEST)
-//#if defined(MBEDTLS_PLATFORM_C)
-//#include "mbedtls/platform.h"
-//#else
-//#include <stdio.h>
-//#define mbedtls_printf printf
-//#endif /* MBEDTLS_PLATFORM_C */
+// #if defined(MBEDTLS_PLATFORM_C)
+// #include "mbedtls/platform.h"
+// #else
+// #include <stdio.h>
+// #define mbedtls_printf printf
+// #endif /* MBEDTLS_PLATFORM_C */
#endif /* MBEDTLS_SELF_TEST */
#if !defined(MBEDTLS_AES_ALT)
@@ -325,7 +325,7 @@ static uint32_t RCON[10];
* Tables generation code
*/
#define ROTL8(x) (((x) << 8) & 0xFFFFFFFF) | ((x) >> 24)
-#define XTIME(x) (((x) << 1) ^ (((x)&0x80) ? 0x1B : 0x00))
+#define XTIME(x) (((x) << 1) ^ (((x) & 0x80) ? 0x1B : 0x00))
#define MUL(x, y) (((x) && (y)) ? pow[(log[(x)] + log[(y)]) % 255] : 0)
static int aes_init_done = 0;
@@ -472,8 +472,8 @@ void mbedtls_aes_xts_free(mbedtls_aes_xts_context* ctx) {
* AES key schedule (encryption)
*/
#if !defined(MBEDTLS_AES_SETKEY_ENC_ALT)
-int mbedtls_aes_setkey_enc(
- mbedtls_aes_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_aes_setkey_enc(mbedtls_aes_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
unsigned int i;
uint32_t* RK;
@@ -583,8 +583,8 @@ int mbedtls_aes_setkey_enc(
* AES key schedule (decryption)
*/
#if !defined(MBEDTLS_AES_SETKEY_DEC_ALT)
-int mbedtls_aes_setkey_dec(
- mbedtls_aes_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_aes_setkey_dec(mbedtls_aes_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
int i, j, ret;
mbedtls_aes_context cty;
uint32_t* RK;
@@ -668,8 +668,8 @@ static int mbedtls_aes_xts_decode_keys(const unsigned char* key, unsigned int ke
return 0;
}
-int mbedtls_aes_xts_setkey_enc(
- mbedtls_aes_xts_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_aes_xts_setkey_enc(mbedtls_aes_xts_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
const unsigned char *key1, *key2;
unsigned int key1bits, key2bits;
@@ -690,8 +690,8 @@ int mbedtls_aes_xts_setkey_enc(
return mbedtls_aes_setkey_enc(&ctx->crypt, key1, key1bits);
}
-int mbedtls_aes_xts_setkey_dec(
- mbedtls_aes_xts_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_aes_xts_setkey_dec(mbedtls_aes_xts_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
const unsigned char *key1, *key2;
unsigned int key1bits, key2bits;
@@ -747,8 +747,8 @@ int mbedtls_aes_xts_setkey_dec(
* AES-ECB block encryption
*/
#if !defined(MBEDTLS_AES_ENCRYPT_ALT)
-int mbedtls_internal_aes_encrypt(
- mbedtls_aes_context* ctx, const unsigned char input[16], unsigned char output[16]) {
+int mbedtls_internal_aes_encrypt(mbedtls_aes_context* ctx, const unsigned char input[16],
+ unsigned char output[16]) {
int i;
uint32_t* RK = ctx->rk;
struct {
@@ -807,8 +807,8 @@ int mbedtls_internal_aes_encrypt(
* AES-ECB block decryption
*/
#if !defined(MBEDTLS_AES_DECRYPT_ALT)
-int mbedtls_internal_aes_decrypt(
- mbedtls_aes_context* ctx, const unsigned char input[16], unsigned char output[16]) {
+int mbedtls_internal_aes_decrypt(mbedtls_aes_context* ctx, const unsigned char input[16],
+ unsigned char output[16]) {
int i;
uint32_t* RK = ctx->rk;
struct {
@@ -866,8 +866,8 @@ int mbedtls_internal_aes_decrypt(
/*
* AES-ECB block encryption/decryption
*/
-int mbedtls_aes_crypt_ecb(
- mbedtls_aes_context* ctx, int mode, const unsigned char input[16], unsigned char output[16]) {
+int mbedtls_aes_crypt_ecb(mbedtls_aes_context* ctx, int mode, const unsigned char input[16],
+ unsigned char output[16]) {
AES_VALIDATE_RET(ctx != NULL);
AES_VALIDATE_RET(input != NULL);
AES_VALIDATE_RET(output != NULL);
@@ -1274,34 +1274,34 @@ int mbedtls_aes_crypt_ctr(mbedtls_aes_context* ctx, size_t length, size_t* nc_of
*
* http://csrc.nist.gov/archive/aes/rijndael/rijndael-vals.zip
*/
-static const unsigned char aes_test_ecb_dec[3][16] = {
- {0x44, 0x41, 0x6A, 0xC2, 0xD1, 0xF5, 0x3C, 0x58, 0x33, 0x03, 0x91, 0x7E, 0x6B, 0xE9, 0xEB,
- 0xE0},
+static const unsigned char aes_test_ecb_dec[3][16] = {{0x44, 0x41, 0x6A, 0xC2, 0xD1, 0xF5, 0x3C,
+ 0x58, 0x33, 0x03, 0x91, 0x7E, 0x6B, 0xE9,
+ 0xEB, 0xE0},
{0x48, 0xE3, 0x1E, 0x9E, 0x25, 0x67, 0x18, 0xF2, 0x92, 0x29, 0x31, 0x9C, 0x19, 0xF1, 0x5B,
0xA4},
{0x05, 0x8C, 0xCF, 0xFD, 0xBB, 0xCB, 0x38, 0x2D, 0x1F, 0x6F, 0x56, 0x58, 0x5D, 0x8A, 0x4A,
0xDE}};
-static const unsigned char aes_test_ecb_enc[3][16] = {
- {0xC3, 0x4C, 0x05, 0x2C, 0xC0, 0xDA, 0x8D, 0x73, 0x45, 0x1A, 0xFE, 0x5F, 0x03, 0xBE, 0x29,
- 0x7F},
+static const unsigned char aes_test_ecb_enc[3][16] = {{0xC3, 0x4C, 0x05, 0x2C, 0xC0, 0xDA, 0x8D,
+ 0x73, 0x45, 0x1A, 0xFE, 0x5F, 0x03, 0xBE,
+ 0x29, 0x7F},
{0xF3, 0xF6, 0x75, 0x2A, 0xE8, 0xD7, 0x83, 0x11, 0x38, 0xF0, 0x41, 0x56, 0x06, 0x31, 0xB1,
0x14},
{0x8B, 0x79, 0xEE, 0xCC, 0x93, 0xA0, 0xEE, 0x5D, 0xFF, 0x30, 0xB4, 0xEA, 0x21, 0x63, 0x6D,
0xA4}};
#if defined(MBEDTLS_CIPHER_MODE_CBC)
-static const unsigned char aes_test_cbc_dec[3][16] = {
- {0xFA, 0xCA, 0x37, 0xE0, 0xB0, 0xC8, 0x53, 0x73, 0xDF, 0x70, 0x6E, 0x73, 0xF7, 0xC9, 0xAF,
- 0x86},
+static const unsigned char aes_test_cbc_dec[3][16] = {{0xFA, 0xCA, 0x37, 0xE0, 0xB0, 0xC8, 0x53,
+ 0x73, 0xDF, 0x70, 0x6E, 0x73, 0xF7, 0xC9,
+ 0xAF, 0x86},
{0x5D, 0xF6, 0x78, 0xDD, 0x17, 0xBA, 0x4E, 0x75, 0xB6, 0x17, 0x68, 0xC6, 0xAD, 0xEF, 0x7C,
0x7B},
{0x48, 0x04, 0xE1, 0x81, 0x8F, 0xE6, 0x29, 0x75, 0x19, 0xA3, 0xE8, 0x8C, 0x57, 0x31, 0x04,
0x13}};
-static const unsigned char aes_test_cbc_enc[3][16] = {
- {0x8A, 0x05, 0xFC, 0x5E, 0x09, 0x5A, 0xF4, 0x84, 0x8A, 0x08, 0xD3, 0x28, 0xD3, 0x68, 0x8E,
- 0x3D},
+static const unsigned char aes_test_cbc_enc[3][16] = {{0x8A, 0x05, 0xFC, 0x5E, 0x09, 0x5A, 0xF4,
+ 0x84, 0x8A, 0x08, 0xD3, 0x28, 0xD3, 0x68,
+ 0x8E, 0x3D},
{0x7B, 0xD9, 0x66, 0xD5, 0x3A, 0xD8, 0xC1, 0xBB, 0x85, 0xD2, 0xAD, 0xFA, 0xE8, 0x7B, 0xB1,
0x04},
{0xFE, 0x3C, 0x53, 0x65, 0x3E, 0x2F, 0x45, 0xB5, 0x6F, 0xCD, 0x88, 0xB2, 0xCC, 0x89, 0x8F,
@@ -1314,17 +1314,17 @@ static const unsigned char aes_test_cbc_enc[3][16] = {
*
* http://csrc.nist.gov/publications/nistpubs/800-38a/sp800-38a.pdf
*/
-static const unsigned char aes_test_cfb128_key[3][32] = {
- {0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F,
- 0x3C},
+static const unsigned char aes_test_cfb128_key[3][32] = {{0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2,
+ 0xA6, 0xAB, 0xF7, 0x15, 0x88, 0x09,
+ 0xCF, 0x4F, 0x3C},
{0x8E, 0x73, 0xB0, 0xF7, 0xDA, 0x0E, 0x64, 0x52, 0xC8, 0x10, 0xF3, 0x2B, 0x80, 0x90, 0x79, 0xE5,
0x62, 0xF8, 0xEA, 0xD2, 0x52, 0x2C, 0x6B, 0x7B},
{0x60, 0x3D, 0xEB, 0x10, 0x15, 0xCA, 0x71, 0xBE, 0x2B, 0x73, 0xAE, 0xF0, 0x85, 0x7D, 0x77, 0x81,
0x1F, 0x35, 0x2C, 0x07, 0x3B, 0x61, 0x08, 0xD7, 0x2D, 0x98, 0x10, 0xA3, 0x09, 0x14, 0xDF,
0xF4}};
-static const unsigned char aes_test_cfb128_iv[16] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F};
+static const unsigned char aes_test_cfb128_iv[16] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F};
static const unsigned char aes_test_cfb128_pt[64] = {0x6B, 0xC1, 0xBE, 0xE2, 0x2E, 0x40, 0x9F, 0x96,
0xE9, 0x3D, 0x7E, 0x11, 0x73, 0x93, 0x17, 0x2A, 0xAE, 0x2D, 0x8A, 0x57, 0x1E, 0x03, 0xAC, 0x9C,
@@ -1356,17 +1356,17 @@ static const unsigned char aes_test_cfb128_ct[3][64] = {
*
* https://csrc.nist.gov/publications/detail/sp/800-38a/final
*/
-static const unsigned char aes_test_ofb_key[3][32] = {
- {0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F,
- 0x3C},
+static const unsigned char aes_test_ofb_key[3][32] = {{0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2,
+ 0xA6, 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF,
+ 0x4F, 0x3C},
{0x8E, 0x73, 0xB0, 0xF7, 0xDA, 0x0E, 0x64, 0x52, 0xC8, 0x10, 0xF3, 0x2B, 0x80, 0x90, 0x79, 0xE5,
0x62, 0xF8, 0xEA, 0xD2, 0x52, 0x2C, 0x6B, 0x7B},
{0x60, 0x3D, 0xEB, 0x10, 0x15, 0xCA, 0x71, 0xBE, 0x2B, 0x73, 0xAE, 0xF0, 0x85, 0x7D, 0x77, 0x81,
0x1F, 0x35, 0x2C, 0x07, 0x3B, 0x61, 0x08, 0xD7, 0x2D, 0x98, 0x10, 0xA3, 0x09, 0x14, 0xDF,
0xF4}};
-static const unsigned char aes_test_ofb_iv[16] = {
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F};
+static const unsigned char aes_test_ofb_iv[16] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
+ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F};
static const unsigned char aes_test_ofb_pt[64] = {0x6B, 0xC1, 0xBE, 0xE2, 0x2E, 0x40, 0x9F, 0x96,
0xE9, 0x3D, 0x7E, 0x11, 0x73, 0x93, 0x17, 0x2A, 0xAE, 0x2D, 0x8A, 0x57, 0x1E, 0x03, 0xAC, 0x9C,
@@ -1399,25 +1399,25 @@ static const unsigned char aes_test_ofb_ct[3][64] = {
* http://www.faqs.org/rfcs/rfc3686.html
*/
-static const unsigned char aes_test_ctr_key[3][16] = {
- {0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC, 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3,
- 0x9E},
+static const unsigned char aes_test_ctr_key[3][16] = {{0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67,
+ 0xCC, 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77,
+ 0xF3, 0x9E},
{0x7E, 0x24, 0x06, 0x78, 0x17, 0xFA, 0xE0, 0xD7, 0x43, 0xD6, 0xCE, 0x1F, 0x32, 0x53, 0x91,
0x63},
{0x76, 0x91, 0xBE, 0x03, 0x5E, 0x50, 0x20, 0xA8, 0xAC, 0x6E, 0x61, 0x85, 0x29, 0xF9, 0xA0,
0xDC}};
-static const unsigned char aes_test_ctr_nonce_counter[3][16] = {
- {0x00, 0x00, 0x00, 0x30, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
- 0x01},
+static const unsigned char aes_test_ctr_nonce_counter[3][16] = {{0x00, 0x00, 0x00, 0x30, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x6C, 0xB6, 0xDB, 0xC0, 0x54, 0x3B, 0x59, 0xDA, 0x48, 0xD9, 0x0B, 0x00, 0x00, 0x00,
0x01},
{0x00, 0xE0, 0x01, 0x7B, 0x27, 0x77, 0x7F, 0x3F, 0x4A, 0x17, 0x86, 0xF0, 0x00, 0x00, 0x00,
0x01}};
-static const unsigned char aes_test_ctr_pt[3][48] = {
- {0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6D, 0x73,
- 0x67},
+static const unsigned char aes_test_ctr_pt[3][48] = {{0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20,
+ 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6D,
+ 0x73, 0x67},
{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
@@ -1427,9 +1427,9 @@ static const unsigned char aes_test_ctr_pt[3][48] = {
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0x20, 0x21, 0x22, 0x23}};
-static const unsigned char aes_test_ctr_ct[3][48] = {
- {0xE4, 0x09, 0x5D, 0x4F, 0xB7, 0xA7, 0xB3, 0x79, 0x2D, 0x61, 0x75, 0xA3, 0x26, 0x13, 0x11,
- 0xB8},
+static const unsigned char aes_test_ctr_ct[3][48] = {{0xE4, 0x09, 0x5D, 0x4F, 0xB7, 0xA7, 0xB3,
+ 0x79, 0x2D, 0x61, 0x75, 0xA3, 0x26, 0x13,
+ 0x11, 0xB8},
{0x51, 0x04, 0xA1, 0x06, 0x16, 0x8A, 0x72, 0xD9, 0x79, 0x0D, 0x41, 0xEE, 0x8E, 0xDA, 0xD3, 0x88,
0xEB, 0x2E, 0x1E, 0xFC, 0x46, 0xDA, 0x57, 0xC8, 0xFC, 0xE6, 0x30, 0xDF, 0x91, 0x41, 0xBE,
0x28},
@@ -1535,8 +1535,8 @@ int mbedtls_aes_self_test(int verbose) {
mode = i & 1;
if (verbose != 0)
- mbedtls_printf(
- " AES-ECB-%3u (%s): ", keybits, (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
+ mbedtls_printf(" AES-ECB-%3u (%s): ", keybits,
+ (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
memset(buf, 0, 16);
@@ -1588,8 +1588,8 @@ int mbedtls_aes_self_test(int verbose) {
mode = i & 1;
if (verbose != 0)
- mbedtls_printf(
- " AES-CBC-%3u (%s): ", keybits, (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
+ mbedtls_printf(" AES-CBC-%3u (%s): ", keybits,
+ (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
memset(iv, 0, 16);
memset(prv, 0, 16);
@@ -1652,8 +1652,8 @@ int mbedtls_aes_self_test(int verbose) {
mode = i & 1;
if (verbose != 0)
- mbedtls_printf(
- " AES-CFB128-%3u (%s): ", keybits, (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
+ mbedtls_printf(" AES-CFB128-%3u (%s): ", keybits,
+ (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
memcpy(iv, aes_test_cfb128_iv, 16);
memcpy(key, aes_test_cfb128_key[u], keybits / 8);
@@ -1707,8 +1707,8 @@ int mbedtls_aes_self_test(int verbose) {
mode = i & 1;
if (verbose != 0)
- mbedtls_printf(
- " AES-OFB-%3u (%s): ", keybits, (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
+ mbedtls_printf(" AES-OFB-%3u (%s): ", keybits,
+ (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
memcpy(iv, aes_test_ofb_iv, 16);
memcpy(key, aes_test_ofb_key[u], keybits / 8);
@@ -1813,8 +1813,8 @@ int mbedtls_aes_self_test(int verbose) {
mode = i & 1;
if (verbose != 0)
- mbedtls_printf(
- " AES-XTS-128 (%s): ", (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
+ mbedtls_printf(" AES-XTS-128 (%s): ",
+ (mode == MBEDTLS_AES_DECRYPT) ? "dec" : "enc");
memset(key, 0, sizeof(key));
memcpy(key, aes_test_xts_key[u], 32);
diff --git a/extension/httpfs/third_party/mbedtls/library/aria.cpp b/extension/httpfs/third_party/mbedtls/library/aria.cpp
index bfc460fd4ad..678c298516f 100644
--- a/extension/httpfs/third_party/mbedtls/library/aria.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/aria.cpp
@@ -87,7 +87,7 @@ static inline uint32_t aria_p1(uint32_t x) {
#endif /* x86 gnuc */
#endif /* MBEDTLS_HAVE_ASM && GNUC */
#if !defined(ARIA_P1)
-#define ARIA_P1(x) ((((x) >> 8) & 0x00FF00FF) ^ (((x)&0x00FF00FF) << 8))
+#define ARIA_P1(x) ((((x) >> 8) & 0x00FF00FF) ^ (((x) & 0x00FF00FF) << 8))
#endif
/*
@@ -283,8 +283,8 @@ static const uint8_t aria_is2[256] = {0x30, 0x68, 0x99, 0x1B, 0x87, 0xB9, 0x21,
/*
* Helper for key schedule: r = FO( p, k ) ^ x
*/
-static void aria_fo_xor(
- uint32_t r[4], const uint32_t p[4], const uint32_t k[4], const uint32_t x[4]) {
+static void aria_fo_xor(uint32_t r[4], const uint32_t p[4], const uint32_t k[4],
+ const uint32_t x[4]) {
uint32_t a, b, c, d;
a = p[0] ^ k[0];
@@ -304,8 +304,8 @@ static void aria_fo_xor(
/*
* Helper for key schedule: r = FE( p, k ) ^ x
*/
-static void aria_fe_xor(
- uint32_t r[4], const uint32_t p[4], const uint32_t k[4], const uint32_t x[4]) {
+static void aria_fe_xor(uint32_t r[4], const uint32_t p[4], const uint32_t k[4],
+ const uint32_t x[4]) {
uint32_t a, b, c, d;
a = p[0] ^ k[0];
@@ -352,8 +352,8 @@ static void aria_rot128(uint32_t r[4], const uint32_t a[4], const uint32_t b[4],
/*
* Set encryption key
*/
-int mbedtls_aria_setkey_enc(
- mbedtls_aria_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_aria_setkey_enc(mbedtls_aria_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
/* round constant masks */
const uint32_t rc[3][4] = {{0xB7C17C51, 0x940A2227, 0xE8AB13FE, 0xE06E9AFA},
{0xCC4AB16D, 0x20C8219E, 0xD5B128FF, 0xB0E25DEF},
@@ -411,8 +411,8 @@ int mbedtls_aria_setkey_enc(
/*
* Set decryption key
*/
-int mbedtls_aria_setkey_dec(
- mbedtls_aria_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_aria_setkey_dec(mbedtls_aria_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
int i, j, k, ret;
ARIA_VALIDATE_RET(ctx != NULL);
ARIA_VALIDATE_RET(key != NULL);
@@ -906,8 +906,8 @@ int mbedtls_aria_self_test(int verbose) {
memcpy(iv, aria_test2_iv, MBEDTLS_ARIA_BLOCKSIZE);
memset(buf, 0xAA, sizeof(buf));
j = 0;
- mbedtls_aria_crypt_cfb128(
- &ctx, MBEDTLS_ARIA_DECRYPT, 48, &j, iv, aria_test2_cfb_ct[i], buf);
+ mbedtls_aria_crypt_cfb128(&ctx, MBEDTLS_ARIA_DECRYPT, 48, &j, iv, aria_test2_cfb_ct[i],
+ buf);
if (memcmp(buf, aria_test2_pt, 48) != 0)
ARIA_SELF_TEST_IF_FAIL;
}
diff --git a/extension/httpfs/third_party/mbedtls/library/asn1parse.cpp b/extension/httpfs/third_party/mbedtls/library/asn1parse.cpp
index be18c6b07c3..f1d4af27c66 100644
--- a/extension/httpfs/third_party/mbedtls/library/asn1parse.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/asn1parse.cpp
@@ -185,8 +185,8 @@ int mbedtls_asn1_get_mpi(unsigned char** p, const unsigned char* end, mbedtls_mp
}
#endif /* MBEDTLS_BIGNUM_C */
-int mbedtls_asn1_get_bitstring(
- unsigned char** p, const unsigned char* end, mbedtls_asn1_bitstring* bs) {
+int mbedtls_asn1_get_bitstring(unsigned char** p, const unsigned char* end,
+ mbedtls_asn1_bitstring* bs) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
/* Certificate type is a single byte bitstring */
@@ -226,8 +226,8 @@ int mbedtls_asn1_traverse_sequence_of(unsigned char** p, const unsigned char* en
size_t len;
/* Get main sequence tag */
- if ((ret = mbedtls_asn1_get_tag(
- p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
+ if ((ret = mbedtls_asn1_get_tag(p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
return (ret);
}
@@ -315,21 +315,21 @@ static int asn1_get_sequence_of_cb(void* ctx, int tag, unsigned char* start, siz
/*
* Parses and splits an ASN.1 "SEQUENCE OF <tag>"
*/
-int mbedtls_asn1_get_sequence_of(
- unsigned char** p, const unsigned char* end, mbedtls_asn1_sequence* cur, int tag) {
+int mbedtls_asn1_get_sequence_of(unsigned char** p, const unsigned char* end,
+ mbedtls_asn1_sequence* cur, int tag) {
asn1_get_sequence_of_cb_ctx_t cb_ctx = {tag, cur};
memset(cur, 0, sizeof(mbedtls_asn1_sequence));
- return (mbedtls_asn1_traverse_sequence_of(
- p, end, 0xFF, tag, 0, 0, asn1_get_sequence_of_cb, &cb_ctx));
+ return (mbedtls_asn1_traverse_sequence_of(p, end, 0xFF, tag, 0, 0, asn1_get_sequence_of_cb,
+ &cb_ctx));
}
-int mbedtls_asn1_get_alg(
- unsigned char** p, const unsigned char* end, mbedtls_asn1_buf* alg, mbedtls_asn1_buf* params) {
+int mbedtls_asn1_get_alg(unsigned char** p, const unsigned char* end, mbedtls_asn1_buf* alg,
+ mbedtls_asn1_buf* params) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t len;
- if ((ret = mbedtls_asn1_get_tag(
- p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
+ if ((ret = mbedtls_asn1_get_tag(p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
return (ret);
if ((end - *p) < 1)
@@ -399,8 +399,8 @@ void mbedtls_asn1_free_named_data_list(mbedtls_asn1_named_data** head) {
}
}
-const mbedtls_asn1_named_data* mbedtls_asn1_find_named_data(
- const mbedtls_asn1_named_data* list, const char* oid, size_t len) {
+const mbedtls_asn1_named_data* mbedtls_asn1_find_named_data(const mbedtls_asn1_named_data* list,
+ const char* oid, size_t len) {
while (list != NULL) {
if (list->oid.len == len && memcmp(list->oid.p, oid, len) == 0) {
break;
diff --git a/extension/httpfs/third_party/mbedtls/library/base64.cpp b/extension/httpfs/third_party/mbedtls/library/base64.cpp
index 31870bcd7de..cea5cedebc0 100644
--- a/extension/httpfs/third_party/mbedtls/library/base64.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/base64.cpp
@@ -36,13 +36,13 @@
#endif /* MBEDTLS_PLATFORM_C */
#endif /* MBEDTLS_SELF_TEST */
-#define BASE64_SIZE_T_MAX ((size_t)-1) /* SIZE_T_MAX is not standard */
+#define BASE64_SIZE_T_MAX ((size_t) - 1) /* SIZE_T_MAX is not standard */
/*
* Encode a buffer into base64 format
*/
-int mbedtls_base64_encode(
- unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src, size_t slen) {
+int mbedtls_base64_encode(unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src,
+ size_t slen) {
size_t i, n;
int C1, C2, C3;
unsigned char* p;
@@ -103,8 +103,8 @@ int mbedtls_base64_encode(
/*
* Decode a base64-formatted buffer
*/
-int mbedtls_base64_decode(
- unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src, size_t slen) {
+int mbedtls_base64_decode(unsigned char* dst, size_t dlen, size_t* olen, const unsigned char* src,
+ size_t slen) {
size_t i; /* index in source */
size_t n; /* number of digits or trailing = in source */
uint32_t x; /* value accumulator */
diff --git a/extension/httpfs/third_party/mbedtls/library/bignum.cpp b/extension/httpfs/third_party/mbedtls/library/bignum.cpp
index 13fb1d1a9e8..39b733381db 100644
--- a/extension/httpfs/third_party/mbedtls/library/bignum.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/bignum.cpp
@@ -63,7 +63,7 @@
#define biL (ciL << 3) /* bits in limb */
#define biH (ciL << 2) /* half limb size */
-#define MPI_SIZE_T_MAX ((size_t)-1) /* SIZE_T_MAX is not standard */
+#define MPI_SIZE_T_MAX ((size_t) - 1) /* SIZE_T_MAX is not standard */
/*
* Convert between bits/chars and number of limbs
@@ -482,8 +482,8 @@ static int mpi_write_hlp(mbedtls_mpi* X, int radix, char** p, const size_t bufle
/*
* Export into an ASCII string
*/
-int mbedtls_mpi_write_string(
- const mbedtls_mpi* X, int radix, char* buf, size_t buflen, size_t* olen) {
+int mbedtls_mpi_write_string(const mbedtls_mpi* X, int radix, char* buf, size_t buflen,
+ size_t* olen) {
int ret = 0;
size_t n;
char* p;
@@ -1113,8 +1113,8 @@ int mbedtls_mpi_add_abs(mbedtls_mpi* X, const mbedtls_mpi* A, const mbedtls_mpi*
* \return 1 if `l < r`.
* 0 if `l >= r`.
*/
-static mbedtls_mpi_uint mpi_sub_hlp(
- size_t n, mbedtls_mpi_uint* d, const mbedtls_mpi_uint* l, const mbedtls_mpi_uint* r) {
+static mbedtls_mpi_uint mpi_sub_hlp(size_t n, mbedtls_mpi_uint* d, const mbedtls_mpi_uint* l,
+ const mbedtls_mpi_uint* r) {
size_t i;
mbedtls_mpi_uint c = 0, t, z;
@@ -1439,8 +1439,8 @@ int mbedtls_mpi_mul_int(mbedtls_mpi* X, const mbedtls_mpi* A, mbedtls_mpi_uint b
* Unsigned integer divide - double mbedtls_mpi_uint dividend, u1/u0, and
* mbedtls_mpi_uint divisor, d
*/
-static mbedtls_mpi_uint mbedtls_int_div_int(
- mbedtls_mpi_uint u1, mbedtls_mpi_uint u0, mbedtls_mpi_uint d, mbedtls_mpi_uint* r) {
+static mbedtls_mpi_uint mbedtls_int_div_int(mbedtls_mpi_uint u1, mbedtls_mpi_uint u0,
+ mbedtls_mpi_uint d, mbedtls_mpi_uint* r) {
#if defined(MBEDTLS_HAVE_UDBL)
mbedtls_t_udbl dividend, quotient;
#else
@@ -1533,8 +1533,8 @@ static mbedtls_mpi_uint mbedtls_int_div_int(
/*
* Division by mbedtls_mpi: A = Q * B + R (HAC 14.20)
*/
-int mbedtls_mpi_div_mpi(
- mbedtls_mpi* Q, mbedtls_mpi* R, const mbedtls_mpi* A, const mbedtls_mpi* B) {
+int mbedtls_mpi_div_mpi(mbedtls_mpi* Q, mbedtls_mpi* R, const mbedtls_mpi* A,
+ const mbedtls_mpi* B) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t i, n, t, k;
mbedtls_mpi X, Y, Z, T1, T2;
@@ -1837,8 +1837,8 @@ static void mpi_montmul(mbedtls_mpi* A, const mbedtls_mpi* B, const mbedtls_mpi*
*
* See mpi_montmul() regarding constraints and guarantees on the parameters.
*/
-static void mpi_montred(
- mbedtls_mpi* A, const mbedtls_mpi* N, mbedtls_mpi_uint mm, const mbedtls_mpi* T) {
+static void mpi_montred(mbedtls_mpi* A, const mbedtls_mpi* N, mbedtls_mpi_uint mm,
+ const mbedtls_mpi* T) {
mbedtls_mpi_uint z = 1;
mbedtls_mpi U;
@@ -2216,8 +2216,8 @@ int mbedtls_mpi_gcd(mbedtls_mpi* G, const mbedtls_mpi* A, const mbedtls_mpi* B)
* The size and sign of X are unchanged.
* n_bytes must not be 0.
*/
-static int mpi_fill_random_internal(
- mbedtls_mpi* X, size_t n_bytes, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+static int mpi_fill_random_internal(mbedtls_mpi* X, size_t n_bytes,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
const size_t limbs = CHARS_TO_LIMBS(n_bytes);
const size_t overhead = (limbs * ciL) - n_bytes;
@@ -2241,8 +2241,8 @@ static int mpi_fill_random_internal(
* regardless of the platform endianness (useful when f_rng is actually
* deterministic, eg for tests).
*/
-int mbedtls_mpi_fill_random(
- mbedtls_mpi* X, size_t size, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+int mbedtls_mpi_fill_random(mbedtls_mpi* X, size_t size,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t const limbs = CHARS_TO_LIMBS(size);
@@ -2473,8 +2473,8 @@ static int mpi_check_small_factors(const mbedtls_mpi* X) {
/*
* Miller-Rabin pseudo-primality test (HAC 4.24)
*/
-static int mpi_miller_rabin(
- const mbedtls_mpi* X, size_t rounds, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+static int mpi_miller_rabin(const mbedtls_mpi* X, size_t rounds,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
int ret, count;
size_t i, j, k, s;
mbedtls_mpi W, R, T, A, RR;
@@ -2562,8 +2562,8 @@ static int mpi_miller_rabin(
/*
* Pseudo-primality test: small factors, then Miller-Rabin
*/
-int mbedtls_mpi_is_prime_ext(
- const mbedtls_mpi* X, int rounds, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+int mbedtls_mpi_is_prime_ext(const mbedtls_mpi* X, int rounds,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
mbedtls_mpi XX;
MPI_VALIDATE_RET(X != NULL);
@@ -2722,8 +2722,8 @@ int mbedtls_mpi_gen_prime(mbedtls_mpi* X, size_t nbits, int flags,
#define GCD_PAIR_COUNT 3
-static const int gcd_pairs[GCD_PAIR_COUNT][3] = {
- {693, 609, 21}, {1764, 868, 28}, {768454923, 542167814, 1}};
+static const int gcd_pairs[GCD_PAIR_COUNT][3] = {{693, 609, 21}, {1764, 868, 28},
+ {768454923, 542167814, 1}};
/*
* Checkup routine
diff --git a/extension/httpfs/third_party/mbedtls/library/camellia.cpp b/extension/httpfs/third_party/mbedtls/library/camellia.cpp
index 15ebd26ec8a..3827792d937 100644
--- a/extension/httpfs/third_party/mbedtls/library/camellia.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/camellia.cpp
@@ -199,10 +199,10 @@ static const signed char transposes[2][20] = {
#define SHIFT_AND_PLACE(INDEX, OFFSET) \
{ \
- TK[0] = KC[(OFFSET)*4 + 0]; \
- TK[1] = KC[(OFFSET)*4 + 1]; \
- TK[2] = KC[(OFFSET)*4 + 2]; \
- TK[3] = KC[(OFFSET)*4 + 3]; \
+ TK[0] = KC[(OFFSET) * 4 + 0]; \
+ TK[1] = KC[(OFFSET) * 4 + 1]; \
+ TK[2] = KC[(OFFSET) * 4 + 2]; \
+ TK[3] = KC[(OFFSET) * 4 + 3]; \
\
for (i = 1; i <= 4; i++) \
if (shifts[(INDEX)][(OFFSET)][i - 1]) \
@@ -248,8 +248,8 @@ void mbedtls_camellia_free(mbedtls_camellia_context* ctx) {
/*
* Camellia key schedule (encryption)
*/
-int mbedtls_camellia_setkey_enc(
- mbedtls_camellia_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_camellia_setkey_enc(mbedtls_camellia_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
int idx;
size_t i;
uint32_t* RK;
@@ -361,8 +361,8 @@ int mbedtls_camellia_setkey_enc(
/*
* Camellia key schedule (decryption)
*/
-int mbedtls_camellia_setkey_dec(
- mbedtls_camellia_context* ctx, const unsigned char* key, unsigned int keybits) {
+int mbedtls_camellia_setkey_dec(mbedtls_camellia_context* ctx, const unsigned char* key,
+ unsigned int keybits) {
int idx, ret;
size_t i;
mbedtls_camellia_context cty;
@@ -658,9 +658,9 @@ static const unsigned char camellia_test_ecb_cipher[3][CAMELLIA_TESTS_ECB][16] =
#if defined(MBEDTLS_CIPHER_MODE_CBC)
#define CAMELLIA_TESTS_CBC 3
-static const unsigned char camellia_test_cbc_key[3][32] = {
- {0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE, 0xD2, 0xA6, 0xAB, 0xF7, 0x15, 0x88, 0x09, 0xCF, 0x4F,
- 0x3C},
+static const unsigned char camellia_test_cbc_key[3][32] = {{0x2B, 0x7E, 0x15, 0x16, 0x28, 0xAE,
+ 0xD2, 0xA6, 0xAB, 0xF7, 0x15, 0x88,
+ 0x09, 0xCF, 0x4F, 0x3C},
{0x8E, 0x73, 0xB0, 0xF7, 0xDA, 0x0E, 0x64, 0x52, 0xC8, 0x10, 0xF3, 0x2B, 0x80, 0x90, 0x79, 0xE5,
0x62, 0xF8, 0xEA, 0xD2, 0x52, 0x2C, 0x6B, 0x7B},
{0x60, 0x3D, 0xEB, 0x10, 0x15, 0xCA, 0x71, 0xBE, 0x2B, 0x73, 0xAE, 0xF0, 0x85, 0x7D, 0x77, 0x81,
@@ -709,9 +709,9 @@ static const unsigned char camellia_test_cbc_cipher[3][CAMELLIA_TESTS_CBC][16] =
* http://www.faqs.org/rfcs/rfc5528.html
*/
-static const unsigned char camellia_test_ctr_key[3][16] = {
- {0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10, 0x67, 0xCC, 0x4B, 0xF7, 0xA5, 0x76, 0x55, 0x77, 0xF3,
- 0x9E},
+static const unsigned char camellia_test_ctr_key[3][16] = {{0xAE, 0x68, 0x52, 0xF8, 0x12, 0x10,
+ 0x67, 0xCC, 0x4B, 0xF7, 0xA5, 0x76,
+ 0x55, 0x77, 0xF3, 0x9E},
{0x7E, 0x24, 0x06, 0x78, 0x17, 0xFA, 0xE0, 0xD7, 0x43, 0xD6, 0xCE, 0x1F, 0x32, 0x53, 0x91,
0x63},
{0x76, 0x91, 0xBE, 0x03, 0x5E, 0x50, 0x20, 0xA8, 0xAC, 0x6E, 0x61, 0x85, 0x29, 0xF9, 0xA0,
@@ -725,9 +725,9 @@ static const unsigned char camellia_test_ctr_nonce_counter[3][16] = {
{0x00, 0xE0, 0x01, 0x7B, 0x27, 0x77, 0x7F, 0x3F, 0x4A, 0x17, 0x86, 0xF0, 0x00, 0x00, 0x00,
0x01}};
-static const unsigned char camellia_test_ctr_pt[3][48] = {
- {0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20, 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20, 0x6D, 0x73,
- 0x67},
+static const unsigned char camellia_test_ctr_pt[3][48] = {{0x53, 0x69, 0x6E, 0x67, 0x6C, 0x65, 0x20,
+ 0x62, 0x6C, 0x6F, 0x63, 0x6B, 0x20,
+ 0x6D, 0x73, 0x67},
{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
@@ -737,9 +737,9 @@ static const unsigned char camellia_test_ctr_pt[3][48] = {
0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E,
0x1F, 0x20, 0x21, 0x22, 0x23}};
-static const unsigned char camellia_test_ctr_ct[3][48] = {
- {0xD0, 0x9D, 0xC2, 0x9A, 0x82, 0x14, 0x61, 0x9A, 0x20, 0x87, 0x7C, 0x76, 0xDB, 0x1F, 0x0B,
- 0x3F},
+static const unsigned char camellia_test_ctr_ct[3][48] = {{0xD0, 0x9D, 0xC2, 0x9A, 0x82, 0x14, 0x61,
+ 0x9A, 0x20, 0x87, 0x7C, 0x76, 0xDB,
+ 0x1F, 0x0B, 0x3F},
{0xDB, 0xF3, 0xC7, 0x8D, 0xC0, 0x83, 0x96, 0xD4, 0xDA, 0x7C, 0x90, 0x77, 0x65, 0xBB, 0xCB, 0x44,
0x2B, 0x8E, 0x8E, 0x0F, 0x31, 0xF0, 0xDC, 0xA7, 0x2C, 0x74, 0x17, 0xE3, 0x53, 0x60, 0xE0,
0x48},
@@ -871,8 +871,8 @@ int mbedtls_camellia_self_test(int verbose) {
v = i & 1;
if (verbose != 0)
- mbedtls_printf(
- " CAMELLIA-CTR-128 (%s): ", (v == MBEDTLS_CAMELLIA_DECRYPT) ? "dec" : "enc");
+ mbedtls_printf(" CAMELLIA-CTR-128 (%s): ",
+ (v == MBEDTLS_CAMELLIA_DECRYPT) ? "dec" : "enc");
memcpy(nonce_counter, camellia_test_ctr_nonce_counter[u], 16);
memcpy(key, camellia_test_ctr_key[u], 16);
diff --git a/extension/httpfs/third_party/mbedtls/library/cipher.cpp b/extension/httpfs/third_party/mbedtls/library/cipher.cpp
index ecf3b1d1f28..22a9b08c7f3 100644
--- a/extension/httpfs/third_party/mbedtls/library/cipher.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/cipher.cpp
@@ -119,8 +119,8 @@ const mbedtls_cipher_info_t* mbedtls_cipher_info_from_string(const char* cipher_
return (NULL);
}
-const mbedtls_cipher_info_t* mbedtls_cipher_info_from_values(
- const mbedtls_cipher_id_t cipher_id, int key_bitlen, const mbedtls_cipher_mode_t mode) {
+const mbedtls_cipher_info_t* mbedtls_cipher_info_from_values(const mbedtls_cipher_id_t cipher_id,
+ int key_bitlen, const mbedtls_cipher_mode_t mode) {
const mbedtls_cipher_definition_t* def;
for (def = mbedtls_cipher_definitions; def->info != NULL; def++)
@@ -200,8 +200,8 @@ int mbedtls_cipher_setup(mbedtls_cipher_context_t* ctx, const mbedtls_cipher_inf
}
#if defined(MBEDTLS_USE_PSA_CRYPTO)
-int mbedtls_cipher_setup_psa(
- mbedtls_cipher_context_t* ctx, const mbedtls_cipher_info_t* cipher_info, size_t taglen) {
+int mbedtls_cipher_setup_psa(mbedtls_cipher_context_t* ctx,
+ const mbedtls_cipher_info_t* cipher_info, size_t taglen) {
psa_algorithm_t alg;
mbedtls_cipher_context_psa* cipher_psa;
@@ -408,8 +408,8 @@ int mbedtls_cipher_reset(mbedtls_cipher_context_t* ctx) {
}
#if defined(MBEDTLS_GCM_C) || defined(MBEDTLS_CHACHAPOLY_C)
-int mbedtls_cipher_update_ad(
- mbedtls_cipher_context_t* ctx, const unsigned char* ad, size_t ad_len) {
+int mbedtls_cipher_update_ad(mbedtls_cipher_context_t* ctx, const unsigned char* ad,
+ size_t ad_len) {
CIPHER_VALIDATE_RET(ctx != NULL);
CIPHER_VALIDATE_RET(ad_len == 0 || ad != NULL);
if (ctx->cipher_info == NULL)
@@ -443,8 +443,8 @@ int mbedtls_cipher_update_ad(
if (result != 0)
return (result);
- return (mbedtls_chachapoly_update_aad(
- (mbedtls_chachapoly_context*)ctx->cipher_ctx, ad, ad_len));
+ return (mbedtls_chachapoly_update_aad((mbedtls_chachapoly_context*)ctx->cipher_ctx, ad,
+ ad_len));
}
#endif
@@ -485,8 +485,8 @@ int mbedtls_cipher_update(mbedtls_cipher_context_t* ctx, const unsigned char* in
*olen = ilen;
- if (0 != (ret = ctx->cipher_info->base->ecb_func(
- ctx->cipher_ctx, ctx->operation, input, output))) {
+ if (0 != (ret = ctx->cipher_info->base->ecb_func(ctx->cipher_ctx, ctx->operation, input,
+ output))) {
return (ret);
}
@@ -495,23 +495,23 @@ int mbedtls_cipher_update(mbedtls_cipher_context_t* ctx, const unsigned char* in
#if defined(MBEDTLS_GCM_C)
if (ctx->cipher_info->mode == MBEDTLS_MODE_GCM) {
- return (mbedtls_gcm_update(
- (mbedtls_gcm_context*)ctx->cipher_ctx, input, ilen, output, ilen, olen));
+ return (mbedtls_gcm_update((mbedtls_gcm_context*)ctx->cipher_ctx, input, ilen, output, ilen,
+ olen));
}
#endif
#if defined(MBEDTLS_CCM_C)
if (ctx->cipher_info->mode == MBEDTLS_MODE_CCM_STAR_NO_TAG) {
- return (mbedtls_ccm_update(
- (mbedtls_ccm_context*)ctx->cipher_ctx, input, ilen, output, ilen, olen));
+ return (mbedtls_ccm_update((mbedtls_ccm_context*)ctx->cipher_ctx, input, ilen, output, ilen,
+ olen));
}
#endif
#if defined(MBEDTLS_CHACHAPOLY_C)
if (ctx->cipher_info->type == MBEDTLS_CIPHER_CHACHA20_POLY1305) {
*olen = ilen;
- return (mbedtls_chachapoly_update(
- (mbedtls_chachapoly_context*)ctx->cipher_ctx, ilen, input, output));
+ return (mbedtls_chachapoly_update((mbedtls_chachapoly_context*)ctx->cipher_ctx, ilen, input,
+ output));
}
#endif
@@ -581,8 +581,8 @@ int mbedtls_cipher_update(mbedtls_cipher_context_t* ctx, const unsigned char* in
* Process remaining full blocks
*/
if (ilen) {
- if (0 != (ret = ctx->cipher_info->base->cbc_func(
- ctx->cipher_ctx, ctx->operation, ilen, ctx->iv, input, output))) {
+ if (0 != (ret = ctx->cipher_info->base->cbc_func(ctx->cipher_ctx, ctx->operation, ilen,
+ ctx->iv, input, output))) {
return (ret);
}
@@ -608,8 +608,8 @@ int mbedtls_cipher_update(mbedtls_cipher_context_t* ctx, const unsigned char* in
#if defined(MBEDTLS_CIPHER_MODE_OFB)
if (ctx->cipher_info->mode == MBEDTLS_MODE_OFB) {
- if (0 != (ret = ctx->cipher_info->base->ofb_func(
- ctx->cipher_ctx, ilen, &ctx->unprocessed_len, ctx->iv, input, output))) {
+ if (0 != (ret = ctx->cipher_info->base->ofb_func(ctx->cipher_ctx, ilen,
+ &ctx->unprocessed_len, ctx->iv, input, output))) {
return (ret);
}
@@ -639,8 +639,8 @@ int mbedtls_cipher_update(mbedtls_cipher_context_t* ctx, const unsigned char* in
return (MBEDTLS_ERR_CIPHER_FEATURE_UNAVAILABLE);
}
- ret = ctx->cipher_info->base->xts_func(
- ctx->cipher_ctx, ctx->operation, ilen, ctx->iv, input, output);
+ ret = ctx->cipher_info->base->xts_func(ctx->cipher_ctx, ctx->operation, ilen, ctx->iv,
+ input, output);
if (ret != 0) {
return (ret);
}
@@ -869,8 +869,8 @@ int mbedtls_cipher_finish(mbedtls_cipher_context_t* ctx, unsigned char* output,
return (0);
}
- ctx->add_padding(
- ctx->unprocessed_data, mbedtls_cipher_get_iv_size(ctx), ctx->unprocessed_len);
+ ctx->add_padding(ctx->unprocessed_data, mbedtls_cipher_get_iv_size(ctx),
+ ctx->unprocessed_len);
} else if (mbedtls_cipher_get_block_size(ctx) != ctx->unprocessed_len) {
/*
* For decrypt operations, expect a full block,
@@ -986,8 +986,8 @@ int mbedtls_cipher_write_tag(mbedtls_cipher_context_t* ctx, unsigned char* tag,
size_t output_length;
/* The code here doesn't yet support alternative implementations
* that can delay up to a block of output. */
- return (mbedtls_gcm_finish(
- (mbedtls_gcm_context*)ctx->cipher_ctx, NULL, 0, &output_length, tag, tag_len));
+ return (mbedtls_gcm_finish((mbedtls_gcm_context*)ctx->cipher_ctx, NULL, 0, &output_length,
+ tag, tag_len));
}
#endif
@@ -1004,8 +1004,8 @@ int mbedtls_cipher_write_tag(mbedtls_cipher_context_t* ctx, unsigned char* tag,
return (0);
}
-int mbedtls_cipher_check_tag(
- mbedtls_cipher_context_t* ctx, const unsigned char* tag, size_t tag_len) {
+int mbedtls_cipher_check_tag(mbedtls_cipher_context_t* ctx, const unsigned char* tag,
+ size_t tag_len) {
unsigned char check_tag[16];
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
@@ -1201,8 +1201,8 @@ static int mbedtls_cipher_aead_encrypt(mbedtls_cipher_context_t* ctx, const unsi
#if defined(MBEDTLS_CCM_C)
if (MBEDTLS_MODE_CCM == ctx->cipher_info->mode) {
*olen = ilen;
- return (mbedtls_ccm_encrypt_and_tag(
- ctx->cipher_ctx, ilen, iv, iv_len, ad, ad_len, input, output, tag, tag_len));
+ return (mbedtls_ccm_encrypt_and_tag(ctx->cipher_ctx, ilen, iv, iv_len, ad, ad_len, input,
+ output, tag, tag_len));
}
#endif /* MBEDTLS_CCM_C */
#if defined(MBEDTLS_CHACHAPOLY_C)
@@ -1213,8 +1213,8 @@ static int mbedtls_cipher_aead_encrypt(mbedtls_cipher_context_t* ctx, const unsi
}
*olen = ilen;
- return (mbedtls_chachapoly_encrypt_and_tag(
- ctx->cipher_ctx, ilen, iv, ad, ad_len, input, output, tag));
+ return (mbedtls_chachapoly_encrypt_and_tag(ctx->cipher_ctx, ilen, iv, ad, ad_len, input,
+ output, tag));
}
#endif /* MBEDTLS_CHACHAPOLY_C */
@@ -1274,8 +1274,8 @@ static int mbedtls_cipher_aead_decrypt(mbedtls_cipher_context_t* ctx, const unsi
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
*olen = ilen;
- ret = mbedtls_ccm_auth_decrypt(
- ctx->cipher_ctx, ilen, iv, iv_len, ad, ad_len, input, output, tag, tag_len);
+ ret = mbedtls_ccm_auth_decrypt(ctx->cipher_ctx, ilen, iv, iv_len, ad, ad_len, input, output,
+ tag, tag_len);
if (ret == MBEDTLS_ERR_CCM_AUTH_FAILED)
ret = MBEDTLS_ERR_CIPHER_AUTH_FAILED;
@@ -1293,8 +1293,8 @@ static int mbedtls_cipher_aead_decrypt(mbedtls_cipher_context_t* ctx, const unsi
}
*olen = ilen;
- ret = mbedtls_chachapoly_auth_decrypt(
- ctx->cipher_ctx, ilen, iv, ad, ad_len, tag, input, output);
+ ret = mbedtls_chachapoly_auth_decrypt(ctx->cipher_ctx, ilen, iv, ad, ad_len, tag, input,
+ output);
if (ret == MBEDTLS_ERR_CHACHAPOLY_AUTH_FAILED)
ret = MBEDTLS_ERR_CIPHER_AUTH_FAILED;
@@ -1347,8 +1347,8 @@ int mbedtls_cipher_auth_encrypt_ext(mbedtls_cipher_context_t* ctx, const unsigne
if (output_len < ilen + tag_len)
return (MBEDTLS_ERR_CIPHER_BAD_INPUT_DATA);
- int ret = mbedtls_cipher_aead_encrypt(
- ctx, iv, iv_len, ad, ad_len, input, ilen, output, olen, output + ilen, tag_len);
+ int ret = mbedtls_cipher_aead_encrypt(ctx, iv, iv_len, ad, ad_len, input, ilen, output, olen,
+ output + ilen, tag_len);
*olen += tag_len;
return (ret);
#else
diff --git a/extension/httpfs/third_party/mbedtls/library/cipher_wrap.cpp b/extension/httpfs/third_party/mbedtls/library/cipher_wrap.cpp
index 3b7b86399ea..7de01cc3ac6 100644
--- a/extension/httpfs/third_party/mbedtls/library/cipher_wrap.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/cipher_wrap.cpp
@@ -112,8 +112,8 @@ static void ccm_ctx_free(void* ctx) {
#if defined(MBEDTLS_AES_C)
-static int aes_crypt_ecb_wrap(
- void* ctx, mbedtls_operation_t operation, const unsigned char* input, unsigned char* output) {
+static int aes_crypt_ecb_wrap(void* ctx, mbedtls_operation_t operation, const unsigned char* input,
+ unsigned char* output) {
return mbedtls_aes_crypt_ecb((mbedtls_aes_context*)ctx, operation, input, output);
}
@@ -127,8 +127,8 @@ static int aes_crypt_cbc_wrap(void* ctx, mbedtls_operation_t operation, size_t l
#if defined(MBEDTLS_CIPHER_MODE_CFB)
static int aes_crypt_cfb128_wrap(void* ctx, mbedtls_operation_t operation, size_t length,
size_t* iv_off, unsigned char* iv, const unsigned char* input, unsigned char* output) {
- return mbedtls_aes_crypt_cfb128(
- (mbedtls_aes_context*)ctx, operation, length, iv_off, iv, input, output);
+ return mbedtls_aes_crypt_cfb128((mbedtls_aes_context*)ctx, operation, length, iv_off, iv, input,
+ output);
}
#endif /* MBEDTLS_CIPHER_MODE_CFB */
@@ -143,8 +143,8 @@ static int aes_crypt_ofb_wrap(void* ctx, size_t length, size_t* iv_off, unsigned
static int aes_crypt_ctr_wrap(void* ctx, size_t length, size_t* nc_off,
unsigned char* nonce_counter, unsigned char* stream_block, const unsigned char* input,
unsigned char* output) {
- return mbedtls_aes_crypt_ctr(
- (mbedtls_aes_context*)ctx, length, nc_off, nonce_counter, stream_block, input, output);
+ return mbedtls_aes_crypt_ctr((mbedtls_aes_context*)ctx, length, nc_off, nonce_counter,
+ stream_block, input, output);
}
#endif /* MBEDTLS_CIPHER_MODE_CTR */
@@ -214,57 +214,57 @@ static const mbedtls_cipher_base_t aes_info = {MBEDTLS_CIPHER_ID_AES, aes_crypt_
#endif
aes_setkey_enc_wrap, aes_setkey_dec_wrap, aes_ctx_alloc, aes_ctx_free};
-static const mbedtls_cipher_info_t aes_128_ecb_info = {
- MBEDTLS_CIPHER_AES_128_ECB, MBEDTLS_MODE_ECB, 128, "AES-128-ECB", 0, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_128_ecb_info = {MBEDTLS_CIPHER_AES_128_ECB, MBEDTLS_MODE_ECB,
+ 128, "AES-128-ECB", 0, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_192_ecb_info = {
- MBEDTLS_CIPHER_AES_192_ECB, MBEDTLS_MODE_ECB, 192, "AES-192-ECB", 0, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_192_ecb_info = {MBEDTLS_CIPHER_AES_192_ECB, MBEDTLS_MODE_ECB,
+ 192, "AES-192-ECB", 0, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_256_ecb_info = {
- MBEDTLS_CIPHER_AES_256_ECB, MBEDTLS_MODE_ECB, 256, "AES-256-ECB", 0, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_256_ecb_info = {MBEDTLS_CIPHER_AES_256_ECB, MBEDTLS_MODE_ECB,
+ 256, "AES-256-ECB", 0, 0, 16, &aes_info};
#if defined(MBEDTLS_CIPHER_MODE_CBC)
-static const mbedtls_cipher_info_t aes_128_cbc_info = {
- MBEDTLS_CIPHER_AES_128_CBC, MBEDTLS_MODE_CBC, 128, "AES-128-CBC", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_128_cbc_info = {MBEDTLS_CIPHER_AES_128_CBC, MBEDTLS_MODE_CBC,
+ 128, "AES-128-CBC", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_192_cbc_info = {
- MBEDTLS_CIPHER_AES_192_CBC, MBEDTLS_MODE_CBC, 192, "AES-192-CBC", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_192_cbc_info = {MBEDTLS_CIPHER_AES_192_CBC, MBEDTLS_MODE_CBC,
+ 192, "AES-192-CBC", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_256_cbc_info = {
- MBEDTLS_CIPHER_AES_256_CBC, MBEDTLS_MODE_CBC, 256, "AES-256-CBC", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_256_cbc_info = {MBEDTLS_CIPHER_AES_256_CBC, MBEDTLS_MODE_CBC,
+ 256, "AES-256-CBC", 16, 0, 16, &aes_info};
#endif /* MBEDTLS_CIPHER_MODE_CBC */
#if defined(MBEDTLS_CIPHER_MODE_CFB)
-static const mbedtls_cipher_info_t aes_128_cfb128_info = {
- MBEDTLS_CIPHER_AES_128_CFB128, MBEDTLS_MODE_CFB, 128, "AES-128-CFB128", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_128_cfb128_info = {MBEDTLS_CIPHER_AES_128_CFB128,
+ MBEDTLS_MODE_CFB, 128, "AES-128-CFB128", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_192_cfb128_info = {
- MBEDTLS_CIPHER_AES_192_CFB128, MBEDTLS_MODE_CFB, 192, "AES-192-CFB128", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_192_cfb128_info = {MBEDTLS_CIPHER_AES_192_CFB128,
+ MBEDTLS_MODE_CFB, 192, "AES-192-CFB128", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_256_cfb128_info = {
- MBEDTLS_CIPHER_AES_256_CFB128, MBEDTLS_MODE_CFB, 256, "AES-256-CFB128", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_256_cfb128_info = {MBEDTLS_CIPHER_AES_256_CFB128,
+ MBEDTLS_MODE_CFB, 256, "AES-256-CFB128", 16, 0, 16, &aes_info};
#endif /* MBEDTLS_CIPHER_MODE_CFB */
#if defined(MBEDTLS_CIPHER_MODE_OFB)
-static const mbedtls_cipher_info_t aes_128_ofb_info = {
- MBEDTLS_CIPHER_AES_128_OFB, MBEDTLS_MODE_OFB, 128, "AES-128-OFB", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_128_ofb_info = {MBEDTLS_CIPHER_AES_128_OFB, MBEDTLS_MODE_OFB,
+ 128, "AES-128-OFB", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_192_ofb_info = {
- MBEDTLS_CIPHER_AES_192_OFB, MBEDTLS_MODE_OFB, 192, "AES-192-OFB", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_192_ofb_info = {MBEDTLS_CIPHER_AES_192_OFB, MBEDTLS_MODE_OFB,
+ 192, "AES-192-OFB", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_256_ofb_info = {
- MBEDTLS_CIPHER_AES_256_OFB, MBEDTLS_MODE_OFB, 256, "AES-256-OFB", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_256_ofb_info = {MBEDTLS_CIPHER_AES_256_OFB, MBEDTLS_MODE_OFB,
+ 256, "AES-256-OFB", 16, 0, 16, &aes_info};
#endif /* MBEDTLS_CIPHER_MODE_OFB */
#if defined(MBEDTLS_CIPHER_MODE_CTR)
-static const mbedtls_cipher_info_t aes_128_ctr_info = {
- MBEDTLS_CIPHER_AES_128_CTR, MBEDTLS_MODE_CTR, 128, "AES-128-CTR", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_128_ctr_info = {MBEDTLS_CIPHER_AES_128_CTR, MBEDTLS_MODE_CTR,
+ 128, "AES-128-CTR", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_192_ctr_info = {
- MBEDTLS_CIPHER_AES_192_CTR, MBEDTLS_MODE_CTR, 192, "AES-192-CTR", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_192_ctr_info = {MBEDTLS_CIPHER_AES_192_CTR, MBEDTLS_MODE_CTR,
+ 192, "AES-192-CTR", 16, 0, 16, &aes_info};
-static const mbedtls_cipher_info_t aes_256_ctr_info = {
- MBEDTLS_CIPHER_AES_256_CTR, MBEDTLS_MODE_CTR, 256, "AES-256-CTR", 16, 0, 16, &aes_info};
+static const mbedtls_cipher_info_t aes_256_ctr_info = {MBEDTLS_CIPHER_AES_256_CTR, MBEDTLS_MODE_CTR,
+ 256, "AES-256-CTR", 16, 0, 16, &aes_info};
#endif /* MBEDTLS_CIPHER_MODE_CTR */
#if defined(MBEDTLS_CIPHER_MODE_XTS)
@@ -318,11 +318,11 @@ static const mbedtls_cipher_base_t xts_aes_info = {MBEDTLS_CIPHER_ID_AES, NULL,
#endif
xts_aes_setkey_enc_wrap, xts_aes_setkey_dec_wrap, xts_aes_ctx_alloc, xts_aes_ctx_free};
-static const mbedtls_cipher_info_t aes_128_xts_info = {
- MBEDTLS_CIPHER_AES_128_XTS, MBEDTLS_MODE_XTS, 256, "AES-128-XTS", 16, 0, 16, &xts_aes_info};
+static const mbedtls_cipher_info_t aes_128_xts_info = {MBEDTLS_CIPHER_AES_128_XTS, MBEDTLS_MODE_XTS,
+ 256, "AES-128-XTS", 16, 0, 16, &xts_aes_info};
-static const mbedtls_cipher_info_t aes_256_xts_info = {
- MBEDTLS_CIPHER_AES_256_XTS, MBEDTLS_MODE_XTS, 512, "AES-256-XTS", 16, 0, 16, &xts_aes_info};
+static const mbedtls_cipher_info_t aes_256_xts_info = {MBEDTLS_CIPHER_AES_256_XTS, MBEDTLS_MODE_XTS,
+ 512, "AES-256-XTS", 16, 0, 16, &xts_aes_info};
#endif /* MBEDTLS_CIPHER_MODE_XTS */
#if defined(MBEDTLS_GCM_C)
@@ -425,24 +425,24 @@ static const mbedtls_cipher_info_t aes_256_ccm_star_no_tag_info = {
#if defined(MBEDTLS_CAMELLIA_C)
-static int camellia_crypt_ecb_wrap(
- void* ctx, mbedtls_operation_t operation, const unsigned char* input, unsigned char* output) {
+static int camellia_crypt_ecb_wrap(void* ctx, mbedtls_operation_t operation,
+ const unsigned char* input, unsigned char* output) {
return mbedtls_camellia_crypt_ecb((mbedtls_camellia_context*)ctx, operation, input, output);
}
#if defined(MBEDTLS_CIPHER_MODE_CBC)
static int camellia_crypt_cbc_wrap(void* ctx, mbedtls_operation_t operation, size_t length,
unsigned char* iv, const unsigned char* input, unsigned char* output) {
- return mbedtls_camellia_crypt_cbc(
- (mbedtls_camellia_context*)ctx, operation, length, iv, input, output);
+ return mbedtls_camellia_crypt_cbc((mbedtls_camellia_context*)ctx, operation, length, iv, input,
+ output);
}
#endif /* MBEDTLS_CIPHER_MODE_CBC */
#if defined(MBEDTLS_CIPHER_MODE_CFB)
static int camellia_crypt_cfb128_wrap(void* ctx, mbedtls_operation_t operation, size_t length,
size_t* iv_off, unsigned char* iv, const unsigned char* input, unsigned char* output) {
- return mbedtls_camellia_crypt_cfb128(
- (mbedtls_camellia_context*)ctx, operation, length, iv_off, iv, input, output);
+ return mbedtls_camellia_crypt_cfb128((mbedtls_camellia_context*)ctx, operation, length, iv_off,
+ iv, input, output);
}
#endif /* MBEDTLS_CIPHER_MODE_CFB */
@@ -450,8 +450,8 @@ static int camellia_crypt_cfb128_wrap(void* ctx, mbedtls_operation_t operation,
static int camellia_crypt_ctr_wrap(void* ctx, size_t length, size_t* nc_off,
unsigned char* nonce_counter, unsigned char* stream_block, const unsigned char* input,
unsigned char* output) {
- return mbedtls_camellia_crypt_ctr(
- (mbedtls_camellia_context*)ctx, length, nc_off, nonce_counter, stream_block, input, output);
+ return mbedtls_camellia_crypt_ctr((mbedtls_camellia_context*)ctx, length, nc_off, nonce_counter,
+ stream_block, input, output);
}
#endif /* MBEDTLS_CIPHER_MODE_CTR */
@@ -546,8 +546,8 @@ static const mbedtls_cipher_info_t camellia_256_ctr_info = {MBEDTLS_CIPHER_CAMEL
#if defined(MBEDTLS_GCM_C)
static int gcm_camellia_setkey_wrap(void* ctx, const unsigned char* key, unsigned int key_bitlen) {
- return mbedtls_gcm_setkey(
- (mbedtls_gcm_context*)ctx, MBEDTLS_CIPHER_ID_CAMELLIA, key, key_bitlen);
+ return mbedtls_gcm_setkey((mbedtls_gcm_context*)ctx, MBEDTLS_CIPHER_ID_CAMELLIA, key,
+ key_bitlen);
}
static const mbedtls_cipher_base_t gcm_camellia_info = {
@@ -592,8 +592,8 @@ static const mbedtls_cipher_info_t camellia_256_gcm_info = {MBEDTLS_CIPHER_CAMEL
#if defined(MBEDTLS_CCM_C)
static int ccm_camellia_setkey_wrap(void* ctx, const unsigned char* key, unsigned int key_bitlen) {
- return mbedtls_ccm_setkey(
- (mbedtls_ccm_context*)ctx, MBEDTLS_CIPHER_ID_CAMELLIA, key, key_bitlen);
+ return mbedtls_ccm_setkey((mbedtls_ccm_context*)ctx, MBEDTLS_CIPHER_ID_CAMELLIA, key,
+ key_bitlen);
}
static const mbedtls_cipher_base_t ccm_camellia_info = {
@@ -652,8 +652,8 @@ static const mbedtls_cipher_info_t camellia_256_ccm_star_no_tag_info = {
#if defined(MBEDTLS_ARIA_C)
-static int aria_crypt_ecb_wrap(
- void* ctx, mbedtls_operation_t operation, const unsigned char* input, unsigned char* output) {
+static int aria_crypt_ecb_wrap(void* ctx, mbedtls_operation_t operation, const unsigned char* input,
+ unsigned char* output) {
(void)operation;
return mbedtls_aria_crypt_ecb((mbedtls_aria_context*)ctx, input, output);
}
@@ -668,8 +668,8 @@ static int aria_crypt_cbc_wrap(void* ctx, mbedtls_operation_t operation, size_t
#if defined(MBEDTLS_CIPHER_MODE_CFB)
static int aria_crypt_cfb128_wrap(void* ctx, mbedtls_operation_t operation, size_t length,
size_t* iv_off, unsigned char* iv, const unsigned char* input, unsigned char* output) {
- return mbedtls_aria_crypt_cfb128(
- (mbedtls_aria_context*)ctx, operation, length, iv_off, iv, input, output);
+ return mbedtls_aria_crypt_cfb128((mbedtls_aria_context*)ctx, operation, length, iv_off, iv,
+ input, output);
}
#endif /* MBEDTLS_CIPHER_MODE_CFB */
@@ -677,8 +677,8 @@ static int aria_crypt_cfb128_wrap(void* ctx, mbedtls_operation_t operation, size
static int aria_crypt_ctr_wrap(void* ctx, size_t length, size_t* nc_off,
unsigned char* nonce_counter, unsigned char* stream_block, const unsigned char* input,
unsigned char* output) {
- return mbedtls_aria_crypt_ctr(
- (mbedtls_aria_context*)ctx, length, nc_off, nonce_counter, stream_block, input, output);
+ return mbedtls_aria_crypt_ctr((mbedtls_aria_context*)ctx, length, nc_off, nonce_counter,
+ stream_block, input, output);
}
#endif /* MBEDTLS_CIPHER_MODE_CTR */
@@ -728,24 +728,24 @@ static const mbedtls_cipher_base_t aria_info = {MBEDTLS_CIPHER_ID_ARIA, aria_cry
#endif
aria_setkey_enc_wrap, aria_setkey_dec_wrap, aria_ctx_alloc, aria_ctx_free};
-static const mbedtls_cipher_info_t aria_128_ecb_info = {
- MBEDTLS_CIPHER_ARIA_128_ECB, MBEDTLS_MODE_ECB, 128, "ARIA-128-ECB", 0, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_128_ecb_info = {MBEDTLS_CIPHER_ARIA_128_ECB,
+ MBEDTLS_MODE_ECB, 128, "ARIA-128-ECB", 0, 0, 16, &aria_info};
-static const mbedtls_cipher_info_t aria_192_ecb_info = {
- MBEDTLS_CIPHER_ARIA_192_ECB, MBEDTLS_MODE_ECB, 192, "ARIA-192-ECB", 0, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_192_ecb_info = {MBEDTLS_CIPHER_ARIA_192_ECB,
+ MBEDTLS_MODE_ECB, 192, "ARIA-192-ECB", 0, 0, 16, &aria_info};
-static const mbedtls_cipher_info_t aria_256_ecb_info = {
- MBEDTLS_CIPHER_ARIA_256_ECB, MBEDTLS_MODE_ECB, 256, "ARIA-256-ECB", 0, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_256_ecb_info = {MBEDTLS_CIPHER_ARIA_256_ECB,
+ MBEDTLS_MODE_ECB, 256, "ARIA-256-ECB", 0, 0, 16, &aria_info};
#if defined(MBEDTLS_CIPHER_MODE_CBC)
-static const mbedtls_cipher_info_t aria_128_cbc_info = {
- MBEDTLS_CIPHER_ARIA_128_CBC, MBEDTLS_MODE_CBC, 128, "ARIA-128-CBC", 16, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_128_cbc_info = {MBEDTLS_CIPHER_ARIA_128_CBC,
+ MBEDTLS_MODE_CBC, 128, "ARIA-128-CBC", 16, 0, 16, &aria_info};
-static const mbedtls_cipher_info_t aria_192_cbc_info = {
- MBEDTLS_CIPHER_ARIA_192_CBC, MBEDTLS_MODE_CBC, 192, "ARIA-192-CBC", 16, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_192_cbc_info = {MBEDTLS_CIPHER_ARIA_192_CBC,
+ MBEDTLS_MODE_CBC, 192, "ARIA-192-CBC", 16, 0, 16, &aria_info};
-static const mbedtls_cipher_info_t aria_256_cbc_info = {
- MBEDTLS_CIPHER_ARIA_256_CBC, MBEDTLS_MODE_CBC, 256, "ARIA-256-CBC", 16, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_256_cbc_info = {MBEDTLS_CIPHER_ARIA_256_CBC,
+ MBEDTLS_MODE_CBC, 256, "ARIA-256-CBC", 16, 0, 16, &aria_info};
#endif /* MBEDTLS_CIPHER_MODE_CBC */
#if defined(MBEDTLS_CIPHER_MODE_CFB)
@@ -760,14 +760,14 @@ static const mbedtls_cipher_info_t aria_256_cfb128_info = {MBEDTLS_CIPHER_ARIA_2
#endif /* MBEDTLS_CIPHER_MODE_CFB */
#if defined(MBEDTLS_CIPHER_MODE_CTR)
-static const mbedtls_cipher_info_t aria_128_ctr_info = {
- MBEDTLS_CIPHER_ARIA_128_CTR, MBEDTLS_MODE_CTR, 128, "ARIA-128-CTR", 16, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_128_ctr_info = {MBEDTLS_CIPHER_ARIA_128_CTR,
+ MBEDTLS_MODE_CTR, 128, "ARIA-128-CTR", 16, 0, 16, &aria_info};
-static const mbedtls_cipher_info_t aria_192_ctr_info = {
- MBEDTLS_CIPHER_ARIA_192_CTR, MBEDTLS_MODE_CTR, 192, "ARIA-192-CTR", 16, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_192_ctr_info = {MBEDTLS_CIPHER_ARIA_192_CTR,
+ MBEDTLS_MODE_CTR, 192, "ARIA-192-CTR", 16, 0, 16, &aria_info};
-static const mbedtls_cipher_info_t aria_256_ctr_info = {
- MBEDTLS_CIPHER_ARIA_256_CTR, MBEDTLS_MODE_CTR, 256, "ARIA-256-CTR", 16, 0, 16, &aria_info};
+static const mbedtls_cipher_info_t aria_256_ctr_info = {MBEDTLS_CIPHER_ARIA_256_CTR,
+ MBEDTLS_MODE_CTR, 256, "ARIA-256-CTR", 16, 0, 16, &aria_info};
#endif /* MBEDTLS_CIPHER_MODE_CTR */
#if defined(MBEDTLS_GCM_C)
@@ -870,14 +870,14 @@ static const mbedtls_cipher_info_t aria_256_ccm_star_no_tag_info = {
#if defined(MBEDTLS_DES_C)
-static int des_crypt_ecb_wrap(
- void* ctx, mbedtls_operation_t operation, const unsigned char* input, unsigned char* output) {
+static int des_crypt_ecb_wrap(void* ctx, mbedtls_operation_t operation, const unsigned char* input,
+ unsigned char* output) {
((void)operation);
return mbedtls_des_crypt_ecb((mbedtls_des_context*)ctx, input, output);
}
-static int des3_crypt_ecb_wrap(
- void* ctx, mbedtls_operation_t operation, const unsigned char* input, unsigned char* output) {
+static int des3_crypt_ecb_wrap(void* ctx, mbedtls_operation_t operation, const unsigned char* input,
+ unsigned char* output) {
((void)operation);
return mbedtls_des3_crypt_ecb((mbedtls_des3_context*)ctx, input, output);
}
@@ -1064,8 +1064,8 @@ static int chacha20_setkey_wrap(void* ctx, const unsigned char* key, unsigned in
return (0);
}
-static int chacha20_stream_wrap(
- void* ctx, size_t length, const unsigned char* input, unsigned char* output) {
+static int chacha20_stream_wrap(void* ctx, size_t length, const unsigned char* input,
+ unsigned char* output) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
ret = mbedtls_chacha20_update(ctx, length, input, output);
@@ -1112,8 +1112,8 @@ static const mbedtls_cipher_base_t chacha20_base_info = {MBEDTLS_CIPHER_ID_CHACH
chacha20_stream_wrap,
#endif
chacha20_setkey_wrap, chacha20_setkey_wrap, chacha20_ctx_alloc, chacha20_ctx_free};
-static const mbedtls_cipher_info_t chacha20_info = {
- MBEDTLS_CIPHER_CHACHA20, MBEDTLS_MODE_STREAM, 256, "CHACHA20", 12, 0, 1, &chacha20_base_info};
+static const mbedtls_cipher_info_t chacha20_info = {MBEDTLS_CIPHER_CHACHA20, MBEDTLS_MODE_STREAM,
+ 256, "CHACHA20", 12, 0, 1, &chacha20_base_info};
#endif /* MBEDTLS_CHACHA20_C */
#if defined(MBEDTLS_CHACHAPOLY_C)
@@ -1170,8 +1170,8 @@ static const mbedtls_cipher_info_t chachapoly_info = {MBEDTLS_CIPHER_CHACHA20_PO
#endif /* MBEDTLS_CHACHAPOLY_C */
#if defined(MBEDTLS_CIPHER_NULL_CIPHER)
-static int null_crypt_stream(
- void* ctx, size_t length, const unsigned char* input, unsigned char* output) {
+static int null_crypt_stream(void* ctx, size_t length, const unsigned char* input,
+ unsigned char* output) {
((void)ctx);
memmove(output, input, length);
return (0);
@@ -1214,8 +1214,8 @@ static const mbedtls_cipher_base_t null_base_info = {MBEDTLS_CIPHER_ID_NULL, NUL
#endif
null_setkey, null_setkey, null_ctx_alloc, null_ctx_free};
-static const mbedtls_cipher_info_t null_cipher_info = {
- MBEDTLS_CIPHER_NULL, MBEDTLS_MODE_STREAM, 0, "NULL", 0, 0, 1, &null_base_info};
+static const mbedtls_cipher_info_t null_cipher_info = {MBEDTLS_CIPHER_NULL, MBEDTLS_MODE_STREAM, 0,
+ "NULL", 0, 0, 1, &null_base_info};
#endif /* defined(MBEDTLS_CIPHER_NULL_CIPHER) */
#if defined(MBEDTLS_NIST_KW_C)
@@ -1234,13 +1234,13 @@ static void kw_ctx_free(void* ctx) {
}
static int kw_aes_setkey_wrap(void* ctx, const unsigned char* key, unsigned int key_bitlen) {
- return mbedtls_nist_kw_setkey(
- (mbedtls_nist_kw_context*)ctx, MBEDTLS_CIPHER_ID_AES, key, key_bitlen, 1);
+ return mbedtls_nist_kw_setkey((mbedtls_nist_kw_context*)ctx, MBEDTLS_CIPHER_ID_AES, key,
+ key_bitlen, 1);
}
static int kw_aes_setkey_unwrap(void* ctx, const unsigned char* key, unsigned int key_bitlen) {
- return mbedtls_nist_kw_setkey(
- (mbedtls_nist_kw_context*)ctx, MBEDTLS_CIPHER_ID_AES, key, key_bitlen, 0);
+ return mbedtls_nist_kw_setkey((mbedtls_nist_kw_context*)ctx, MBEDTLS_CIPHER_ID_AES, key,
+ key_bitlen, 0);
}
static const mbedtls_cipher_base_t kw_aes_info = {
@@ -1270,23 +1270,23 @@ static const mbedtls_cipher_base_t kw_aes_info = {
kw_ctx_free,
};
-static const mbedtls_cipher_info_t aes_128_nist_kw_info = {
- MBEDTLS_CIPHER_AES_128_KW, MBEDTLS_MODE_KW, 128, "AES-128-KW", 0, 0, 16, &kw_aes_info};
+static const mbedtls_cipher_info_t aes_128_nist_kw_info = {MBEDTLS_CIPHER_AES_128_KW,
+ MBEDTLS_MODE_KW, 128, "AES-128-KW", 0, 0, 16, &kw_aes_info};
-static const mbedtls_cipher_info_t aes_192_nist_kw_info = {
- MBEDTLS_CIPHER_AES_192_KW, MBEDTLS_MODE_KW, 192, "AES-192-KW", 0, 0, 16, &kw_aes_info};
+static const mbedtls_cipher_info_t aes_192_nist_kw_info = {MBEDTLS_CIPHER_AES_192_KW,
+ MBEDTLS_MODE_KW, 192, "AES-192-KW", 0, 0, 16, &kw_aes_info};
-static const mbedtls_cipher_info_t aes_256_nist_kw_info = {
- MBEDTLS_CIPHER_AES_256_KW, MBEDTLS_MODE_KW, 256, "AES-256-KW", 0, 0, 16, &kw_aes_info};
+static const mbedtls_cipher_info_t aes_256_nist_kw_info = {MBEDTLS_CIPHER_AES_256_KW,
+ MBEDTLS_MODE_KW, 256, "AES-256-KW", 0, 0, 16, &kw_aes_info};
-static const mbedtls_cipher_info_t aes_128_nist_kwp_info = {
- MBEDTLS_CIPHER_AES_128_KWP, MBEDTLS_MODE_KWP, 128, "AES-128-KWP", 0, 0, 16, &kw_aes_info};
+static const mbedtls_cipher_info_t aes_128_nist_kwp_info = {MBEDTLS_CIPHER_AES_128_KWP,
+ MBEDTLS_MODE_KWP, 128, "AES-128-KWP", 0, 0, 16, &kw_aes_info};
-static const mbedtls_cipher_info_t aes_192_nist_kwp_info = {
- MBEDTLS_CIPHER_AES_192_KWP, MBEDTLS_MODE_KWP, 192, "AES-192-KWP", 0, 0, 16, &kw_aes_info};
+static const mbedtls_cipher_info_t aes_192_nist_kwp_info = {MBEDTLS_CIPHER_AES_192_KWP,
+ MBEDTLS_MODE_KWP, 192, "AES-192-KWP", 0, 0, 16, &kw_aes_info};
-static const mbedtls_cipher_info_t aes_256_nist_kwp_info = {
- MBEDTLS_CIPHER_AES_256_KWP, MBEDTLS_MODE_KWP, 256, "AES-256-KWP", 0, 0, 16, &kw_aes_info};
+static const mbedtls_cipher_info_t aes_256_nist_kwp_info = {MBEDTLS_CIPHER_AES_256_KWP,
+ MBEDTLS_MODE_KWP, 256, "AES-256-KWP", 0, 0, 16, &kw_aes_info};
#endif /* MBEDTLS_NIST_KW_C */
const mbedtls_cipher_definition_t mbedtls_cipher_definitions[] = {
diff --git a/extension/httpfs/third_party/mbedtls/library/cipher_wrap.h b/extension/httpfs/third_party/mbedtls/library/cipher_wrap.h
index ce7a08f0505..5da60eba5c0 100644
--- a/extension/httpfs/third_party/mbedtls/library/cipher_wrap.h
+++ b/extension/httpfs/third_party/mbedtls/library/cipher_wrap.h
@@ -43,8 +43,8 @@ struct mbedtls_cipher_base_t {
mbedtls_cipher_id_t cipher;
/** Encrypt using ECB */
- int (*ecb_func)(
- void* ctx, mbedtls_operation_t mode, const unsigned char* input, unsigned char* output);
+ int (*ecb_func)(void* ctx, mbedtls_operation_t mode, const unsigned char* input,
+ unsigned char* output);
#if defined(MBEDTLS_CIPHER_MODE_CBC)
/** Encrypt using CBC */
diff --git a/extension/httpfs/third_party/mbedtls/library/common.h b/extension/httpfs/third_party/mbedtls/library/common.h
index bf39453a222..1fe2fe6af06 100644
--- a/extension/httpfs/third_party/mbedtls/library/common.h
+++ b/extension/httpfs/third_party/mbedtls/library/common.h
@@ -72,7 +72,7 @@ extern void (*mbedtls_test_hook_test_fail)(const char* test, int line, const cha
* Given a multi-byte integer \p x, MBEDTLS_BYTE_n retrieves the n-th
* byte from x, where byte 0 is the least significant byte.
*/
-#define MBEDTLS_BYTE_0(x) ((uint8_t)((x)&0xff))
+#define MBEDTLS_BYTE_0(x) ((uint8_t)((x) & 0xff))
#define MBEDTLS_BYTE_1(x) ((uint8_t)(((x) >> 8) & 0xff))
#define MBEDTLS_BYTE_2(x) ((uint8_t)(((x) >> 16) & 0xff))
#define MBEDTLS_BYTE_3(x) ((uint8_t)(((x) >> 24) & 0xff))
diff --git a/extension/httpfs/third_party/mbedtls/library/constant_time.cpp b/extension/httpfs/third_party/mbedtls/library/constant_time.cpp
index ede6c964764..d554e3e657e 100644
--- a/extension/httpfs/third_party/mbedtls/library/constant_time.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/constant_time.cpp
@@ -154,8 +154,8 @@ size_t mbedtls_ct_size_mask_ge(size_t x, size_t y) {
* Constant flow with respect to c.
*/
MBEDTLS_STATIC_TESTABLE
-unsigned char mbedtls_ct_uchar_mask_of_range(
- unsigned char low, unsigned char high, unsigned char c) {
+unsigned char mbedtls_ct_uchar_mask_of_range(unsigned char low, unsigned char high,
+ unsigned char c) {
/* low_mask is: 0 if low <= c, 0x...ff if low > c */
unsigned low_mask = ((unsigned)c - low) >> 8;
/* high_mask is: 0 if c <= high, 0x...ff if c > high */
@@ -276,8 +276,8 @@ static int mbedtls_ct_cond_select_sign(unsigned char condition, int if1, int if0
return ((int)ur - 1);
}
-void mbedtls_ct_mpi_uint_cond_assign(
- size_t n, mbedtls_mpi_uint* dest, const mbedtls_mpi_uint* src, unsigned char condition) {
+void mbedtls_ct_mpi_uint_cond_assign(size_t n, mbedtls_mpi_uint* dest, const mbedtls_mpi_uint* src,
+ unsigned char condition) {
size_t i;
/* MSVC has a warning about unary minus on unsigned integer types,
@@ -374,8 +374,8 @@ static void mbedtls_ct_mem_move_to_left(void* start, size_t total, size_t offset
#if defined(MBEDTLS_SSL_SOME_SUITES_USE_TLS_CBC)
-void mbedtls_ct_memcpy_if_eq(
- unsigned char* dest, const unsigned char* src, size_t len, size_t c1, size_t c2) {
+void mbedtls_ct_memcpy_if_eq(unsigned char* dest, const unsigned char* src, size_t len, size_t c1,
+ size_t c2) {
/* mask = c1 == c2 ? 0xff : 0x00 */
const size_t equal = mbedtls_ct_size_bool_eq(c1, c2);
const unsigned char mask = (unsigned char)mbedtls_ct_size_mask(equal);
@@ -609,8 +609,8 @@ int mbedtls_mpi_lt_mpi_ct(const mbedtls_mpi* X, const mbedtls_mpi* Y, unsigned*
#if defined(MBEDTLS_PKCS1_V15) && defined(MBEDTLS_RSA_C) && !defined(MBEDTLS_RSA_ALT)
-int mbedtls_ct_rsaes_pkcs1_v15_unpadding(
- unsigned char* input, size_t ilen, unsigned char* output, size_t output_max_len, size_t* olen) {
+int mbedtls_ct_rsaes_pkcs1_v15_unpadding(unsigned char* input, size_t ilen, unsigned char* output,
+ size_t output_max_len, size_t* olen) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t i, plaintext_max_size;
@@ -689,8 +689,8 @@ int mbedtls_ct_rsaes_pkcs1_v15_unpadding(
* Copy anyway to avoid revealing the length through timing, because
* revealing the length is as bad as revealing the padding validity
* for a Bleichenbacher attack. */
- plaintext_size = mbedtls_ct_uint_if(
- output_too_large, (unsigned)plaintext_max_size, (unsigned)plaintext_size);
+ plaintext_size = mbedtls_ct_uint_if(output_too_large, (unsigned)plaintext_max_size,
+ (unsigned)plaintext_size);
/* Move the plaintext to the leftmost position where it can start in
* the working buffer, i.e. make it start plaintext_max_size from
@@ -698,8 +698,8 @@ int mbedtls_ct_rsaes_pkcs1_v15_unpadding(
* does not depend on the plaintext size. After this move, the
* starting location of the plaintext is no longer sensitive
* information. */
- mbedtls_ct_mem_move_to_left(
- input + ilen - plaintext_max_size, plaintext_max_size, plaintext_max_size - plaintext_size);
+ mbedtls_ct_mem_move_to_left(input + ilen - plaintext_max_size, plaintext_max_size,
+ plaintext_max_size - plaintext_size);
/* Finally copy the decrypted plaintext plus trailing zeros into the output
* buffer. If output_max_len is 0, then output may be an invalid pointer
diff --git a/extension/httpfs/third_party/mbedtls/library/constant_time_internal.h b/extension/httpfs/third_party/mbedtls/library/constant_time_internal.h
index 17d7d4ddf88..30eeccb6d19 100644
--- a/extension/httpfs/third_party/mbedtls/library/constant_time_internal.h
+++ b/extension/httpfs/third_party/mbedtls/library/constant_time_internal.h
@@ -154,8 +154,8 @@ unsigned mbedtls_ct_uint_if(unsigned condition, unsigned if1, unsigned if0);
* initialized MPI.
* \param condition Condition to test, must be 0 or 1.
*/
-void mbedtls_ct_mpi_uint_cond_assign(
- size_t n, mbedtls_mpi_uint* dest, const mbedtls_mpi_uint* src, unsigned char condition);
+void mbedtls_ct_mpi_uint_cond_assign(size_t n, mbedtls_mpi_uint* dest, const mbedtls_mpi_uint* src,
+ unsigned char condition);
#endif /* MBEDTLS_BIGNUM_C */
@@ -201,8 +201,8 @@ signed char mbedtls_ct_base64_dec_value(unsigned char c);
* \param c1 The first value to analyze in the condition.
* \param c2 The second value to analyze in the condition.
*/
-void mbedtls_ct_memcpy_if_eq(
- unsigned char* dest, const unsigned char* src, size_t len, size_t c1, size_t c2);
+void mbedtls_ct_memcpy_if_eq(unsigned char* dest, const unsigned char* src, size_t len, size_t c1,
+ size_t c2);
/** Copy data from a secret position with constant flow.
*
@@ -296,8 +296,8 @@ int mbedtls_ct_hmac(mbedtls_md_context_t* ctx, const unsigned char* add_data, si
* \return #MBEDTLS_ERR_RSA_INVALID_PADDING
* The input doesn't contain properly formatted padding.
*/
-int mbedtls_ct_rsaes_pkcs1_v15_unpadding(
- unsigned char* input, size_t ilen, unsigned char* output, size_t output_max_len, size_t* olen);
+int mbedtls_ct_rsaes_pkcs1_v15_unpadding(unsigned char* input, size_t ilen, unsigned char* output,
+ size_t output_max_len, size_t* olen);
#endif /* MBEDTLS_PKCS1_V15 && MBEDTLS_RSA_C && ! MBEDTLS_RSA_ALT */
diff --git a/extension/httpfs/third_party/mbedtls/library/constant_time_invasive.h b/extension/httpfs/third_party/mbedtls/library/constant_time_invasive.h
index fdb4c285db4..9dcd31bb45e 100644
--- a/extension/httpfs/third_party/mbedtls/library/constant_time_invasive.h
+++ b/extension/httpfs/third_party/mbedtls/library/constant_time_invasive.h
@@ -42,8 +42,8 @@
*
* \return All-bits-one if \p low <= \p c <= \p high, otherwise zero.
*/
-unsigned char mbedtls_ct_uchar_mask_of_range(
- unsigned char low, unsigned char high, unsigned char c);
+unsigned char mbedtls_ct_uchar_mask_of_range(unsigned char low, unsigned char high,
+ unsigned char c);
#endif /* MBEDTLS_TEST_HOOKS */
diff --git a/extension/httpfs/third_party/mbedtls/library/entropy.cpp b/extension/httpfs/third_party/mbedtls/library/entropy.cpp
index b3a1b52bfa0..533c2935462 100644
--- a/extension/httpfs/third_party/mbedtls/library/entropy.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/entropy.cpp
@@ -37,12 +37,12 @@
#endif
#if defined(MBEDTLS_SELF_TEST)
-//#if defined(MBEDTLS_PLATFORM_C)
-//#include "mbedtls/platform.h"
-//#else
-//#include <stdio.h>
-//#define mbedtls_printf printf
-//#endif /* MBEDTLS_PLATFORM_C */
+// #if defined(MBEDTLS_PLATFORM_C)
+// #include "mbedtls/platform.h"
+// #else
+// #include <stdio.h>
+// #define mbedtls_printf printf
+// #endif /* MBEDTLS_PLATFORM_C */
#endif /* MBEDTLS_SELF_TEST */
#define ENTROPY_MAX_LOOP 256 /**< Maximum amount to loop before error */
@@ -75,8 +75,8 @@ void mbedtls_entropy_init(mbedtls_entropy_context* ctx) {
MBEDTLS_ENTROPY_SOURCE_STRONG);
#endif
#if defined(MBEDTLS_ENTROPY_NV_SEED)
- mbedtls_entropy_add_source(
- ctx, mbedtls_nv_seed_poll, NULL, MBEDTLS_ENTROPY_BLOCK_SIZE, MBEDTLS_ENTROPY_SOURCE_STRONG);
+ mbedtls_entropy_add_source(ctx, mbedtls_nv_seed_poll, NULL, MBEDTLS_ENTROPY_BLOCK_SIZE,
+ MBEDTLS_ENTROPY_SOURCE_STRONG);
ctx->initial_entropy_run = 0;
#endif
#endif /* MBEDTLS_NO_DEFAULT_ENTROPY_SOURCES */
@@ -138,8 +138,8 @@ int mbedtls_entropy_add_source(mbedtls_entropy_context* ctx, mbedtls_entropy_f_s
/*
* Entropy accumulator update
*/
-static int entropy_update(
- mbedtls_entropy_context* ctx, unsigned char source_id, const unsigned char* data, size_t len) {
+static int entropy_update(mbedtls_entropy_context* ctx, unsigned char source_id,
+ const unsigned char* data, size_t len) {
unsigned char header[2];
unsigned char tmp[MBEDTLS_ENTROPY_BLOCK_SIZE];
size_t use_len = len;
@@ -190,8 +190,8 @@ static int entropy_update(
return (ret);
}
-int mbedtls_entropy_update_manual(
- mbedtls_entropy_context* ctx, const unsigned char* data, size_t len) {
+int mbedtls_entropy_update_manual(mbedtls_entropy_context* ctx, const unsigned char* data,
+ size_t len) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
#if defined(MBEDTLS_THREADING_C)
@@ -230,8 +230,8 @@ static int entropy_gather_internal(mbedtls_entropy_context* ctx) {
have_one_strong = 1;
olen = 0;
- if ((ret = ctx->source[i].f_source(
- ctx->source[i].p_source, buf, MBEDTLS_ENTROPY_MAX_GATHER, &olen)) != 0) {
+ if ((ret = ctx->source[i].f_source(ctx->source[i].p_source, buf, MBEDTLS_ENTROPY_MAX_GATHER,
+ &olen)) != 0) {
goto cleanup;
}
@@ -594,8 +594,8 @@ int mbedtls_entropy_self_test(int verbose) {
if ((ret = mbedtls_entropy_gather(&ctx)) != 0)
goto cleanup;
- ret = mbedtls_entropy_add_source(
- &ctx, entropy_dummy_source, NULL, 16, MBEDTLS_ENTROPY_SOURCE_WEAK);
+ ret = mbedtls_entropy_add_source(&ctx, entropy_dummy_source, NULL, 16,
+ MBEDTLS_ENTROPY_SOURCE_WEAK);
if (ret != 0)
goto cleanup;
diff --git a/extension/httpfs/third_party/mbedtls/library/gcm.cpp b/extension/httpfs/third_party/mbedtls/library/gcm.cpp
index d7cf192d13a..cccba00af15 100644
--- a/extension/httpfs/third_party/mbedtls/library/gcm.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/gcm.cpp
@@ -171,8 +171,8 @@ static const uint64_t last4[16] = {0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca
* Sets output to x times H using the precomputed tables.
* x and output are seen as elements of GF(2^128) as in [MGV].
*/
-static void gcm_mult(
- mbedtls_gcm_context* ctx, const unsigned char x[16], unsigned char output[16]) {
+static void gcm_mult(mbedtls_gcm_context* ctx, const unsigned char x[16],
+ unsigned char output[16]) {
int i = 0;
unsigned char lo, hi, rem;
uint64_t zh, zl;
@@ -831,15 +831,15 @@ int mbedtls_gcm_self_test(int verbose) {
if (ret != 0)
goto exit;
- ret = mbedtls_gcm_update_ad(
- &ctx, additional_test_data[add_index_test_data[i]], add_len_test_data[i]);
+ ret = mbedtls_gcm_update_ad(&ctx, additional_test_data[add_index_test_data[i]],
+ add_len_test_data[i]);
if (ret != 0)
goto exit;
if (pt_len_test_data[i] > 32) {
size_t rest_len = pt_len_test_data[i] - 32;
- ret = mbedtls_gcm_update(
- &ctx, pt_test_data[pt_index_test_data[i]], 32, buf, sizeof(buf), &olen);
+ ret = mbedtls_gcm_update(&ctx, pt_test_data[pt_index_test_data[i]], 32, buf,
+ sizeof(buf), &olen);
if (ret != 0)
goto exit;
if (olen != 32)
@@ -888,8 +888,8 @@ int mbedtls_gcm_self_test(int verbose) {
iv_len_test_data[i]);
if (ret != 0)
goto exit;
- ret = mbedtls_gcm_update_ad(
- &ctx, additional_test_data[add_index_test_data[i]], add_len_test_data[i]);
+ ret = mbedtls_gcm_update_ad(&ctx, additional_test_data[add_index_test_data[i]],
+ add_len_test_data[i]);
if (ret != 0)
goto exit;
@@ -909,8 +909,8 @@ int mbedtls_gcm_self_test(int verbose) {
if (olen != rest_len)
goto exit;
} else {
- ret = mbedtls_gcm_update(
- &ctx, ct_test_data[j * 6 + i], pt_len_test_data[i], buf, sizeof(buf), &olen);
+ ret = mbedtls_gcm_update(&ctx, ct_test_data[j * 6 + i], pt_len_test_data[i], buf,
+ sizeof(buf), &olen);
if (ret != 0)
goto exit;
if (olen != pt_len_test_data[i])
diff --git a/extension/httpfs/third_party/mbedtls/library/md.cpp b/extension/httpfs/third_party/mbedtls/library/md.cpp
index b8381bd6085..ee05ad9f7e0 100644
--- a/extension/httpfs/third_party/mbedtls/library/md.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/md.cpp
@@ -316,14 +316,14 @@ int mbedtls_md_clone(mbedtls_md_context_t* dst, const mbedtls_md_context_t* src)
#endif
#if defined(MBEDTLS_SHA224_C)
case MBEDTLS_MD_SHA224:
- mbedtls_sha256_clone(
- (mbedtls_sha256_context*)dst->md_ctx, (mbedtls_sha256_context*)src->md_ctx);
+ mbedtls_sha256_clone((mbedtls_sha256_context*)dst->md_ctx,
+ (mbedtls_sha256_context*)src->md_ctx);
break;
#endif
#if defined(MBEDTLS_SHA256_C)
case MBEDTLS_MD_SHA256:
- mbedtls_sha256_clone(
- (mbedtls_sha256_context*)dst->md_ctx, (mbedtls_sha256_context*)src->md_ctx);
+ mbedtls_sha256_clone((mbedtls_sha256_context*)dst->md_ctx,
+ (mbedtls_sha256_context*)src->md_ctx);
break;
#endif
#if defined(MBEDTLS_SHA384_C)
@@ -511,8 +511,8 @@ int mbedtls_md_finish(mbedtls_md_context_t* ctx, unsigned char* output) {
#endif
#if defined(MBEDTLS_SHA256_C)
case MBEDTLS_MD_SHA256:
- return (mbedtls_sha256_finish(
- (mbedtls_sha256_context*)(mbedtls_sha256_context*)ctx->md_ctx, output));
+ return (mbedtls_sha256_finish((mbedtls_sha256_context*)(mbedtls_sha256_context*)ctx->md_ctx,
+ output));
#endif
#if defined(MBEDTLS_SHA384_C)
case MBEDTLS_MD_SHA384:
diff --git a/extension/httpfs/third_party/mbedtls/library/oid.cpp b/extension/httpfs/third_party/mbedtls/library/oid.cpp
index 948d82e1240..1782225d719 100644
--- a/extension/httpfs/third_party/mbedtls/library/oid.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/oid.cpp
@@ -225,8 +225,8 @@ static const oid_x520_attr_t oid_x520_attr_type[] = {
"title",
},
{
- OID_DESCRIPTOR(
- MBEDTLS_OID_AT_DN_QUALIFIER, "id-at-dnQualifier", "Distinguished Name qualifier"),
+ OID_DESCRIPTOR(MBEDTLS_OID_AT_DN_QUALIFIER, "id-at-dnQualifier",
+ "Distinguished Name qualifier"),
"dnQualifier",
},
{
@@ -242,8 +242,8 @@ static const oid_x520_attr_t oid_x520_attr_type[] = {
"DC",
},
{
- OID_DESCRIPTOR(
- MBEDTLS_OID_AT_UNIQUE_IDENTIFIER, "id-at-uniqueIdentifier", "Unique Identifier"),
+ OID_DESCRIPTOR(MBEDTLS_OID_AT_UNIQUE_IDENTIFIER, "id-at-uniqueIdentifier",
+ "Unique Identifier"),
"uniqueIdentifier",
},
{
@@ -252,8 +252,8 @@ static const oid_x520_attr_t oid_x520_attr_type[] = {
}};
FN_OID_TYPED_FROM_ASN1(oid_x520_attr_t, x520_attr, oid_x520_attr_type)
-FN_OID_GET_ATTR1(
- mbedtls_oid_get_attr_short_name, oid_x520_attr_t, x520_attr, const char*, short_name)
+FN_OID_GET_ATTR1(mbedtls_oid_get_attr_short_name, oid_x520_attr_t, x520_attr, const char*,
+ short_name)
/*
* For X509 extensions
@@ -265,8 +265,8 @@ typedef struct {
static const oid_x509_ext_t oid_x509_ext[] = {
{
- OID_DESCRIPTOR(
- MBEDTLS_OID_BASIC_CONSTRAINTS, "id-ce-basicConstraints", "Basic Constraints"),
+ OID_DESCRIPTOR(MBEDTLS_OID_BASIC_CONSTRAINTS, "id-ce-basicConstraints",
+ "Basic Constraints"),
MBEDTLS_OID_X509_EXT_BASIC_CONSTRAINTS,
},
{
@@ -282,13 +282,13 @@ static const oid_x509_ext_t oid_x509_ext[] = {
MBEDTLS_OID_X509_EXT_SUBJECT_ALT_NAME,
},
{
- OID_DESCRIPTOR(
- MBEDTLS_OID_NS_CERT_TYPE, "id-netscape-certtype", "Netscape Certificate Type"),
+ OID_DESCRIPTOR(MBEDTLS_OID_NS_CERT_TYPE, "id-netscape-certtype",
+ "Netscape Certificate Type"),
MBEDTLS_OID_X509_EXT_NS_CERT_TYPE,
},
{
- OID_DESCRIPTOR(
- MBEDTLS_OID_CERTIFICATE_POLICIES, "id-ce-certificatePolicies", "Certificate Policies"),
+ OID_DESCRIPTOR(MBEDTLS_OID_CERTIFICATE_POLICIES, "id-ce-certificatePolicies",
+ "Certificate Policies"),
MBEDTLS_OID_X509_EXT_CERTIFICATE_POLICIES,
},
{
@@ -443,8 +443,8 @@ static const oid_sig_alg_t oid_sig_alg[] = {
FN_OID_TYPED_FROM_ASN1(oid_sig_alg_t, sig_alg, oid_sig_alg)
#if !defined(MBEDTLS_X509_REMOVE_INFO)
-FN_OID_GET_DESCRIPTOR_ATTR1(
- mbedtls_oid_get_sig_alg_desc, oid_sig_alg_t, sig_alg, const char*, description)
+FN_OID_GET_DESCRIPTOR_ATTR1(mbedtls_oid_get_sig_alg_desc, oid_sig_alg_t, sig_alg, const char*,
+ description)
#endif
FN_OID_GET_ATTR2(mbedtls_oid_get_sig_alg, oid_sig_alg_t, sig_alg, mbedtls_md_type_t, md_alg,
@@ -482,8 +482,8 @@ static const oid_pk_alg_t oid_pk_alg[] = {
FN_OID_TYPED_FROM_ASN1(oid_pk_alg_t, pk_alg, oid_pk_alg)
FN_OID_GET_ATTR1(mbedtls_oid_get_pk_alg, oid_pk_alg_t, pk_alg, mbedtls_pk_type_t, pk_alg)
-FN_OID_GET_OID_BY_ATTR1(
- mbedtls_oid_get_oid_by_pk_alg, oid_pk_alg_t, oid_pk_alg, mbedtls_pk_type_t, pk_alg)
+FN_OID_GET_OID_BY_ATTR1(mbedtls_oid_get_oid_by_pk_alg, oid_pk_alg_t, oid_pk_alg, mbedtls_pk_type_t,
+ pk_alg)
#if defined(MBEDTLS_ECP_C)
/*
@@ -569,8 +569,8 @@ static const oid_ecp_grp_t oid_ecp_grp[] = {
FN_OID_TYPED_FROM_ASN1(oid_ecp_grp_t, grp_id, oid_ecp_grp)
FN_OID_GET_ATTR1(mbedtls_oid_get_ec_grp, oid_ecp_grp_t, grp_id, mbedtls_ecp_group_id, grp_id)
-FN_OID_GET_OID_BY_ATTR1(
- mbedtls_oid_get_oid_by_ec_grp, oid_ecp_grp_t, oid_ecp_grp, mbedtls_ecp_group_id, grp_id)
+FN_OID_GET_OID_BY_ATTR1(mbedtls_oid_get_oid_by_ec_grp, oid_ecp_grp_t, oid_ecp_grp,
+ mbedtls_ecp_group_id, grp_id)
#endif /* MBEDTLS_ECP_C */
#if defined(MBEDTLS_CIPHER_C)
@@ -598,8 +598,8 @@ static const oid_cipher_alg_t oid_cipher_alg[] = {
};
FN_OID_TYPED_FROM_ASN1(oid_cipher_alg_t, cipher_alg, oid_cipher_alg)
-FN_OID_GET_ATTR1(
- mbedtls_oid_get_cipher_alg, oid_cipher_alg_t, cipher_alg, mbedtls_cipher_type_t, cipher_alg)
+FN_OID_GET_ATTR1(mbedtls_oid_get_cipher_alg, oid_cipher_alg_t, cipher_alg, mbedtls_cipher_type_t,
+ cipher_alg)
#endif /* MBEDTLS_CIPHER_C */
#if defined(MBEDTLS_MD_C)
@@ -662,8 +662,8 @@ static const oid_md_alg_t oid_md_alg[] = {
FN_OID_TYPED_FROM_ASN1(oid_md_alg_t, md_alg, oid_md_alg)
FN_OID_GET_ATTR1(mbedtls_oid_get_md_alg, oid_md_alg_t, md_alg, mbedtls_md_type_t, md_alg)
-FN_OID_GET_OID_BY_ATTR1(
- mbedtls_oid_get_oid_by_md, oid_md_alg_t, oid_md_alg, mbedtls_md_type_t, md_alg)
+FN_OID_GET_OID_BY_ATTR1(mbedtls_oid_get_oid_by_md, oid_md_alg_t, oid_md_alg, mbedtls_md_type_t,
+ md_alg)
/*
* For HMAC digestAlgorithm
diff --git a/extension/httpfs/third_party/mbedtls/library/pem.cpp b/extension/httpfs/third_party/mbedtls/library/pem.cpp
index c55fbd29f67..ea6532b6122 100644
--- a/extension/httpfs/third_party/mbedtls/library/pem.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/pem.cpp
@@ -77,8 +77,8 @@ static int pem_get_iv(const unsigned char* s, unsigned char* iv, size_t iv_len)
return (0);
}
-static int pem_pbkdf1(
- unsigned char* key, size_t keylen, unsigned char* iv, const unsigned char* pwd, size_t pwdlen) {
+static int pem_pbkdf1(unsigned char* key, size_t keylen, unsigned char* iv,
+ const unsigned char* pwd, size_t pwdlen) {
mbedtls_md5_context md5_ctx;
unsigned char md5sum[16];
size_t use_len;
diff --git a/extension/httpfs/third_party/mbedtls/library/pk.cpp b/extension/httpfs/third_party/mbedtls/library/pk.cpp
index 96f8d8149ad..a4c862862f8 100644
--- a/extension/httpfs/third_party/mbedtls/library/pk.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/pk.cpp
@@ -272,8 +272,8 @@ int mbedtls_pk_verify_restartable(mbedtls_pk_context* ctx, mbedtls_md_type_t md_
if ((ret = pk_restart_setup(rs_ctx, ctx->pk_info)) != 0)
return (ret);
- ret = ctx->pk_info->verify_rs_func(
- ctx->pk_ctx, md_alg, hash, hash_len, sig, sig_len, rs_ctx->rs_ctx);
+ ret = ctx->pk_info->verify_rs_func(ctx->pk_ctx, md_alg, hash, hash_len, sig, sig_len,
+ rs_ctx->rs_ctx);
if (ret != MBEDTLS_ERR_ECP_IN_PROGRESS)
mbedtls_pk_restart_free(rs_ctx);
@@ -390,8 +390,8 @@ int mbedtls_pk_sign_restartable(mbedtls_pk_context* ctx, mbedtls_md_type_t md_al
if (ctx->pk_info->sign_func == NULL)
return (MBEDTLS_ERR_PK_TYPE_MISMATCH);
- return (ctx->pk_info->sign_func(
- ctx->pk_ctx, md_alg, hash, hash_len, sig, sig_size, sig_len, f_rng, p_rng));
+ return (ctx->pk_info->sign_func(ctx->pk_ctx, md_alg, hash, hash_len, sig, sig_size, sig_len,
+ f_rng, p_rng));
}
/*
@@ -400,8 +400,8 @@ int mbedtls_pk_sign_restartable(mbedtls_pk_context* ctx, mbedtls_md_type_t md_al
int mbedtls_pk_sign(mbedtls_pk_context* ctx, mbedtls_md_type_t md_alg, const unsigned char* hash,
size_t hash_len, unsigned char* sig, size_t sig_size, size_t* sig_len,
int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
- return (mbedtls_pk_sign_restartable(
- ctx, md_alg, hash, hash_len, sig, sig_size, sig_len, f_rng, p_rng, NULL));
+ return (mbedtls_pk_sign_restartable(ctx, md_alg, hash, hash_len, sig, sig_size, sig_len, f_rng,
+ p_rng, NULL));
}
/*
diff --git a/extension/httpfs/third_party/mbedtls/library/pk_wrap.cpp b/extension/httpfs/third_party/mbedtls/library/pk_wrap.cpp
index b5eb7edd9e6..ce2499a4fb5 100644
--- a/extension/httpfs/third_party/mbedtls/library/pk_wrap.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/pk_wrap.cpp
@@ -139,12 +139,12 @@ static int rsa_encrypt_wrap(void* ctx, const unsigned char* input, size_t ilen,
return (mbedtls_rsa_pkcs1_encrypt(rsa, f_rng, p_rng, ilen, input, output));
}
-static int rsa_check_pair_wrap(
- const void* pub, const void* prv, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+static int rsa_check_pair_wrap(const void* pub, const void* prv,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
(void)f_rng;
(void)p_rng;
- return (mbedtls_rsa_check_pub_priv(
- (const mbedtls_rsa_context*)pub, (const mbedtls_rsa_context*)prv));
+ return (mbedtls_rsa_check_pub_priv((const mbedtls_rsa_context*)pub,
+ (const mbedtls_rsa_context*)prv));
}
static void* rsa_alloc_wrap(void) {
@@ -345,10 +345,10 @@ static int eckey_sign_rs_wrap(void* ctx, mbedtls_md_type_t md_alg, const unsigne
#endif /* MBEDTLS_ECP_RESTARTABLE */
#endif /* MBEDTLS_ECDSA_C */
-static int eckey_check_pair(
- const void* pub, const void* prv, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
- return (mbedtls_ecp_check_pub_priv(
- (const mbedtls_ecp_keypair*)pub, (const mbedtls_ecp_keypair*)prv, f_rng, p_rng));
+static int eckey_check_pair(const void* pub, const void* prv,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+ return (mbedtls_ecp_check_pub_priv((const mbedtls_ecp_keypair*)pub,
+ (const mbedtls_ecp_keypair*)prv, f_rng, p_rng));
}
static void* eckey_alloc_wrap(void) {
@@ -431,8 +431,8 @@ static int ecdsa_can_do(mbedtls_pk_type_t type) {
* An ASN.1 encoded signature is a sequence of two ASN.1 integers. Parse one of
* those integers and convert it to the fixed-length encoding expected by PSA.
*/
-static int extract_ecdsa_sig_int(
- unsigned char** from, const unsigned char* end, unsigned char* to, size_t to_len) {
+static int extract_ecdsa_sig_int(unsigned char** from, const unsigned char* end, unsigned char* to,
+ size_t to_len) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t unpadded_len, padding_len;
@@ -461,13 +461,13 @@ static int extract_ecdsa_sig_int(
* to a raw {r,s} buffer. Note: the provided sig buffer must be at least
* twice as big as int_size.
*/
-static int extract_ecdsa_sig(
- unsigned char** p, const unsigned char* end, unsigned char* sig, size_t int_size) {
+static int extract_ecdsa_sig(unsigned char** p, const unsigned char* end, unsigned char* sig,
+ size_t int_size) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t tmp_size;
- if ((ret = mbedtls_asn1_get_tag(
- p, end, &tmp_size, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
+ if ((ret = mbedtls_asn1_get_tag(p, end, &tmp_size,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
return (ret);
/* Extract r */
@@ -567,8 +567,8 @@ static int ecdsa_verify_wrap(void* ctx, mbedtls_md_type_t md_alg, const unsigned
static int ecdsa_sign_wrap(void* ctx, mbedtls_md_type_t md_alg, const unsigned char* hash,
size_t hash_len, unsigned char* sig, size_t sig_size, size_t* sig_len,
int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
- return (mbedtls_ecdsa_write_signature(
- (mbedtls_ecdsa_context*)ctx, md_alg, hash, hash_len, sig, sig_size, sig_len, f_rng, p_rng));
+ return (mbedtls_ecdsa_write_signature((mbedtls_ecdsa_context*)ctx, md_alg, hash, hash_len, sig,
+ sig_size, sig_len, f_rng, p_rng));
}
#if defined(MBEDTLS_ECP_RESTARTABLE)
@@ -689,8 +689,8 @@ static int rsa_alt_decrypt_wrap(void* ctx, const unsigned char* input, size_t il
}
#if defined(MBEDTLS_RSA_C)
-static int rsa_alt_check_pair(
- const void* pub, const void* prv, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+static int rsa_alt_check_pair(const void* pub, const void* prv,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
unsigned char sig[MBEDTLS_MPI_MAX_SIZE];
unsigned char hash[32];
size_t sig_len = 0;
@@ -860,8 +860,8 @@ static int pk_ecdsa_sig_asn1_from_psa(unsigned char* sig, size_t* sig_len, size_
MBEDTLS_ASN1_CHK_ADD(len, asn1_write_mpibuf(&p, sig, rs_len));
MBEDTLS_ASN1_CHK_ADD(len, mbedtls_asn1_write_len(&p, sig, len));
- MBEDTLS_ASN1_CHK_ADD(
- len, mbedtls_asn1_write_tag(&p, sig, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE));
+ MBEDTLS_ASN1_CHK_ADD(len,
+ mbedtls_asn1_write_tag(&p, sig, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE));
memmove(sig, p, len);
*sig_len = len;
diff --git a/extension/httpfs/third_party/mbedtls/library/pk_wrap.h b/extension/httpfs/third_party/mbedtls/library/pk_wrap.h
index 24d5355e11d..9e9b27d199d 100644
--- a/extension/httpfs/third_party/mbedtls/library/pk_wrap.h
+++ b/extension/httpfs/third_party/mbedtls/library/pk_wrap.h
@@ -68,8 +68,8 @@ struct mbedtls_pk_info_t {
size_t* olen, size_t osize, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng);
/** Check public-private key pair */
- int (*check_pair_func)(
- const void* pub, const void* prv, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng);
+ int (*check_pair_func)(const void* pub, const void* prv,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng);
/** Allocate a new context */
void* (*ctx_alloc_func)(void);
diff --git a/extension/httpfs/third_party/mbedtls/library/pkparse.cpp b/extension/httpfs/third_party/mbedtls/library/pkparse.cpp
index 249bca6c062..055d693cd22 100644
--- a/extension/httpfs/third_party/mbedtls/library/pkparse.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/pkparse.cpp
@@ -243,8 +243,8 @@ static int pk_group_from_specified(const mbedtls_asn1_buf* params, mbedtls_ecp_g
* parameters FIELD-ID.&Type({IOSet}{@fieldType})
* }
*/
- if ((ret = mbedtls_asn1_get_tag(
- &p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
+ if ((ret = mbedtls_asn1_get_tag(&p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
return (ret);
end_field = p + len;
@@ -286,8 +286,8 @@ static int pk_group_from_specified(const mbedtls_asn1_buf* params, mbedtls_ecp_g
* -- with version equal to ecdpVer2 or ecdpVer3
* }
*/
- if ((ret = mbedtls_asn1_get_tag(
- &p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
+ if ((ret = mbedtls_asn1_get_tag(&p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
return (ret);
end_curve = p + len;
@@ -398,8 +398,8 @@ static int pk_group_id_from_group(const mbedtls_ecp_group* grp, mbedtls_ecp_grou
/*
* Parse a SpecifiedECDomain (SEC 1 C.2) and find the associated group ID
*/
-static int pk_group_id_from_specified(
- const mbedtls_asn1_buf* params, mbedtls_ecp_group_id* grp_id) {
+static int pk_group_id_from_specified(const mbedtls_asn1_buf* params,
+ mbedtls_ecp_group_id* grp_id) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
mbedtls_ecp_group grp;
@@ -463,8 +463,8 @@ static int pk_use_ecparams(const mbedtls_asn1_buf* params, mbedtls_ecp_group* gr
static int pk_get_ecpubkey(unsigned char** p, const unsigned char* end, mbedtls_ecp_keypair* key) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
- if ((ret = mbedtls_ecp_point_read_binary(
- &key->grp, &key->Q, (const unsigned char*)*p, end - *p)) == 0) {
+ if ((ret = mbedtls_ecp_point_read_binary(&key->grp, &key->Q, (const unsigned char*)*p,
+ end - *p)) == 0) {
ret = mbedtls_ecp_check_pubkey(&key->grp, &key->Q);
}
@@ -488,8 +488,8 @@ static int pk_get_rsapubkey(unsigned char** p, const unsigned char* end, mbedtls
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t len;
- if ((ret = mbedtls_asn1_get_tag(
- p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
+ if ((ret = mbedtls_asn1_get_tag(p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0)
return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_INVALID_PUBKEY, ret));
if (*p + len != end)
@@ -559,8 +559,8 @@ static int pk_get_pk_alg(unsigned char** p, const unsigned char* end, mbedtls_pk
* algorithm AlgorithmIdentifier,
* subjectPublicKey BIT STRING }
*/
-int mbedtls_pk_parse_subpubkey(
- unsigned char** p, const unsigned char* end, mbedtls_pk_context* pk) {
+int mbedtls_pk_parse_subpubkey(unsigned char** p, const unsigned char* end,
+ mbedtls_pk_context* pk) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t len;
mbedtls_asn1_buf alg_params;
@@ -572,8 +572,8 @@ int mbedtls_pk_parse_subpubkey(
PK_VALIDATE_RET(end != NULL);
PK_VALIDATE_RET(pk != NULL);
- if ((ret = mbedtls_asn1_get_tag(
- p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
+ if ((ret = mbedtls_asn1_get_tag(p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_KEY_INVALID_FORMAT, ret));
}
@@ -644,8 +644,8 @@ static int asn1_get_nonzero_mpi(unsigned char** p, const unsigned char* end, mbe
/*
* Parse a PKCS#1 encoded private RSA key
*/
-static int pk_parse_key_pkcs1_der(
- mbedtls_rsa_context* rsa, const unsigned char* key, size_t keylen) {
+static int pk_parse_key_pkcs1_der(mbedtls_rsa_context* rsa, const unsigned char* key,
+ size_t keylen) {
int ret, version;
size_t len;
unsigned char *p, *end;
@@ -672,8 +672,8 @@ static int pk_parse_key_pkcs1_der(
* otherPrimeInfos OtherPrimeInfos OPTIONAL
* }
*/
- if ((ret = mbedtls_asn1_get_tag(
- &p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
+ if ((ret = mbedtls_asn1_get_tag(&p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_KEY_INVALID_FORMAT, ret));
}
@@ -807,8 +807,8 @@ static int pk_parse_key_sec1_der(mbedtls_ecp_keypair* eck, const unsigned char*
* publicKey [1] BIT STRING OPTIONAL
* }
*/
- if ((ret = mbedtls_asn1_get_tag(
- &p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
+ if ((ret = mbedtls_asn1_get_tag(&p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_KEY_INVALID_FORMAT, ret));
}
@@ -861,8 +861,8 @@ static int pk_parse_key_sec1_der(mbedtls_ecp_keypair* eck, const unsigned char*
return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_KEY_INVALID_FORMAT, ret));
if (p + len != end2)
- return (MBEDTLS_ERROR_ADD(
- MBEDTLS_ERR_PK_KEY_INVALID_FORMAT, MBEDTLS_ERR_ASN1_LENGTH_MISMATCH));
+ return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_KEY_INVALID_FORMAT,
+ MBEDTLS_ERR_ASN1_LENGTH_MISMATCH));
if ((ret = pk_get_ecpubkey(&p, end2, eck)) == 0)
pubkey_done = 1;
@@ -939,8 +939,8 @@ static int pk_parse_key_pkcs8_unencrypted_der(mbedtls_pk_context* pk, const unsi
* The PrivateKey OCTET STRING is a SEC1 ECPrivateKey
*/
- if ((ret = mbedtls_asn1_get_tag(
- &p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
+ if ((ret = mbedtls_asn1_get_tag(&p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_KEY_INVALID_FORMAT, ret));
}
@@ -1034,8 +1034,8 @@ static int pk_parse_key_pkcs8_encrypted_der(mbedtls_pk_context* pk, unsigned cha
* The EncryptedData OCTET STRING is a PKCS#8 PrivateKeyInfo
*
*/
- if ((ret = mbedtls_asn1_get_tag(
- &p, end, &len, MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
+ if ((ret = mbedtls_asn1_get_tag(&p, end, &len,
+ MBEDTLS_ASN1_CONSTRUCTED | MBEDTLS_ASN1_SEQUENCE)) != 0) {
return (MBEDTLS_ERROR_ADD(MBEDTLS_ERR_PK_KEY_INVALID_FORMAT, ret));
}
@@ -1067,8 +1067,8 @@ static int pk_parse_key_pkcs8_encrypted_der(mbedtls_pk_context* pk, unsigned cha
#endif /* MBEDTLS_PKCS12_C */
#if defined(MBEDTLS_PKCS5_C)
if (MBEDTLS_OID_CMP(MBEDTLS_OID_PKCS5_PBES2, &pbe_alg_oid) == 0) {
- if ((ret = mbedtls_pkcs5_pbes2(
- &pbe_params, MBEDTLS_PKCS5_DECRYPT, pwd, pwdlen, p, len, buf)) != 0) {
+ if ((ret = mbedtls_pkcs5_pbes2(&pbe_params, MBEDTLS_PKCS5_DECRYPT, pwd, pwdlen, p, len,
+ buf)) != 0) {
if (ret == MBEDTLS_ERR_PKCS5_PASSWORD_MISMATCH)
return (MBEDTLS_ERR_PK_PASSWORD_MISMATCH);
@@ -1165,8 +1165,8 @@ int mbedtls_pk_parse_key(mbedtls_pk_context* pk, const unsigned char* key, size_
if (key[keylen - 1] != '\0')
ret = MBEDTLS_ERR_PEM_NO_HEADER_FOOTER_PRESENT;
else
- ret = mbedtls_pem_read_buffer(
- &pem, "-----BEGIN PRIVATE KEY-----", "-----END PRIVATE KEY-----", key, NULL, 0, &len);
+ ret = mbedtls_pem_read_buffer(&pem, "-----BEGIN PRIVATE KEY-----",
+ "-----END PRIVATE KEY-----", key, NULL, 0, &len);
if (ret == 0) {
if ((ret = pk_parse_key_pkcs8_unencrypted_der(pk, pem.buf, pem.buflen, f_rng, p_rng)) !=
0) {
@@ -1186,8 +1186,8 @@ int mbedtls_pk_parse_key(mbedtls_pk_context* pk, const unsigned char* key, size_
ret = mbedtls_pem_read_buffer(&pem, "-----BEGIN ENCRYPTED PRIVATE KEY-----",
"-----END ENCRYPTED PRIVATE KEY-----", key, NULL, 0, &len);
if (ret == 0) {
- if ((ret = pk_parse_key_pkcs8_encrypted_der(
- pk, pem.buf, pem.buflen, pwd, pwdlen, f_rng, p_rng)) != 0) {
+ if ((ret = pk_parse_key_pkcs8_encrypted_der(pk, pem.buf, pem.buflen, pwd, pwdlen, f_rng,
+ p_rng)) != 0) {
mbedtls_pk_free(pk);
}
@@ -1328,8 +1328,8 @@ int mbedtls_pk_parse_public_key(mbedtls_pk_context* ctx, const unsigned char* ke
if (key[keylen - 1] != '\0')
ret = MBEDTLS_ERR_PEM_NO_HEADER_FOOTER_PRESENT;
else
- ret = mbedtls_pem_read_buffer(
- &pem, "-----BEGIN PUBLIC KEY-----", "-----END PUBLIC KEY-----", key, NULL, 0, &len);
+ ret = mbedtls_pem_read_buffer(&pem, "-----BEGIN PUBLIC KEY-----",
+ "-----END PUBLIC KEY-----", key, NULL, 0, &len);
if (ret == 0) {
/*
diff --git a/extension/httpfs/third_party/mbedtls/library/rsa.cpp b/extension/httpfs/third_party/mbedtls/library/rsa.cpp
index 641694ba623..9301e52271b 100644
--- a/extension/httpfs/third_party/mbedtls/library/rsa.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/rsa.cpp
@@ -366,8 +366,8 @@ int mbedtls_rsa_export(const mbedtls_rsa_context* ctx, mbedtls_mpi* N, mbedtls_m
* write DER encoded RSA keys. The helper function mbedtls_rsa_deduce_crt
* can be used in this case.
*/
-int mbedtls_rsa_export_crt(
- const mbedtls_rsa_context* ctx, mbedtls_mpi* DP, mbedtls_mpi* DQ, mbedtls_mpi* QP) {
+int mbedtls_rsa_export_crt(const mbedtls_rsa_context* ctx, mbedtls_mpi* DP, mbedtls_mpi* DQ,
+ mbedtls_mpi* QP) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
int is_priv;
RSA_VALIDATE_RET(ctx != NULL);
@@ -639,8 +639,8 @@ int mbedtls_rsa_check_pub_priv(const mbedtls_rsa_context* pub, const mbedtls_rsa
/*
* Do an RSA public key operation
*/
-int mbedtls_rsa_public(
- mbedtls_rsa_context* ctx, const unsigned char* input, unsigned char* output) {
+int mbedtls_rsa_public(mbedtls_rsa_context* ctx, const unsigned char* input,
+ unsigned char* output) {
int ret = MBEDTLS_ERR_ERROR_CORRUPTION_DETECTED;
size_t olen;
mbedtls_mpi T;
@@ -689,8 +689,8 @@ int mbedtls_rsa_public(
* DSS, and other systems. In : Advances in Cryptology-CRYPTO'96. Springer
* Berlin Heidelberg, 1996. p. 104-113.
*/
-static int rsa_prepare_blinding(
- mbedtls_rsa_context* ctx, int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
+static int rsa_prepare_blinding(mbedtls_rsa_context* ctx,
+ int (*f_rng)(void*, unsigned char*, size_t), void* p_rng) {
int ret, count = 0;
mbedtls_mpi R;
@@ -1363,14 +1363,14 @@ int mbedtls_rsa_pkcs1_decrypt(mbedtls_rsa_context* ctx, int (*f_rng)(void*, unsi
switch (ctx->padding) {
#if defined(MBEDTLS_PKCS1_V15)
case MBEDTLS_RSA_PKCS_V15:
- return mbedtls_rsa_rsaes_pkcs1_v15_decrypt(
- ctx, f_rng, p_rng, olen, input, output, output_max_len);
+ return mbedtls_rsa_rsaes_pkcs1_v15_decrypt(ctx, f_rng, p_rng, olen, input, output,
+ output_max_len);
#endif
#if defined(MBEDTLS_PKCS1_V21)
case MBEDTLS_RSA_PKCS_V21:
- return mbedtls_rsa_rsaes_oaep_decrypt(
- ctx, f_rng, p_rng, NULL, 0, olen, input, output, output_max_len);
+ return mbedtls_rsa_rsaes_oaep_decrypt(ctx, f_rng, p_rng, NULL, 0, olen, input, output,
+ output_max_len);
#endif
default:
@@ -1508,8 +1508,8 @@ int mbedtls_rsa_rsassa_pss_sign_ext(mbedtls_rsa_context* ctx,
int mbedtls_rsa_rsassa_pss_sign(mbedtls_rsa_context* ctx,
int (*f_rng)(void*, unsigned char*, size_t), void* p_rng, mbedtls_md_type_t md_alg,
unsigned int hashlen, const unsigned char* hash, unsigned char* sig) {
- return rsa_rsassa_pss_sign(
- ctx, f_rng, p_rng, md_alg, hashlen, hash, MBEDTLS_RSA_SALT_LEN_ANY, sig);
+ return rsa_rsassa_pss_sign(ctx, f_rng, p_rng, md_alg, hashlen, hash, MBEDTLS_RSA_SALT_LEN_ANY,
+ sig);
}
#endif /* MBEDTLS_PKCS1_V21 */
@@ -1867,8 +1867,8 @@ int mbedtls_rsa_rsassa_pss_verify(mbedtls_rsa_context* ctx, mbedtls_md_type_t md
mgf1_hash_id = (ctx->hash_id != MBEDTLS_MD_NONE) ? (mbedtls_md_type_t)ctx->hash_id : md_alg;
- return (mbedtls_rsa_rsassa_pss_verify_ext(
- ctx, md_alg, hashlen, hash, mgf1_hash_id, MBEDTLS_RSA_SALT_LEN_ANY, sig));
+ return (mbedtls_rsa_rsassa_pss_verify_ext(ctx, md_alg, hashlen, hash, mgf1_hash_id,
+ MBEDTLS_RSA_SALT_LEN_ANY, sig));
}
#endif /* MBEDTLS_PKCS1_V21 */
@@ -2163,8 +2163,8 @@ int mbedtls_rsa_self_test(int verbose) {
if (verbose != 0)
mbedtls_printf("passed\n PKCS#1 decryption : ");
- if (mbedtls_rsa_pkcs1_decrypt(
- &rsa, myrand, NULL, &len, rsa_ciphertext, rsa_decrypted, sizeof(rsa_decrypted)) != 0) {
+ if (mbedtls_rsa_pkcs1_decrypt(&rsa, myrand, NULL, &len, rsa_ciphertext, rsa_decrypted,
+ sizeof(rsa_decrypted)) != 0) {
if (verbose != 0)
mbedtls_printf("failed\n");
diff --git a/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.cpp b/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.cpp
index 72bed6b306c..baa3a058de5 100644
--- a/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.cpp
@@ -177,8 +177,8 @@ int mbedtls_rsa_deduce_primes(mbedtls_mpi const* N, mbedtls_mpi const* E, mbedtl
* Given P, Q and the public exponent E, deduce D.
* This is essentially a modular inversion.
*/
-int mbedtls_rsa_deduce_private_exponent(
- mbedtls_mpi const* P, mbedtls_mpi const* Q, mbedtls_mpi const* E, mbedtls_mpi* D) {
+int mbedtls_rsa_deduce_private_exponent(mbedtls_mpi const* P, mbedtls_mpi const* Q,
+ mbedtls_mpi const* E, mbedtls_mpi* D) {
int ret = 0;
mbedtls_mpi K, L;
diff --git a/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.h b/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.h
index f449213d613..e801fb12f82 100644
--- a/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.h
+++ b/extension/httpfs/third_party/mbedtls/library/rsa_alt_helpers.h
@@ -110,8 +110,8 @@ int mbedtls_rsa_deduce_primes(mbedtls_mpi const* N, mbedtls_mpi const* E, mbedtl
* \note This function does not check whether P and Q are primes.
*
*/
-int mbedtls_rsa_deduce_private_exponent(
- mbedtls_mpi const* P, mbedtls_mpi const* Q, mbedtls_mpi const* E, mbedtls_mpi* D);
+int mbedtls_rsa_deduce_private_exponent(mbedtls_mpi const* P, mbedtls_mpi const* Q,
+ mbedtls_mpi const* E, mbedtls_mpi* D);
/**
* \brief Generate RSA-CRT parameters
diff --git a/extension/httpfs/third_party/mbedtls/library/sha1.cpp b/extension/httpfs/third_party/mbedtls/library/sha1.cpp
index 01e66959383..3bdbe695f98 100644
--- a/extension/httpfs/third_party/mbedtls/library/sha1.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/sha1.cpp
@@ -111,12 +111,12 @@ int mbedtls_internal_sha1_process(mbedtls_sha1_context* ctx, const unsigned char
local.W[14] = MBEDTLS_GET_UINT32_BE(data, 56);
local.W[15] = MBEDTLS_GET_UINT32_BE(data, 60);
-#define S(x, n) (((x) << (n)) | (((x)&0xFFFFFFFF) >> (32 - (n))))
+#define S(x, n) (((x) << (n)) | (((x) & 0xFFFFFFFF) >> (32 - (n))))
#define R(t) \
- (local.temp = local.W[((t)-3) & 0x0F] ^ local.W[((t)-8) & 0x0F] ^ local.W[((t)-14) & 0x0F] ^ \
- local.W[(t)&0x0F], \
- (local.W[(t)&0x0F] = S(local.temp, 1)))
+ (local.temp = local.W[((t) - 3) & 0x0F] ^ local.W[((t) - 8) & 0x0F] ^ \
+ local.W[((t) - 14) & 0x0F] ^ local.W[(t) & 0x0F], \
+ (local.W[(t) & 0x0F] = S(local.temp, 1)))
#define P(a, b, c, d, e, x) \
do { \
@@ -388,14 +388,14 @@ int mbedtls_sha1(const unsigned char* input, size_t ilen, unsigned char output[2
/*
* FIPS-180-1 test vectors
*/
-static const unsigned char sha1_test_buf[3][57] = {
- {"abc"}, {"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, {""}};
+static const unsigned char sha1_test_buf[3][57] = {{"abc"},
+ {"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, {""}};
static const size_t sha1_test_buflen[3] = {3, 56, 1000};
-static const unsigned char sha1_test_sum[3][20] = {
- {0xA9, 0x99, 0x3E, 0x36, 0x47, 0x06, 0x81, 0x6A, 0xBA, 0x3E, 0x25, 0x71, 0x78, 0x50, 0xC2, 0x6C,
- 0x9C, 0xD0, 0xD8, 0x9D},
+static const unsigned char sha1_test_sum[3][20] = {{0xA9, 0x99, 0x3E, 0x36, 0x47, 0x06, 0x81, 0x6A,
+ 0xBA, 0x3E, 0x25, 0x71, 0x78, 0x50, 0xC2,
+ 0x6C, 0x9C, 0xD0, 0xD8, 0x9D},
{0x84, 0x98, 0x3E, 0x44, 0x1C, 0x3B, 0xD2, 0x6E, 0xBA, 0xAE, 0x4A, 0xA1, 0xF9, 0x51, 0x29, 0xE5,
0xE5, 0x46, 0x70, 0xF1},
{0x34, 0xAA, 0x97, 0x3C, 0xD4, 0xC4, 0xDA, 0xA4, 0xF6, 0x1E, 0xEB, 0x2B, 0xDB, 0xAD, 0x27, 0x31,
diff --git a/extension/httpfs/third_party/mbedtls/library/sha256.cpp b/extension/httpfs/third_party/mbedtls/library/sha256.cpp
index 5105caca4dd..7f6c74aefec 100644
--- a/extension/httpfs/third_party/mbedtls/library/sha256.cpp
+++ b/extension/httpfs/third_party/mbedtls/library/sha256.cpp
@@ -182,7 +182,7 @@ static const uint32_t K[] = {
0xC67178F2,
};
-#define SHR(x, n) (((x)&0xFFFFFFFF) >> (n))
+#define SHR(x, n) (((x) & 0xFFFFFFFF) >> (n))
#define ROTR(x, n) (SHR(x, n) | ((x) << (32 - (n))))
#define S0(x) (ROTR(x, 7) ^ ROTR(x, 18) ^ SHR(x, 3))
@@ -195,7 +195,8 @@ static const uint32_t K[] = {
#define F1(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
#define R(t) \
- (local.W[t] = S1(local.W[(t)-2]) + local.W[(t)-7] + S0(local.W[(t)-15]) + local.W[(t)-16])
+ (local.W[t] = \
+ S1(local.W[(t) - 2]) + local.W[(t) - 7] + S0(local.W[(t) - 15]) + local.W[(t) - 16])
#define P(a, b, c, d, e, f, g, h, x, K) \
do { \
@@ -442,8 +443,8 @@ int mbedtls_sha256(const unsigned char* input, size_t ilen, unsigned char* outpu
/*
* FIPS-180-2 test vectors
*/
-static const unsigned char sha256_test_buf[3][57] = {
- {"abc"}, {"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, {""}};
+static const unsigned char sha256_test_buf[3][57] = {{"abc"},
+ {"abcdbcdecdefdefgefghfghighijhijkijkljklmklmnlmnomnopnopq"}, {""}};
static const size_t sha256_test_buflen[3] = {3, 56, 1000};
diff --git a/extension/postgres_scanner/CMakeLists.txt b/extension/postgres_scanner/CMakeLists.txt
new file mode 100644
index 00000000000..a8d3e658755
--- /dev/null
+++ b/extension/postgres_scanner/CMakeLists.txt
@@ -0,0 +1,58 @@
+find_package(DuckDB REQUIRED)
+
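+# The Postgres scanner reuses the DuckDB scanner sources below and reads from Postgres
+# through DuckDB's postgres extension at runtime, which is why DuckDB is required at
+# build time.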
+add_library(postgres_scanner
+ SHARED
+ ../duckdb_scanner/src/duckdb_scan.cpp
+ ../duckdb_scanner/src/duckdb_catalog.cpp
+ ../duckdb_scanner/src/duckdb_table_catalog_entry.cpp
+ ../duckdb_scanner/src/duckdb_type_converter.cpp
+ src/postgres_scanner_extension.cpp
+ src/postgres_storage.cpp
+ src/postgres_catalog.cpp)
+
+include_directories(
+ src/include
+ ../duckdb_scanner/src/include
+ ${DuckDB_INCLUDE_DIRS}
+ ${PROJECT_SOURCE_DIR}/src/include)
+
+set_target_properties(postgres_scanner PROPERTIES
+ OUTPUT_NAME postgres_scanner
+ PREFIX "lib"
+ SUFFIX ".kuzu_extension"
+)
+
+set_target_properties(postgres_scanner
+ PROPERTIES
+ ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build"
+ LIBRARY_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build"
+ RUNTIME_OUTPUT_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/build"
+)
+
+target_link_libraries(postgres_scanner
+ PRIVATE
+ ${DuckDB_LIBRARIES})
+
+if (WIN32)
+    # On Windows, there is no dynamic lookup available, so it's not
+    # possible to generically look for symbols on library load. There are
+    # two main alternatives to statically linking kuzu, neither of which is
+    # appealing:
+    #   1. Link against the shared library. This works well assuming
+    #      the DLL is locatable, but this assumption isn't valid for users
+    #      of kuzu_shell.exe.
+    #   2. Link against the executable (kuzu_shell.exe). This is
+    #      strange but works well for kuzu_shell.exe. However, it forces
+    #      users who are embedding kuzu in their application to recompile
+    #      the extension _and_ export the symbols for the extension to
+    #      locate on load.
+    # We choose the simplest option. Windows isn't known
+    # for its small libraries anyway...
+    # Future work could make it possible to embed extensions into kuzu,
+    # which would help fix this problem.
+ target_link_libraries(postgres_scanner PRIVATE kuzu)
+endif()
+
+if (APPLE)
+ set_target_properties(postgres_scanner PROPERTIES LINK_FLAGS "-undefined dynamic_lookup")
+endif ()
diff --git a/extension/postgres_scanner/src/include/postgres_catalog.h b/extension/postgres_scanner/src/include/postgres_catalog.h
new file mode 100644
index 00000000000..a1e137dec90
--- /dev/null
+++ b/extension/postgres_scanner/src/include/postgres_catalog.h
@@ -0,0 +1,51 @@
+#pragma once
+
+#include "duckdb_catalog.h"
+
+namespace kuzu {
+namespace postgres_scanner {
+
+struct BoundExtraCreatePostgresTableInfo final
+ : public duckdb_scanner::BoundExtraCreateDuckDBTableInfo {
+ std::string pgConnectionStr;
+
+ BoundExtraCreatePostgresTableInfo(std::string pgConnectionStr, std::string dbPath,
+ std::string catalogName, std::string schemaName,
+        std::vector<binder::PropertyInfo> propertyInfos)
+ : BoundExtraCreateDuckDBTableInfo{std::move(dbPath), std::move(catalogName),
+ std::move(schemaName), std::move(propertyInfos)},
+ pgConnectionStr{std::move(pgConnectionStr)} {}
+ BoundExtraCreatePostgresTableInfo(const BoundExtraCreatePostgresTableInfo& other)
+ : BoundExtraCreateDuckDBTableInfo{other.dbPath, other.catalogName, other.schemaName,
+ copyVector(other.propertyInfos)},
+ pgConnectionStr{other.pgConnectionStr} {}
+
+ std::unique_ptr copy() const override {
+        return std::make_unique<BoundExtraCreatePostgresTableInfo>(*this);
+ }
+};
+
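+// Catalog content of an attached Postgres database. It reuses the DuckDB scanner's
+// catalog machinery and only customizes how the connection is established (through
+// DuckDB's postgres extension) and the default catalog/schema names.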
+class PostgresCatalogContent final : public duckdb_scanner::DuckDBCatalogContent {
+public:
+ PostgresCatalogContent() : duckdb_scanner::DuckDBCatalogContent{} {}
+
+ void init(const std::string& dbPath, const std::string& catalogName,
+ main::ClientContext* context) override;
+
+private:
+    std::unique_ptr<binder::BoundCreateTableInfo> bindCreateTableInfo(duckdb::Connection& con,
+ const std::string& tableName, const std::string& dbPath,
+ const std::string& /*catalogName*/) override;
+
+ std::string getDefaultSchemaName() const override;
+
+    std::pair<duckdb::DuckDB, duckdb::Connection> getConnection(
+ const std::string& dbPath) const override;
+
+private:
+ static constexpr char DEFAULT_CATALOG_NAME[] = "pg";
+ static constexpr char DEFAULT_SCHEMA_NAME[] = "public";
+};
+
+} // namespace postgres_scanner
+} // namespace kuzu
diff --git a/extension/postgres_scanner/src/include/postgres_scanner_extension.h b/extension/postgres_scanner/src/include/postgres_scanner_extension.h
new file mode 100644
index 00000000000..aae2733d5ed
--- /dev/null
+++ b/extension/postgres_scanner/src/include/postgres_scanner_extension.h
@@ -0,0 +1,15 @@
+#pragma once
+
+#include "extension/extension.h"
+#include "main/database.h"
+
+namespace kuzu {
+namespace postgres_scanner {
+
+class PostgresScannerExtension final : public extension::Extension {
+public:
+ static void load(main::ClientContext* context);
+};
+
+} // namespace postgres_scanner
+} // namespace kuzu
diff --git a/extension/postgres_scanner/src/include/postgres_storage.h b/extension/postgres_scanner/src/include/postgres_storage.h
new file mode 100644
index 00000000000..bf0f14b0556
--- /dev/null
+++ b/extension/postgres_scanner/src/include/postgres_storage.h
@@ -0,0 +1,17 @@
+#pragma once
+
+#include "common/string_utils.h"
+#include "storage/storage_extension.h"
+
+namespace kuzu {
+namespace postgres_scanner {
+
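+// Storage extension backing ATTACH ... (dbtype 'POSTGRES'); canHandleDB matches the
+// database type case-insensitively.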
+class PostgresStorageExtension final : public storage::StorageExtension {
+public:
+ PostgresStorageExtension();
+
+ bool canHandleDB(std::string dbType) const override;
+};
+
+} // namespace postgres_scanner
+} // namespace kuzu
diff --git a/extension/postgres_scanner/src/postgres_catalog.cpp b/extension/postgres_scanner/src/postgres_catalog.cpp
new file mode 100644
index 00000000000..713fcf40836
--- /dev/null
+++ b/extension/postgres_scanner/src/postgres_catalog.cpp
@@ -0,0 +1,51 @@
+#include "postgres_catalog.h"
+
+#include "common/exception/binder.h"
+#include "postgres_storage.h"
+
+namespace kuzu {
+namespace postgres_scanner {
+
+void PostgresCatalogContent::init(const std::string& dbPath, const std::string& /*catalogName*/,
+ main::ClientContext* context) {
+ duckdb_scanner::DuckDBCatalogContent::init(dbPath, DEFAULT_CATALOG_NAME, context);
+}
+
+std::string PostgresCatalogContent::getDefaultSchemaName() const {
+ return DEFAULT_SCHEMA_NAME;
+}
+
+std::unique_ptr<binder::BoundCreateTableInfo> PostgresCatalogContent::bindCreateTableInfo(
+ duckdb::Connection& con, const std::string& tableName, const std::string& dbPath,
+ const std::string& /*catalogName*/) {
+    std::vector<binder::PropertyInfo> propertyInfos;
+ if (!bindPropertyInfos(con, tableName, DEFAULT_CATALOG_NAME, propertyInfos)) {
+ return nullptr;
+ }
+    auto extraCreatePostgresTableInfo = std::make_unique<BoundExtraCreatePostgresTableInfo>(dbPath,
+ "" /* dbPath */, DEFAULT_CATALOG_NAME, getDefaultSchemaName(), std::move(propertyInfos));
+    return std::make_unique<binder::BoundCreateTableInfo>(common::TableType::FOREIGN, tableName,
+ std::move(extraCreatePostgresTableInfo));
+}
+
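+// Runs a query on the given DuckDB connection and rethrows any DuckDB-side error as a
+// Kuzu RuntimeException.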
+static void executeQueryAndCheckErrMsg(duckdb::Connection& con, std::string query) {
+ auto result = con.Query(query);
+ if (result->HasError()) {
+ throw common::RuntimeException(common::stringFormat(
+ "Failed to execute query {}, due to: {}", query, result->GetError()));
+ }
+}
+
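+// Spins up an in-memory DuckDB instance, loads DuckDB's postgres extension, and attaches
+// the Postgres database described by the libpq-style connection string in dbPath under
+// the default catalog name.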
+std::pair<duckdb::DuckDB, duckdb::Connection> PostgresCatalogContent::getConnection(
+ const std::string& dbPath) const {
+ duckdb::DuckDB db(nullptr);
+ duckdb::Connection con(db);
+ executeQueryAndCheckErrMsg(con, "install postgres;");
+ executeQueryAndCheckErrMsg(con, "load postgres;");
+ executeQueryAndCheckErrMsg(con,
+ common::stringFormat("attach '{}' as {} (TYPE postgres);", dbPath, DEFAULT_CATALOG_NAME));
+ return std::make_pair(std::move(db), std::move(con));
+}
+
+} // namespace postgres_scanner
+} // namespace kuzu
diff --git a/extension/postgres_scanner/src/postgres_scanner_extension.cpp b/extension/postgres_scanner/src/postgres_scanner_extension.cpp
new file mode 100644
index 00000000000..d5ac7337075
--- /dev/null
+++ b/extension/postgres_scanner/src/postgres_scanner_extension.cpp
@@ -0,0 +1,27 @@
+#include "postgres_scanner_extension.h"
+
+#include "postgres_storage.h"
+
+namespace kuzu {
+namespace postgres_scanner {
+
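+// Loading the extension registers the Postgres storage extension under the "postgres"
+// database type on the current database instance.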
+void PostgresScannerExtension::load(main::ClientContext* context) {
+ auto db = context->getDatabase();
+    db->registerStorageExtension("postgres", std::make_unique<PostgresStorageExtension>());
+}
+
+} // namespace postgres_scanner
+} // namespace kuzu
+
+extern "C" {
+// Because we link against the static library on Windows, we implicitly inherit KUZU_STATIC_DEFINE,
+// which cancels out any exporting, so we can't use KUZU_API.
+#if defined(_WIN32)
+#define INIT_EXPORT __declspec(dllexport)
+#else
+#define INIT_EXPORT __attribute__((visibility("default")))
+#endif
+INIT_EXPORT void init(kuzu::main::ClientContext* context) {
+ kuzu::postgres_scanner::PostgresScannerExtension::load(context);
+}
+}
diff --git a/extension/postgres_scanner/src/postgres_storage.cpp b/extension/postgres_scanner/src/postgres_storage.cpp
new file mode 100644
index 00000000000..7ba98b14484
--- /dev/null
+++ b/extension/postgres_scanner/src/postgres_storage.cpp
@@ -0,0 +1,40 @@
+#include "postgres_storage.h"
+
+#include <regex>
+
+#include "catalog/catalog_entry/table_catalog_entry.h"
+#include "duckdb_type_converter.h"
+#include "postgres_catalog.h"
+
+namespace kuzu {
+namespace postgres_scanner {
+
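+// Extracts the database name from a libpq-style connection string,
+// e.g. "dbname=pgscan user=ci host=localhost" -> "pgscan".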
+std::string extractDBName(const std::string& connectionInfo) {
+ std::regex pattern("dbname=([^ ]+)");
+ std::smatch match;
+ if (std::regex_search(connectionInfo, match, pattern)) {
+ return match.str(1);
+ }
+ throw common::RuntimeException{"Invalid postgresql connection string."};
+}
+
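+// Attach callback used by the storage extension: builds a Postgres-backed catalog from
+// the connection string. If no alias is given (empty dbName), the Postgres dbname is
+// used as the attached database's name.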
+std::unique_ptr<main::AttachedDatabase> attachPostgres(std::string dbName, std::string dbPath,
+ main::ClientContext* clientContext) {
+ auto catalogName = extractDBName(dbPath);
+ if (dbName == "") {
+ dbName = catalogName;
+ }
+    auto postgresCatalog = std::make_unique<PostgresCatalogContent>();
+ postgresCatalog->init(dbPath, catalogName, clientContext);
+    return std::make_unique<main::AttachedDatabase>(dbName, std::move(postgresCatalog));
+}
+
+PostgresStorageExtension::PostgresStorageExtension() : StorageExtension{attachPostgres} {}
+
+bool PostgresStorageExtension::canHandleDB(std::string dbType) const {
+ common::StringUtils::toUpper(dbType);
+ return dbType == "POSTGRES";
+}
+
+} // namespace postgres_scanner
+} // namespace kuzu
diff --git a/extension/postgres_scanner/test/test_files/create_test_db.sql b/extension/postgres_scanner/test/test_files/create_test_db.sql
new file mode 100644
index 00000000000..e2ecbc11aff
--- /dev/null
+++ b/extension/postgres_scanner/test/test_files/create_test_db.sql
@@ -0,0 +1,241 @@
+--
+-- PostgreSQL database dump
+--
+
+-- Dumped from database version 14.10 (Homebrew)
+-- Dumped by pg_dump version 14.10 (Homebrew)
+
+SET statement_timeout = 0;
+SET lock_timeout = 0;
+SET idle_in_transaction_session_timeout = 0;
+SET client_encoding = 'UTF8';
+SET standard_conforming_strings = on;
+SELECT pg_catalog.set_config('search_path', '', false);
+SET check_function_bodies = false;
+SET xmloption = content;
+SET client_min_messages = warning;
+SET row_security = off;
+
+--
+-- Name: audience_type; Type: TYPE; Schema: public; Owner: ci
+--
+
+CREATE TYPE public.audience_type AS (
+ key character varying,
+ value bigint
+);
+
+
+ALTER TYPE public.audience_type OWNER TO ci;
+
+--
+-- Name: description_type; Type: TYPE; Schema: public; Owner: ci
+--
+
+CREATE TYPE public.description_type AS (
+ rating double precision,
+ stars bigint,
+ views bigint,
+ release timestamp without time zone,
+ release_ns timestamp without time zone,
+ release_ms timestamp without time zone,
+ release_sec timestamp without time zone,
+ release_tz timestamp with time zone,
+ film date,
+ u8 smallint,
+ u16 smallint,
+ u32 integer,
+ u64 bigint,
+ hugedata numeric
+);
+
+
+ALTER TYPE public.description_type OWNER TO ci;
+
+--
+-- Name: mood; Type: TYPE; Schema: public; Owner: ci
+--
+
+CREATE TYPE public.mood AS ENUM (
+ 'sad',
+ 'ok',
+ 'happy'
+);
+
+
+ALTER TYPE public.mood OWNER TO ci;
+
+--
+-- Name: state_type; Type: TYPE; Schema: public; Owner: ci
+--
+
+CREATE TYPE public.state_type AS (
+ revenue smallint,
+ location character varying[]
+);
+
+
+ALTER TYPE public.state_type OWNER TO ci;
+
+--
+-- Name: stock_type; Type: TYPE; Schema: public; Owner: ci
+--
+
+CREATE TYPE public.stock_type AS (
+ price bigint[],
+ volume bigint
+);
+
+
+ALTER TYPE public.stock_type OWNER TO ci;
+
+SET default_tablespace = '';
+
+SET default_table_access_method = heap;
+
+--
+-- Name: movies; Type: TABLE; Schema: public; Owner: ci
+--
+
+CREATE TABLE public.movies (
+ name character varying NOT NULL,
+ length integer,
+ note character varying,
+ description public.description_type,
+ content bytea,
+ audience public.audience_type[]
+);
+
+
+ALTER TABLE public.movies OWNER TO ci;
+
+--
+-- Name: organisation; Type: TABLE; Schema: public; Owner: ci
+--
+
+CREATE TABLE public.organisation (
+ id bigint NOT NULL,
+ name character varying,
+ orgcode bigint,
+ mark double precision,
+ score bigint,
+ history interval,
+ licensevalidinterval interval,
+ rating double precision,
+ state public.state_type,
+ stock public.stock_type,
+ info character varying
+);
+
+
+ALTER TABLE public.organisation OWNER TO ci;
+
+--
+-- Name: person; Type: TABLE; Schema: public; Owner: ci
+--
+
+CREATE TABLE public.person (
+ id bigint NOT NULL,
+ fname character varying,
+ gender bigint,
+ isstudent boolean,
+ isworker boolean,
+ age bigint,
+ eyesight double precision,
+ birthdate date,
+ registertime timestamp without time zone,
+ lastjobduration interval,
+ workedhours bigint[],
+ usednames character varying[],
+ height double precision,
+ u uuid
+);
+
+
+ALTER TABLE public.person OWNER TO ci;
+
+--
+-- Name: persontest; Type: TABLE; Schema: public; Owner: ci
+--
+
+CREATE TABLE public.persontest (
+ id integer
+);
+
+
+ALTER TABLE public.persontest OWNER TO ci;
+
+--
+-- Data for Name: movies; Type: TABLE DATA; Schema: public; Owner: ci
+--
+
+COPY public.movies (name, length, note, description, content, audience) FROM stdin;
+Sóló cón tu párejâ 126 this is a very very good movie (5.3,2,152,"2011-08-20 11:25:30","2011-08-20 11:25:30","2011-08-20 11:25:30","2011-08-20 11:25:30","2011-08-20 11:25:30+08",2012-05-11,220,20,1,180,1844674407370955161811111111) \\x5c7841415c784142696e746572657374696e675c783042 {"(audience1,52)","(audience53,42)"}
+The 😂😃🧘🏻♂️🌍🌦️🍞🚗 movie 2544 the movie is very very good (7,10,982,"2018-11-13 13:33:11","2018-11-13 13:33:11","2018-11-13 13:33:11","2018-11-13 13:33:11","2018-11-13 13:33:11+08",2014-09-12,12,120,55,1,-1844674407370955161511) \\x5c7841425c784344 {"(audience1,33)"}
+Roma 298 the movie is very interesting and funny (1223,100,10003,"2011-02-11 16:44:22","2011-02-11 16:44:22","2011-02-11 16:44:22","2011-02-11 16:44:22","2011-02-11 16:44:22+08",2013-02-22,1,15,200,4,-15) \\x707572652061736369692063686172616374657273 {}
+\.
+
+
+--
+-- Data for Name: organisation; Type: TABLE DATA; Schema: public; Owner: ci
+--
+
+COPY public.organisation (id, name, orgcode, mark, score, history, licensevalidinterval, rating, state, stock, info) FROM stdin;
+1 ABFsUni 325 3.7 -2 10 years 5 mons 13:00:00.000024 3 years 5 days 1 (138,"{toronto,""montr,eal""}") ("{96,56}",1000) 3.12
+4 CsWork 934 4.1 -100 2 years 4 days 10:00:00 26 years 52 days 48:00:00 0.78 (152,"{""vanco,uver north area""}") ("{15,78,671}",432) abcd
+6 DEsWork 824 4.1 7 2 years 04:34:00.000022 82:00:00.1 0.52 (558,"{""very long city name"",""new york""}") ({22},99) 2023-12-15
+\.
+
+
+--
+-- Data for Name: person; Type: TABLE DATA; Schema: public; Owner: ci
+--
+
+COPY public.person (id, fname, gender, isstudent, isworker, age, eyesight, birthdate, registertime, lastjobduration, workedhours, usednames, height, u) FROM stdin;
+0 Alice 1 t f 35 5 1900-01-01 2011-08-20 11:25:30 3 years 2 days 13:02:00 {10,5} {Aida} 1.731 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+2 Bob 2 t f 30 5.1 1900-01-01 2008-11-03 15:25:30.000526 10 years 5 mons 13:00:00.000024 {12,8} {Bobby} 0.99 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a12
+3 Carol 1 f t 45 5 1940-06-22 1911-08-20 02:32:21 48:24:11 {4,5} {Carmen,Fred} 1 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a13
+5 Dan 2 f t 20 4.8 1950-07-23 2031-11-30 12:25:30 10 years 5 mons 13:00:00.000024 {1,9} {Wolfeschlegelstein,Daniel} 1.3 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14
+7 Elizabeth 1 f t 20 4.7 1980-10-26 1976-12-23 11:21:42 48:24:11 {2} {Ein} 1.463 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a15
+8 Farooq 2 t f 25 4.5 1980-10-26 1972-07-31 13:22:30.678559 00:18:00.024 {3,4,5,6,7} {Fesdwe} 1.51 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a16
+9 Greg 2 f f 40 4.9 1980-10-26 1976-12-23 11:21:42 10 years 5 mons 13:00:00.000024 {1} {Grad} 1.6 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17
+10 Hubert Blaine Wolfeschlegelsteinhausenbergerdorff 2 f t 83 4.9 1990-11-27 2023-02-21 13:25:30 3 years 2 days 13:02:00 {10,11,12,3,4,5,6,7} {Ad,De,Hi,Kye,Orlan} 1.323 a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a18
+\.
+
+
+--
+-- Data for Name: persontest; Type: TABLE DATA; Schema: public; Owner: ci
+--
+
+COPY public.persontest (id) FROM stdin;
+\.
+
+
+--
+-- Name: movies movies_pkey; Type: CONSTRAINT; Schema: public; Owner: ci
+--
+
+ALTER TABLE ONLY public.movies
+ ADD CONSTRAINT movies_pkey PRIMARY KEY (name);
+
+
+--
+-- Name: organisation organisation_pkey; Type: CONSTRAINT; Schema: public; Owner: ci
+--
+
+ALTER TABLE ONLY public.organisation
+ ADD CONSTRAINT organisation_pkey PRIMARY KEY (id);
+
+
+--
+-- Name: person person_pkey; Type: CONSTRAINT; Schema: public; Owner: ci
+--
+
+ALTER TABLE ONLY public.person
+ ADD CONSTRAINT person_pkey PRIMARY KEY (id);
+
+
+--
+-- PostgreSQL database dump complete
+--
+
diff --git a/extension/postgres_scanner/test/test_files/postgres_scanner.test b/extension/postgres_scanner/test/test_files/postgres_scanner.test
new file mode 100644
index 00000000000..76ee4b0dc64
--- /dev/null
+++ b/extension/postgres_scanner/test/test_files/postgres_scanner.test
@@ -0,0 +1,50 @@
+-GROUP PostgresScanner
+-DATASET CSV empty
+
+--
+
+-CASE ScanPostgresTable
+-STATEMENT load extension "${KUZU_ROOT_DIRECTORY}/extension/postgres_scanner/build/libpostgres_scanner.kuzu_extension"
+---- ok
+-STATEMENT ATTACH 'dbname=pgscan user=ci host=localhost' as tinysnb (dbtype 'POSTGRES');
+---- ok
+-STATEMENT LOAD FROM tinysnb_person RETURN *;
+---- 8
+0|Alice|1|True|False|35|5.000000|1900-01-01|2011-08-20 11:25:30|3 years 2 days 13:02:00|[10,5]|[Aida]|1.731000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11
+10|Hubert Blaine Wolfeschlegelsteinhausenbergerdorff|2|False|True|83|4.900000|1990-11-27|2023-02-21 13:25:30|3 years 2 days 13:02:00|[10,11,12,3,4,5,6,7]|[Ad,De,Hi,Kye,Orlan]|1.323000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a18
+2|Bob|2|True|False|30|5.100000|1900-01-01|2008-11-03 15:25:30.000526|10 years 5 months 13:00:00.000024|[12,8]|[Bobby]|0.990000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a12
+3|Carol|1|False|True|45|5.000000|1940-06-22|1911-08-20 02:32:21|48:24:11|[4,5]|[Carmen,Fred]|1.000000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a13
+5|Dan|2|False|True|20|4.800000|1950-07-23|2031-11-30 12:25:30|10 years 5 months 13:00:00.000024|[1,9]|[Wolfeschlegelstein,Daniel]|1.300000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a14
+7|Elizabeth|1|False|True|20|4.700000|1980-10-26|1976-12-23 11:21:42|48:24:11|[2]|[Ein]|1.463000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a15
+8|Farooq|2|True|False|25|4.500000|1980-10-26|1972-07-31 13:22:30.678559|00:18:00.024|[3,4,5,6,7]|[Fesdwe]|1.510000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a16
+9|Greg|2|False|False|40|4.900000|1980-10-26|1976-12-23 11:21:42|10 years 5 months 13:00:00.000024|[1]|[Grad]|1.600000|a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a17
+-STATEMENT LOAD FROM tinysnb_organisation RETURN *;
+---- 3
+1|ABFsUni|325|3.700000|-2|10 years 5 months 13:00:00.000024|3 years 5 days|1.000000|{revenue: 138, "location": [toronto,montr,eal]}|{price: [96,56], volume: 1000}|3.12
+4|CsWork|934|4.100000|-100|2 years 4 days 10:00:00|26 years 52 days 48:00:00|0.780000|{revenue: 152, "location": [vanco,uver north area]}|{price: [15,78,671], volume: 432}|abcd
+6|DEsWork|824|4.100000|7|2 years 04:34:00.000022|82:00:00.1|0.520000|{revenue: 558, "location": [very long city name,new york]}|{price: [22], volume: 99}|2023-12-15
+-STATEMENT LOAD FROM tinysnb_movies RETURN *;
+---- 3
+Roma|298|the movie is very interesting and funny|{rating: 1223.000000, stars: 100, "views": 10003, "release": 2011-02-11 16:44:22, release_ns: 2011-02-11 16:44:22, release_ms: 2011-02-11 16:44:22, release_sec: 2011-02-11 16:44:22, release_tz: 2011-02-11 08:44:22+00, film: 2013-02-22, u8: 1, u16: 15, u32: 200, u64: 4, hugedata: -15.000000}|pure ascii characters|[]
+Sóló cón tu párejâ|126|this is a very very good movie|{rating: 5.300000, stars: 2, "views": 152, "release": 2011-08-20 11:25:30, release_ns: 2011-08-20 11:25:30, release_ms: 2011-08-20 11:25:30, release_sec: 2011-08-20 11:25:30, release_tz: 2011-08-20 03:25:30+00, film: 2012-05-11, u8: 220, u16: 20, u32: 1, u64: 180, hugedata: 1844674407370955161600000000.000000}|\x5CxAA\x5CxABinteresting\x5Cx0B|[{"key": audience1, "value": 52},{"key": audience53, "value": 42}]
+The 😂😃🧘🏻♂️🌍🌦️🍞🚗 movie|2544|the movie is very very good|{rating: 7.000000, stars: 10, "views": 982, "release": 2018-11-13 13:33:11, release_ns: 2018-11-13 13:33:11, release_ms: 2018-11-13 13:33:11, release_sec: 2018-11-13 13:33:11, release_tz: 2018-11-13 05:33:11+00, film: 2014-09-12, u8: 12, u16: 120, u32: 55, u64: 1, hugedata: -1844674407370954899456.000000}|\x5CxAB\x5CxCD|[{"key": audience1, "value": 33}]
+-STATEMENT LOAD FROM tinysnb_person1 RETURN *;
+---- error
+Catalog exception: Table: person1 does not exist.
+-STATEMENT LOAD FROM tinysnb1_person RETURN *;
+---- error
+Binder exception: No database named tinysnb1 has been attached.
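+# Attaching without an alias defaults the attached database name to the Postgres dbname (here: pgscan).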
+-STATEMENT ATTACH 'dbname=pgscan user=ci host=localhost' (dbtype 'POSTGRES');
+---- ok
+-STATEMENT LOAD FROM pgscan_movies RETURN count(*);
+---- 1
+3
+-STATEMENT LOAD FROM pgscan_movies where length > 2500 RETURN name;
+---- 1
+The 😂😃🧘🏻♂️🌍🌦️🍞🚗 movie
+# TODO(Ziyi): the error message is platform-dependent. We need to implement regex matching in the testing framework to handle this.
+#-LOG IncorrectConnectionStr
+#-STATEMENT ATTACH 'dbname=test2132131 user=ci host=127.0.0.1' as tinysnb (dbtype 'POSTGRES');
+#---- error
+#Binder exception: Failed to attach postgres database due to: IO Error: Unable to connect to Postgres at dbname=test2132131 user=ci host=127.0.0.1: connection to server at "127.0.0.1", port 5432 failed: Connection refused
+# Is the server running on that host and accepting TCP/IP connections?
diff --git a/scripts/antlr4/Cypher.g4.copy b/scripts/antlr4/Cypher.g4.copy
index 763bb33a916..d3491a5ad84 100644
--- a/scripts/antlr4/Cypher.g4.copy
+++ b/scripts/antlr4/Cypher.g4.copy
@@ -31,26 +31,44 @@ oC_Statement
| kU_Transaction
| kU_Extension
| kU_ExportDatabase
- | kU_ImportDatabase;
+ | kU_ImportDatabase
+ | kU_AttachDatabase
+ | kU_DetachDatabase;
kU_CopyFrom
- : COPY SP oC_SchemaName ( ( SP? '(' SP? kU_ColumnNames SP? ')' SP? ) | SP ) FROM SP (kU_FilePaths | oC_Variable) ( SP? '(' SP? kU_ParsingOptions SP? ')' )? ;
+ : COPY SP oC_SchemaName ( ( SP? kU_ColumnNames SP? ) | SP ) FROM SP kU_ScanSource ( SP? kU_ParsingOptions )? ;
kU_ColumnNames
- : oC_SchemaName ( SP? ',' SP? oC_SchemaName )* ;
+ : '(' SP? oC_SchemaName ( SP? ',' SP? oC_SchemaName )* SP? ')';
+
+kU_ScanSource
+ : kU_FilePaths
+ | '(' SP? oC_Query SP? ')'
+ | oC_Variable ;
kU_CopyFromByColumn
: COPY SP oC_SchemaName SP FROM SP '(' SP? StringLiteral ( SP? ',' SP? StringLiteral )* ')' SP BY SP COLUMN ;
kU_CopyTO
- : COPY SP '(' SP? oC_Query SP? ')' SP TO SP StringLiteral ( SP? '(' SP? kU_ParsingOptions SP? ')' )? ;
+ : COPY SP '(' SP? oC_Query SP? ')' SP TO SP StringLiteral ( SP? kU_ParsingOptions )? ;
kU_ExportDatabase
- : EXPORT SP DATABASE SP StringLiteral ( SP? '(' SP? kU_ParsingOptions SP? ')' )? ;
+ : EXPORT SP DATABASE SP StringLiteral ( SP? kU_ParsingOptions )? ;
kU_ImportDatabase
: IMPORT SP DATABASE SP StringLiteral;
+kU_AttachDatabase
+ : ATTACH SP StringLiteral (SP AS SP oC_SchemaName SP)? (SP? '(' SP? DBTYPE SP StringLiteral SP? ')')?;
+
+ATTACH:
+ ( 'A' | 'a') ( 'T' | 't') ( 'T' | 't') ( 'A' | 'a') ( 'C' | 'c') ( 'H' | 'h');
+
+DBTYPE:
+ ( 'D' | 'd') ( 'B' | 'b') ( 'T' | 't') ( 'Y' | 'y') ( 'P' | 'p') ( 'E' | 'e');
+
+kU_DetachDatabase
+ : DETACH SP oC_SchemaName;
kU_StandaloneCall
: CALL SP oC_SymbolicName SP? '=' SP? oC_Literal ;
@@ -58,9 +76,9 @@ kU_StandaloneCall
CALL : ( 'C' | 'c' ) ( 'A' | 'a' ) ( 'L' | 'l' ) ( 'L' | 'l' ) ;
kU_CommentOn
- : COMMENT SP ON SP TABLE SP oC_SchemaName SP IS SP StringLiteral ;
+ : COMMENT_ SP ON SP TABLE SP oC_SchemaName SP IS SP StringLiteral ;
-COMMENT : ( 'C' | 'c' ) ( 'O' | 'o' ) ( 'M' | 'm' ) ( 'M' | 'm' ) ( 'E' | 'e' ) ( 'N' | 'n' ) ( 'T' | 't' ) ;
+COMMENT_ : ( 'C' | 'c' ) ( 'O' | 'o' ) ( 'M' | 'm' ) ( 'M' | 'm' ) ( 'E' | 'e' ) ( 'N' | 'n' ) ( 'T' | 't' ) ;
kU_CreateMacro
: CREATE SP MACRO SP oC_FunctionName SP? '(' SP? kU_PositionalArgs? SP? kU_DefaultArg? ( SP? ',' SP? kU_DefaultArg )* SP? ')' SP AS SP oC_Expression ;
@@ -81,7 +99,7 @@ kU_FilePaths
GLOB : ( 'G' | 'g' ) ( 'L' | 'l' ) ( 'O' | 'o' ) ( 'B' | 'b' ) ;
kU_ParsingOptions
- : kU_ParsingOption ( SP? ',' SP? kU_ParsingOption )* ;
+ : '(' SP? kU_ParsingOption ( SP? ',' SP? kU_ParsingOption )* SP? ')' ;
kU_ParsingOption
: oC_SymbolicName SP? '=' SP? oC_Literal;
@@ -291,7 +309,7 @@ oC_ReadingClause
;
kU_LoadFrom
- : LOAD ( SP WITH SP HEADERS SP? '(' SP? kU_PropertyDefinitions SP? ')' )? SP FROM SP (kU_FilePaths ( SP? '(' SP? kU_ParsingOptions SP? ')' )? | oC_Variable) (SP? oC_Where)? ;
+ : LOAD ( SP WITH SP HEADERS SP? '(' SP? kU_PropertyDefinitions SP? ')' )? SP FROM SP kU_ScanSource (SP? kU_ParsingOptions)? (SP? oC_Where)? ;
LOAD : ( 'L' | 'l' ) ( 'O' | 'o' ) ( 'A' | 'a' ) ( 'D' | 'd' ) ;
@@ -754,7 +772,7 @@ oC_SymbolicName
// example of BEGIN and END: TCKWith2.Scenario1
kU_NonReservedKeywords
- : COMMENT
+ : COMMENT_
| COUNT
| NODE
| REL
@@ -819,7 +837,9 @@ WHITESPACE
;
Comment
- : ( '/*' ( Comment_1 | ( '*' Comment_2 ) )* '*/' ) ;
+ : ( '/*' ( Comment_1 | ( '*' Comment_2 ) )* '*/' )
+ | ( '//' ( Comment_3 )* CR? ( LF | EOF ) )
+ ;
oC_LeftArrowHead
: '<'
diff --git a/scripts/multiplatform-test-helper/collect-results.py b/scripts/multiplatform-test-helper/collect-results.py
new file mode 100644
index 00000000000..968ed4a6b8b
--- /dev/null
+++ b/scripts/multiplatform-test-helper/collect-results.py
@@ -0,0 +1,68 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+import pandas as pd
+import json
+
+
+def main():
+ if len(sys.argv) != 2:
+        print("Usage: collect-results.py <results-dir>")
+ sys.exit(1)
+ if not os.path.isdir(sys.argv[1]):
+ print(f"Error: {sys.argv[1]} is not a directory")
+ sys.exit(1)
+ results_dir = sys.argv[1]
+ results_df_hash = {}
+ results_exit_codes_hash = {}
+ results_summary = {}
+ stages = []
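+    # Each <platform>.csv is expected to contain "stage,exit_code" rows without a header;
+    # one DataFrame is collected per platform, keyed by the file's base name.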
+ for root, _, files in os.walk(results_dir):
+ for csv_file in files:
+ if not csv_file.endswith(".csv"):
+ continue
+ platform = csv_file.split(".")[0]
+ df = pd.read_csv(os.path.join(root, csv_file), header=None)
+ df.columns = ["stage", "exit_code"]
+ results_df_hash[platform] = df
+
+ for platform, df in results_df_hash.items():
+ for stage, exit_code in df.values:
+ if stage not in stages:
+ stages.append(stage)
+ if platform not in results_exit_codes_hash:
+ results_exit_codes_hash[platform] = {}
+ results_exit_codes_hash[platform][stage] = int(exit_code)
+
+ for platform in results_df_hash.keys():
+ results_summary[platform] = []
+ for stage in stages:
+ status = (
+ "✅"
+ if stage in results_exit_codes_hash[platform]
+ and results_exit_codes_hash[platform][stage] == 0
+ else "❌"
+ )
+ results_summary[platform].append({"stage": stage, "status": status})
+
+ summary_df = {"stage": stages}
+ for platform, summary in results_summary.items():
+ df = pd.DataFrame(summary)
+ status = df["status"]
+ summary_df[platform] = status
+ summary_df = pd.DataFrame(summary_df)
+ summary_df.index = summary_df["stage"]
+ del summary_df["stage"]
+ summary_df.index.name = None
+
+ markdown = summary_df.to_markdown()
+ with open("results.md", "w") as f:
+ f.write(markdown)
+
+ with open("results.json", "w") as f:
+ json.dump(results_summary, f, indent=4)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/scripts/multiplatform-test-helper/notify-discord.py b/scripts/multiplatform-test-helper/notify-discord.py
new file mode 100644
index 00000000000..a5046b044ed
--- /dev/null
+++ b/scripts/multiplatform-test-helper/notify-discord.py
@@ -0,0 +1,55 @@
+import discord
+import os
+import json
+import sys
+
+TOKEN = os.getenv("DISCORD_BOT_TOKEN")
+CHANNEL_ID = os.getenv("DISCORD_CHANNEL_ID")
+GITHUB_URL = os.getenv("GITHUB_URL")
+
+messages = []
+
+
+if __name__ == "__main__":
+ if not len(sys.argv) == 2:
+        print("Usage: python notify-discord.py <results.json>")
+ sys.exit(1)
+ if not os.path.isfile(sys.argv[1]):
+ print(f"Error: {sys.argv[1]} is not a file")
+ sys.exit(1)
+ if not TOKEN:
+ print("Error: DISCORD_BOT_TOKEN is not set")
+ sys.exit(1)
+
+ if not CHANNEL_ID:
+ print("Error: DISCORD_CHANNEL_ID is not set")
+ sys.exit(1)
+ client = discord.Client(intents=discord.Intents.default())
+
+ @client.event
+ async def on_ready():
+ channel = client.get_channel(int(CHANNEL_ID))
+ for message in messages:
+ try:
+ await channel.send(message)
+ except Exception as e:
+ print(f"Error: {e}")
+ sys.exit(1)
+ await client.close()
+
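+    # Discord caps a single message at 2000 characters, so flush the buffer at ~1500
+    # characters to stay safely under the limit.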
+ message = ""
+ message += "## Multiplatform test result:\n"
+ with open(sys.argv[1], "r") as f:
+ result = json.load(f)
+ for platform in result:
+ if len(message) >= 1500:
+ messages.append(message)
+ message = ""
+ message += f"- **{platform}**:\n"
+ for r in result[platform]:
+ message += f" - {r['stage']}: {r['status']}\n"
+ if GITHUB_URL:
+ message += f" [Github]({GITHUB_URL})"
+ if message:
+ messages.append(message)
+ client.run(TOKEN)
diff --git a/scripts/parquet/csv_to_parquet.py b/scripts/parquet/csv_to_parquet.py
deleted file mode 100644
index af3779d38be..00000000000
--- a/scripts/parquet/csv_to_parquet.py
+++ /dev/null
@@ -1,14 +0,0 @@
-from pyarrow import csv
-import pyarrow.parquet as pq
-
-csv_files = ['dummy.csv']
-has_header = True
-# CSV:
-# has header? autogenerate_column_names=False
-# no header? autogenerate_column_names=True
-read_options = csv.ReadOptions(autogenerate_column_names=not has_header)
-parse_options = csv.ParseOptions(delimiter=",")
-for csv_file in csv_files:
- table = csv.read_csv(csv_file, read_options=read_options,
- parse_options=parse_options)
- pq.write_table(table, csv_file.replace('.csv', '.parquet'))
diff --git a/scripts/pip-package/package_tar.py b/scripts/pip-package/package_tar.py
index 3bb5771d17e..9baaf416dec 100755
--- a/scripts/pip-package/package_tar.py
+++ b/scripts/pip-package/package_tar.py
@@ -9,21 +9,23 @@
base_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
def _get_kuzu_version():
- cmake_file = os.path.abspath(os.path.join(base_dir, '..', 'CMakeLists.txt'))
+ cmake_file = os.path.abspath(os.path.join(base_dir, "..", "CMakeLists.txt"))
with open(cmake_file) as f:
for line in f:
- if line.startswith('project(Kuzu VERSION'):
- raw_version = line.split(' ')[2].strip()
- version_nums = raw_version.split('.')
+ if line.startswith("project(Kuzu VERSION"):
+ raw_version = line.split(" ")[2].strip()
+ version_nums = raw_version.split(".")
if len(version_nums) <= 3:
return raw_version
else:
dev_suffix = version_nums[3]
- version = '.'.join(version_nums[:3])
+ version = ".".join(version_nums[:3])
version += ".dev%s" % dev_suffix
return version
+
if __name__ == "__main__":
if len(sys.argv) == 2:
file_name = sys.argv[1]
@@ -50,11 +52,34 @@ def _get_kuzu_version():
os.remove(os.path.join(tempdir, "kuzu-source.tar"))
+ # Remove components that are not needed for the pip package
+ shutil.rmtree(os.path.join(tempdir, "kuzu-source/dataset"))
+ shutil.rmtree(os.path.join(tempdir, "kuzu-source/examples"))
+ shutil.rmtree(os.path.join(tempdir, "kuzu-source/benchmark"))
+ shutil.rmtree(os.path.join(tempdir, "kuzu-source/logo"))
+ shutil.rmtree(os.path.join(tempdir, "kuzu-source/extension"))
+ shutil.rmtree(os.path.join(tempdir, "kuzu-source/test"))
+ shutil.rmtree(os.path.join(tempdir, "kuzu-source/.github"))
+
os.makedirs(os.path.join(tempdir, "kuzu"))
for path in ["setup.py", "setup.cfg", "MANIFEST.in"]:
shutil.copy2(path, os.path.join(tempdir, path))
- shutil.copy2("../../LICENSE", os.path.join(tempdir, "LICENSE.txt"))
+ shutil.copy2("../../LICENSE", os.path.join(tempdir, "LICENSE"))
shutil.copy2("../../README.md", os.path.join(tempdir, "README.md"))
+
+ shutil.copy2(
+ "../../tools/python_api/pyproject.toml",
+ os.path.join(tempdir, "pyproject.toml"),
+ )
+ # Update the version in pyproject.toml
+ with open(os.path.join(tempdir, "pyproject.toml"), "r") as f:
+ lines = f.readlines()
+ with open(os.path.join(tempdir, "pyproject.toml"), "w") as f:
+ for line in lines:
+ if line.startswith("version ="):
+ f.write('version = "%s"\n' % _get_kuzu_version())
+ else:
+ f.write(line)
shutil.copy2("README.md", os.path.join(tempdir, "README_PYTHON_BUILD.md"))
subprocess.check_call([sys.executable, "setup.py", "egg_info"], cwd=tempdir)
diff --git a/src/antlr4/Cypher.g4 b/src/antlr4/Cypher.g4
index 763bb33a916..d3491a5ad84 100644
--- a/src/antlr4/Cypher.g4
+++ b/src/antlr4/Cypher.g4
@@ -31,26 +31,44 @@ oC_Statement
| kU_Transaction
| kU_Extension
| kU_ExportDatabase
- | kU_ImportDatabase;
+ | kU_ImportDatabase
+ | kU_AttachDatabase
+ | kU_DetachDatabase;
kU_CopyFrom
- : COPY SP oC_SchemaName ( ( SP? '(' SP? kU_ColumnNames SP? ')' SP? ) | SP ) FROM SP (kU_FilePaths | oC_Variable) ( SP? '(' SP? kU_ParsingOptions SP? ')' )? ;
+ : COPY SP oC_SchemaName ( ( SP? kU_ColumnNames SP? ) | SP ) FROM SP kU_ScanSource ( SP? kU_ParsingOptions )? ;
kU_ColumnNames
- : oC_SchemaName ( SP? ',' SP? oC_SchemaName )* ;
+ : '(' SP? oC_SchemaName ( SP? ',' SP? oC_SchemaName )* SP? ')';
+
+kU_ScanSource
+ : kU_FilePaths
+ | '(' SP? oC_Query SP? ')'
+ | oC_Variable ;
kU_CopyFromByColumn
: COPY SP oC_SchemaName SP FROM SP '(' SP? StringLiteral ( SP? ',' SP? StringLiteral )* ')' SP BY SP COLUMN ;
kU_CopyTO
- : COPY SP '(' SP? oC_Query SP? ')' SP TO SP StringLiteral ( SP? '(' SP? kU_ParsingOptions SP? ')' )? ;
+ : COPY SP '(' SP? oC_Query SP? ')' SP TO SP StringLiteral ( SP? kU_ParsingOptions )? ;
kU_ExportDatabase
- : EXPORT SP DATABASE SP StringLiteral ( SP? '(' SP? kU_ParsingOptions SP? ')' )? ;
+ : EXPORT SP DATABASE SP StringLiteral ( SP? kU_ParsingOptions )? ;
kU_ImportDatabase
: IMPORT SP DATABASE SP StringLiteral;
+kU_AttachDatabase
+ : ATTACH SP StringLiteral (SP AS SP oC_SchemaName SP)? (SP? '(' SP? DBTYPE SP StringLiteral SP? ')')?;
+
+ATTACH:
+ ( 'A' | 'a') ( 'T' | 't') ( 'T' | 't') ( 'A' | 'a') ( 'C' | 'c') ( 'H' | 'h');
+
+DBTYPE:
+ ( 'D' | 'd') ( 'B' | 'b') ( 'T' | 't') ( 'Y' | 'y') ( 'P' | 'p') ( 'E' | 'e');
+
+kU_DetachDatabase
+ : DETACH SP oC_SchemaName;
kU_StandaloneCall
: CALL SP oC_SymbolicName SP? '=' SP? oC_Literal ;
@@ -58,9 +76,9 @@ kU_StandaloneCall
CALL : ( 'C' | 'c' ) ( 'A' | 'a' ) ( 'L' | 'l' ) ( 'L' | 'l' ) ;
kU_CommentOn
- : COMMENT SP ON SP TABLE SP oC_SchemaName SP IS SP StringLiteral ;
+ : COMMENT_ SP ON SP TABLE SP oC_SchemaName SP IS SP StringLiteral ;
-COMMENT : ( 'C' | 'c' ) ( 'O' | 'o' ) ( 'M' | 'm' ) ( 'M' | 'm' ) ( 'E' | 'e' ) ( 'N' | 'n' ) ( 'T' | 't' ) ;
+COMMENT_ : ( 'C' | 'c' ) ( 'O' | 'o' ) ( 'M' | 'm' ) ( 'M' | 'm' ) ( 'E' | 'e' ) ( 'N' | 'n' ) ( 'T' | 't' ) ;
kU_CreateMacro
: CREATE SP MACRO SP oC_FunctionName SP? '(' SP? kU_PositionalArgs? SP? kU_DefaultArg? ( SP? ',' SP? kU_DefaultArg )* SP? ')' SP AS SP oC_Expression ;
@@ -81,7 +99,7 @@ kU_FilePaths
GLOB : ( 'G' | 'g' ) ( 'L' | 'l' ) ( 'O' | 'o' ) ( 'B' | 'b' ) ;
kU_ParsingOptions
- : kU_ParsingOption ( SP? ',' SP? kU_ParsingOption )* ;
+ : '(' SP? kU_ParsingOption ( SP? ',' SP? kU_ParsingOption )* SP? ')' ;
kU_ParsingOption
: oC_SymbolicName SP? '=' SP? oC_Literal;
@@ -291,7 +309,7 @@ oC_ReadingClause
;
kU_LoadFrom
- : LOAD ( SP WITH SP HEADERS SP? '(' SP? kU_PropertyDefinitions SP? ')' )? SP FROM SP (kU_FilePaths ( SP? '(' SP? kU_ParsingOptions SP? ')' )? | oC_Variable) (SP? oC_Where)? ;
+ : LOAD ( SP WITH SP HEADERS SP? '(' SP? kU_PropertyDefinitions SP? ')' )? SP FROM SP kU_ScanSource (SP? kU_ParsingOptions)? (SP? oC_Where)? ;
LOAD : ( 'L' | 'l' ) ( 'O' | 'o' ) ( 'A' | 'a' ) ( 'D' | 'd' ) ;
@@ -754,7 +772,7 @@ oC_SymbolicName
// example of BEGIN and END: TCKWith2.Scenario1
kU_NonReservedKeywords
- : COMMENT
+ : COMMENT_
| COUNT
| NODE
| REL
@@ -819,7 +837,9 @@ WHITESPACE
;
Comment
- : ( '/*' ( Comment_1 | ( '*' Comment_2 ) )* '*/' ) ;
+ : ( '/*' ( Comment_1 | ( '*' Comment_2 ) )* '*/' )
+ | ( '//' ( Comment_3 )* CR? ( LF | EOF ) )
+ ;
oC_LeftArrowHead
: '<'
diff --git a/src/binder/CMakeLists.txt b/src/binder/CMakeLists.txt
index 5cc5bd577e8..3b767103224 100644
--- a/src/binder/CMakeLists.txt
+++ b/src/binder/CMakeLists.txt
@@ -8,6 +8,7 @@ add_subdirectory(visitor)
add_library(kuzu_binder
OBJECT
binder.cpp
+ binder_scope.cpp
bound_statement_result.cpp
bound_statement_rewriter.cpp
bound_statement_visitor.cpp
diff --git a/src/binder/bind/CMakeLists.txt b/src/binder/bind/CMakeLists.txt
index 90047fb71eb..6fa7ba366db 100644
--- a/src/binder/bind/CMakeLists.txt
+++ b/src/binder/bind/CMakeLists.txt
@@ -4,10 +4,12 @@ add_subdirectory(ddl)
add_library(
kuzu_binder_bind
OBJECT
+ bind_attach_database.cpp
bind_comment_on.cpp
bind_copy.cpp
bind_create_macro.cpp
bind_ddl.cpp
+ bind_detach_database.cpp
bind_explain.cpp
bind_file_scan.cpp
bind_graph_pattern.cpp
diff --git a/src/binder/bind/bind_attach_database.cpp b/src/binder/bind/bind_attach_database.cpp
new file mode 100644
index 00000000000..cad7a9b2bf6
--- /dev/null
+++ b/src/binder/bind/bind_attach_database.cpp
@@ -0,0 +1,16 @@
+#include "binder/binder.h"
+#include "binder/bound_attach_database.h"
+#include "common/cast.h"
+#include "parser/attach_database.h"
+
+namespace kuzu {
+namespace binder {
+
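+// ATTACH requires no name resolution at bind time; the parsed attach info is simply
+// wrapped into a bound statement.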
+std::unique_ptr<BoundStatement> Binder::bindAttachDatabase(const parser::Statement& statement) {
+    auto& attachDatabase =
+        common::ku_dynamic_cast<const parser::Statement&, const parser::AttachDatabase&>(statement);
+    return std::make_unique<BoundAttachDatabase>(attachDatabase.getAttachInfo());
+}
+
+} // namespace binder
+} // namespace kuzu
diff --git a/src/binder/bind/bind_comment_on.cpp b/src/binder/bind/bind_comment_on.cpp
index 1f1c845fd7f..5b8e5cf6136 100644
--- a/src/binder/bind/bind_comment_on.cpp
+++ b/src/binder/bind/bind_comment_on.cpp
@@ -1,5 +1,6 @@
#include "binder/binder.h"
#include "binder/bound_comment_on.h"
+#include "catalog/catalog.h"
#include "main/client_context.h"
#include "parser/comment_on.h"
@@ -12,7 +13,7 @@ std::unique_ptr Binder::bindCommentOn(const parser::Statement& s
auto tableName = commentOn.getTable();
auto comment = commentOn.getComment();
validateTableExist(tableName);
- auto tableID = catalog.getTableID(clientContext->getTx(), tableName);
+ auto tableID = clientContext->getCatalog()->getTableID(clientContext->getTx(), tableName);
return std::make_unique(tableID, tableName, comment);
}
diff --git a/src/binder/bind/bind_copy.cpp b/src/binder/bind/bind_copy.cpp
index 6fbc5fc17c1..a3ba5aefa99 100644
--- a/src/binder/bind/bind_copy.cpp
+++ b/src/binder/bind/bind_copy.cpp
@@ -1,12 +1,12 @@
#include "binder/binder.h"
#include "binder/copy/bound_copy_from.h"
#include "binder/copy/bound_copy_to.h"
+#include "catalog/catalog.h"
#include "catalog/catalog_entry/node_table_catalog_entry.h"
#include "catalog/catalog_entry/rdf_graph_catalog_entry.h"
#include "catalog/catalog_entry/rel_table_catalog_entry.h"
#include "common/enums/table_type.h"
#include "common/exception/binder.h"
-#include "common/exception/message.h"
#include "common/string_format.h"
#include "function/table/bind_input.h"
#include "main/client_context.h"
@@ -16,6 +16,7 @@ using namespace kuzu::binder;
using namespace kuzu::catalog;
using namespace kuzu::common;
using namespace kuzu::parser;
+using namespace kuzu::function;
namespace kuzu {
namespace binder {
@@ -36,33 +37,15 @@ std::unique_ptr Binder::bindCopyToClause(const Statement& statem
columnTypes.push_back(column->getDataType());
}
if (fileType != FileType::CSV && fileType != FileType::PARQUET) {
- throw BinderException(ExceptionMessage::validateCopyToCSVParquetExtensionsException());
+ throw BinderException("COPY TO currently only supports csv and parquet files.");
}
if (fileType != FileType::CSV && copyToStatement.getParsingOptionsRef().size() != 0) {
throw BinderException{"Only copy to csv can have options."};
}
auto csvConfig =
CSVReaderConfig::construct(bindParsingOptions(copyToStatement.getParsingOptionsRef()));
- return std::make_unique(boundFilePath, fileType, std::move(columnNames),
- std::move(columnTypes), std::move(query), csvConfig.option.copy());
-}
-
-// As a temporary constraint, we require npy files loaded with COPY FROM BY COLUMN keyword.
-// And csv and parquet files loaded with COPY FROM keyword.
-static void validateByColumnKeyword(FileType fileType, bool byColumn) {
- if (fileType == FileType::NPY && !byColumn) {
- throw BinderException(ExceptionMessage::validateCopyNPYByColumnException());
- }
- if (fileType != FileType::NPY && byColumn) {
- throw BinderException(ExceptionMessage::validateCopyCSVParquetByColumnException());
- }
-}
-
-static void validateCopyNpyNotForRelTables(TableCatalogEntry* tableEntry) {
- if (tableEntry->getTableType() == TableType::REL) {
- throw BinderException(
- ExceptionMessage::validateCopyNpyNotForRelTablesException(tableEntry->getName()));
- }
+ return std::make_unique(boundFilePath, fileType, std::move(query),
+ csvConfig.option.copy());
}
std::unique_ptr Binder::bindCopyFromClause(const Statement& statement) {
@@ -70,8 +53,9 @@ std::unique_ptr Binder::bindCopyFromClause(const Statement& stat
auto tableName = copyStatement.getTableName();
validateTableExist(tableName);
// Bind to table schema.
- auto tableID = catalog.getTableID(clientContext->getTx(), tableName);
- auto tableEntry = catalog.getTableCatalogEntry(clientContext->getTx(), tableID);
+ auto catalog = clientContext->getCatalog();
+ auto tableID = catalog->getTableID(clientContext->getTx(), tableName);
+ auto tableEntry = catalog->getTableCatalogEntry(clientContext->getTx(), tableID);
switch (tableEntry->getTableType()) {
case TableType::REL_GROUP: {
throw BinderException(stringFormat("Cannot copy into {} table with type {}.", tableName,
@@ -80,84 +64,84 @@ std::unique_ptr Binder::bindCopyFromClause(const Statement& stat
default:
break;
}
- auto filePaths = bindFilePaths(copyStatement.getFilePaths());
- auto fileType = bindFileType(filePaths);
- auto readerConfig = std::make_unique(fileType, std::move(filePaths));
- readerConfig->options = bindParsingOptions(copyStatement.getParsingOptionsRef());
- validateByColumnKeyword(readerConfig->fileType, copyStatement.byColumn());
- if (readerConfig->fileType == FileType::NPY) {
- validateCopyNpyNotForRelTables(tableEntry);
- }
switch (tableEntry->getTableType()) {
- case TableType::NODE:
- return bindCopyNodeFrom(statement, std::move(readerConfig),
- ku_dynamic_cast(tableEntry));
- case TableType::REL:
- return bindCopyRelFrom(statement, std::move(readerConfig),
- ku_dynamic_cast(tableEntry));
- case TableType::RDF:
- return bindCopyRdfFrom(statement, std::move(readerConfig),
- ku_dynamic_cast(tableEntry));
+ case TableType::NODE: {
+ auto nodeTableEntry =
+ ku_dynamic_cast(tableEntry);
+ return bindCopyNodeFrom(statement, nodeTableEntry);
+ }
+ case TableType::REL: {
+ auto relTableEntry = ku_dynamic_cast(tableEntry);
+ return bindCopyRelFrom(statement, relTableEntry);
+ }
+ case TableType::RDF: {
+ auto rdfGraphEntry = ku_dynamic_cast(tableEntry);
+ return bindCopyRdfFrom(statement, rdfGraphEntry);
+ }
default: {
KU_UNREACHABLE;
}
}
}
+static void bindExpectedNodeColumns(NodeTableCatalogEntry* nodeTableEntry,
+ const std::vector& inputColumnNames, std::vector& columnNames,
+ std::vector& columnTypes);
+static void bindExpectedRelColumns(RelTableCatalogEntry* relTableEntry,
+ const std::vector& inputColumnNames, std::vector& columnNames,
+ std::vector& columnTypes, main::ClientContext* context);
+
std::unique_ptr Binder::bindCopyNodeFrom(const Statement& statement,
- std::unique_ptr config, NodeTableCatalogEntry* nodeTableEntry) {
+ NodeTableCatalogEntry* nodeTableEntry) {
auto& copyStatement = ku_dynamic_cast(statement);
- auto func = getScanFunction(config->fileType, *config);
- // For table with SERIAL columns, we need to read in serial from files.
- auto containsSerial = nodeTableEntry->containPropertyType(*LogicalType::SERIAL());
+ // Bind expected columns based on catalog information.
std::vector expectedColumnNames;
- std::vector expectedColumnTypes;
- bindExpectedNodeColumns(
- nodeTableEntry, copyStatement.getColumnNames(), expectedColumnNames, expectedColumnTypes);
- auto bindInput = std::make_unique(config->copy(),
- std::move(expectedColumnNames), std::move(expectedColumnTypes), clientContext);
- auto bindData = func->bindFunc(clientContext, bindInput.get());
- expression_vector columns;
- for (auto i = 0u; i < bindData->columnTypes.size(); i++) {
- columns.push_back(createVariable(bindData->columnNames[i], bindData->columnTypes[i]));
+ std::vector expectedColumnTypes;
+ bindExpectedNodeColumns(nodeTableEntry, copyStatement.getColumnNames(), expectedColumnNames,
+ expectedColumnTypes);
+ auto boundSource = bindScanSource(copyStatement.getSource(),
+ copyStatement.getParsingOptionsRef(), expectedColumnNames, expectedColumnTypes);
+ if (boundSource->type == ScanSourceType::FILE) {
+ auto fileSource =
+ ku_dynamic_cast(boundSource.get());
+ auto bindData = ku_dynamic_cast(
+ fileSource->fileScanInfo.bindData.get());
+ if (copyStatement.byColumn() && bindData->config.fileType != FileType::NPY) {
+ throw BinderException(stringFormat("Copy by column with {} file type is not supported.",
+ FileTypeUtils::toString(bindData->config.fileType)));
+ }
}
- auto offset = expressionBinder.createVariableExpression(
- LogicalType(LogicalTypeID::INT64), std::string(InternalKeyword::ANONYMOUS));
- auto boundFileScanInfo =
- std::make_unique(func, std::move(bindData), columns, std::move(offset));
- auto boundCopyFromInfo = BoundCopyFromInfo(
- nodeTableEntry, std::move(boundFileScanInfo), containsSerial, nullptr /* extraInfo */);
+ auto offset = expressionBinder.createVariableExpression(*LogicalType::INT64(),
+ std::string(InternalKeyword::ANONYMOUS));
+ auto boundCopyFromInfo =
+ BoundCopyFromInfo(nodeTableEntry, std::move(boundSource), offset, nullptr /* extraInfo */);
return std::make_unique(std::move(boundCopyFromInfo));
}
std::unique_ptr Binder::bindCopyRelFrom(const parser::Statement& statement,
- std::unique_ptr config, RelTableCatalogEntry* relTableEntry) {
+ RelTableCatalogEntry* relTableEntry) {
auto& copyStatement = ku_dynamic_cast(statement);
- auto func = getScanFunction(config->fileType, *config);
- // For table with SERIAL columns, we need to read in serial from files.
- auto containsSerial = relTableEntry->containPropertyType(*LogicalType::SERIAL());
- KU_ASSERT(containsSerial == false);
- std::vector expectedColumnNames;
- std::vector expectedColumnTypes;
- bindExpectedRelColumns(
- relTableEntry, copyStatement.getColumnNames(), expectedColumnNames, expectedColumnTypes);
- auto bindInput = std::make_unique(std::move(*config),
- std::move(expectedColumnNames), std::move(expectedColumnTypes), clientContext);
- auto bindData = func->bindFunc(clientContext, bindInput.get());
- expression_vector columns;
- for (auto i = 0u; i < bindData->columnTypes.size(); i++) {
- columns.push_back(createVariable(bindData->columnNames[i], bindData->columnTypes[i]));
+ if (copyStatement.byColumn()) {
+ throw BinderException(
+ stringFormat("Copy by column is not supported for relationship table."));
}
- auto offset = expressionBinder.createVariableExpression(
- *LogicalType::INT64(), std::string(InternalKeyword::ROW_OFFSET));
- auto boundFileScanInfo =
- std::make_unique(func, std::move(bindData), columns, offset);
+ // Bind expected columns based on catalog information.
+ std::vector expectedColumnNames;
+ std::vector expectedColumnTypes;
+ bindExpectedRelColumns(relTableEntry, copyStatement.getColumnNames(), expectedColumnNames,
+ expectedColumnTypes, clientContext);
+ auto boundSource = bindScanSource(copyStatement.getSource(),
+ copyStatement.getParsingOptionsRef(), expectedColumnNames, expectedColumnTypes);
+ auto columns = boundSource->getColumns();
+ auto offset = expressionBinder.createVariableExpression(*LogicalType::INT64(),
+ std::string(InternalKeyword::ROW_OFFSET));
auto srcTableID = relTableEntry->getSrcTableID();
auto dstTableID = relTableEntry->getDstTableID();
- auto srcSchema = ku_dynamic_cast(
- catalog.getTableCatalogEntry(clientContext->getTx(), srcTableID));
- auto dstSchema = ku_dynamic_cast(
- catalog.getTableCatalogEntry(clientContext->getTx(), dstTableID));
+ auto catalog = clientContext->getCatalog();
+ auto srcEntry = catalog->getTableCatalogEntry(clientContext->getTx(), srcTableID);
+ auto dstEntry = catalog->getTableCatalogEntry(clientContext->getTx(), dstTableID);
+ auto srcNodeEntry = ku_dynamic_cast(srcEntry);
+ auto dstNodeEntry = ku_dynamic_cast(dstEntry);
auto srcKey = columns[0];
auto dstKey = columns[1];
expression_vector propertyColumns;
@@ -166,8 +150,8 @@ std::unique_ptr Binder::bindCopyRelFrom(const parser::Statement&
}
auto srcOffset = createVariable(InternalKeyword::SRC_OFFSET, LogicalTypeID::INT64);
auto dstOffset = createVariable(InternalKeyword::DST_OFFSET, LogicalTypeID::INT64);
- auto srcPkType = srcSchema->getPrimaryKey()->getDataType();
- auto dstPkType = dstSchema->getPrimaryKey()->getDataType();
+ auto srcPkType = srcNodeEntry->getPrimaryKey()->getDataType();
+ auto dstPkType = dstNodeEntry->getPrimaryKey()->getDataType();
auto srcLookUpInfo = IndexLookupInfo(srcTableID, srcOffset, srcKey, *srcPkType);
auto dstLookUpInfo = IndexLookupInfo(dstTableID, dstOffset, dstKey, *dstPkType);
auto extraCopyRelInfo = std::make_unique();
@@ -176,8 +160,8 @@ std::unique_ptr Binder::bindCopyRelFrom(const parser::Statement&
extraCopyRelInfo->propertyColumns = std::move(propertyColumns);
extraCopyRelInfo->infos.push_back(std::move(srcLookUpInfo));
extraCopyRelInfo->infos.push_back(std::move(dstLookUpInfo));
- auto boundCopyFromInfo = BoundCopyFromInfo(
- relTableEntry, std::move(boundFileScanInfo), containsSerial, std::move(extraCopyRelInfo));
+ auto boundCopyFromInfo =
+ BoundCopyFromInfo(relTableEntry, boundSource->copy(), offset, std::move(extraCopyRelInfo));
return std::make_unique(std::move(boundCopyFromInfo));
}
@@ -206,8 +190,8 @@ static void bindExpectedColumns(TableCatalogEntry* tableEntry,
// Search column data type for each input column.
for (auto& columnName : inputColumnNames) {
if (!tableEntry->containProperty(columnName)) {
- throw BinderException(stringFormat(
- "Table {} does not contain column {}.", tableEntry->getName(), columnName));
+ throw BinderException(stringFormat("Table {} does not contain column {}.",
+ tableEntry->getName(), columnName));
}
auto propertyID = tableEntry->getPropertyID(columnName);
auto property = tableEntry->getProperty(propertyID);
@@ -229,21 +213,22 @@ static void bindExpectedColumns(TableCatalogEntry* tableEntry,
}
}
-void Binder::bindExpectedNodeColumns(NodeTableCatalogEntry* nodeTableEntry,
+void bindExpectedNodeColumns(NodeTableCatalogEntry* nodeTableEntry,
const std::vector<std::string>& inputColumnNames, std::vector<std::string>& columnNames,
- std::vector<common::LogicalType>& columnTypes) {
+ std::vector<LogicalType>& columnTypes) {
KU_ASSERT(columnNames.empty() && columnTypes.empty());
bindExpectedColumns(nodeTableEntry, inputColumnNames, columnNames, columnTypes);
}
-void Binder::bindExpectedRelColumns(RelTableCatalogEntry* relTableEntry,
+void bindExpectedRelColumns(RelTableCatalogEntry* relTableEntry,
const std::vector<std::string>& inputColumnNames, std::vector<std::string>& columnNames,
- std::vector<common::LogicalType>& columnTypes) {
+ std::vector<LogicalType>& columnTypes, main::ClientContext* context) {
KU_ASSERT(columnNames.empty() && columnTypes.empty());
- auto srcTable = ku_dynamic_cast<TableCatalogEntry*, NodeTableCatalogEntry*>(
- catalog.getTableCatalogEntry(clientContext->getTx(), relTableEntry->getSrcTableID()));
- auto dstTable = ku_dynamic_cast<TableCatalogEntry*, NodeTableCatalogEntry*>(
- catalog.getTableCatalogEntry(clientContext->getTx(), relTableEntry->getDstTableID()));
+ auto catalog = context->getCatalog();
+ auto srcEntry = catalog->getTableCatalogEntry(context->getTx(), relTableEntry->getSrcTableID());
+ auto srcTable = ku_dynamic_cast<TableCatalogEntry*, NodeTableCatalogEntry*>(srcEntry);
+ auto dstEntry = catalog->getTableCatalogEntry(context->getTx(), relTableEntry->getDstTableID());
+ auto dstTable = ku_dynamic_cast<TableCatalogEntry*, NodeTableCatalogEntry*>(dstEntry);
columnNames.push_back("from");
columnNames.push_back("to");
auto srcPKColumnType = *srcTable->getPrimaryKey()->getDataType();
diff --git a/src/binder/bind/bind_create_macro.cpp b/src/binder/bind/bind_create_macro.cpp
index 5f1aca07ef9..c4d720dced3 100644
--- a/src/binder/bind/bind_create_macro.cpp
+++ b/src/binder/bind/bind_create_macro.cpp
@@ -1,5 +1,6 @@
#include "binder/binder.h"
#include "binder/bound_create_macro.h"
+#include "catalog/catalog.h"
#include "common/exception/binder.h"
#include "common/string_format.h"
#include "common/string_utils.h"
@@ -16,7 +17,7 @@ std::unique_ptr<BoundStatement> Binder::bindCreateMacro(const Statement& stateme
auto& createMacro = ku_dynamic_cast<const Statement&, const CreateMacro&>(statement);
auto macroName = createMacro.getMacroName();
StringUtils::toUpper(macroName);
- if (catalog.containsMacro(clientContext->getTx(), macroName)) {
+ if (clientContext->getCatalog()->containsMacro(clientContext->getTx(), macroName)) {
throw BinderException{stringFormat("Macro {} already exists.", macroName)};
}
parser::default_macro_args defaultArgs;
diff --git a/src/binder/bind/bind_ddl.cpp b/src/binder/bind/bind_ddl.cpp
index bdb113d29a6..8bb0d0d1223 100644
--- a/src/binder/bind/bind_ddl.cpp
+++ b/src/binder/bind/bind_ddl.cpp
@@ -2,6 +2,7 @@
#include "binder/ddl/bound_alter.h"
#include "binder/ddl/bound_create_table.h"
#include "binder/ddl/bound_drop_table.h"
+#include "catalog/catalog.h"
#include "catalog/catalog_entry/node_table_catalog_entry.h"
#include "catalog/catalog_entry/rdf_graph_catalog_entry.h"
#include "catalog/catalog_entry/rel_group_catalog_entry.h"
@@ -39,8 +40,8 @@ std::vector<PropertyInfo> Binder::bindPropertyInfo(
std::vector propertyInfos;
propertyInfos.reserve(propertyNameDataTypes.size());
for (auto& propertyNameDataType : propertyNameDataTypes) {
- propertyInfos.emplace_back(
- propertyNameDataType.first, *bindDataType(propertyNameDataType.second));
+ propertyInfos.emplace_back(propertyNameDataType.first,
+ *bindDataType(propertyNameDataType.second));
}
validateUniquePropertyName(propertyInfos);
for (auto& info : propertyInfos) {
@@ -51,8 +52,8 @@ std::vector Binder::bindPropertyInfo(
return propertyInfos;
}
-static uint32_t bindPrimaryKey(
- const std::string& pkColName, const std::vector<PropertyInfo>& infos) {
+static uint32_t bindPrimaryKey(const std::string& pkColName,
+ const std::vector<PropertyInfo>& infos) {
uint32_t primaryKeyIdx = UINT32_MAX;
for (auto i = 0u; i < infos.size(); i++) {
if (infos[i].name == pkColName) {
@@ -137,8 +138,8 @@ BoundCreateTableInfo Binder::bindCreateRelTableInfo(const CreateTableInfo* info)
validateTableType(srcTableID, TableType::NODE);
auto dstTableID = bindTableID(extraInfo->dstTableName);
validateTableType(dstTableID, TableType::NODE);
- auto boundExtraInfo = std::make_unique<BoundExtraCreateRelTableInfo>(
- srcMultiplicity, dstMultiplicity, srcTableID, dstTableID, std::move(propertyInfos));
+ auto boundExtraInfo = std::make_unique<BoundExtraCreateRelTableInfo>(srcMultiplicity,
+ dstMultiplicity, srcTableID, dstTableID, std::move(propertyInfos));
return BoundCreateTableInfo(TableType::REL, info->tableName, std::move(boundExtraInfo));
}
@@ -167,7 +168,7 @@ BoundCreateTableInfo Binder::bindCreateRelTableGroupInfo(const CreateTableInfo*
std::unique_ptr<BoundStatement> Binder::bindCreateTable(const Statement& statement) {
auto& createTable = ku_dynamic_cast<const Statement&, const CreateTable&>(statement);
auto tableName = createTable.getInfo()->tableName;
- if (catalog.containsTable(clientContext->getTx(), tableName)) {
+ if (clientContext->getCatalog()->containsTable(clientContext->getTx(), tableName)) {
throw BinderException(tableName + " already exists in catalog.");
}
auto boundCreateInfo = bindCreateTableInfo(createTable.getInfo());
@@ -178,12 +179,13 @@ std::unique_ptr<BoundStatement> Binder::bindDropTable(const Statement& statement
auto& dropTable = ku_dynamic_cast<const Statement&, const DropTable&>(statement);
auto tableName = dropTable.getTableName();
validateTableExist(tableName);
- auto tableID = catalog.getTableID(clientContext->getTx(), tableName);
- auto tableEntry = catalog.getTableCatalogEntry(clientContext->getTx(), tableID);
+ auto catalog = clientContext->getCatalog();
+ auto tableID = catalog->getTableID(clientContext->getTx(), tableName);
+ auto tableEntry = catalog->getTableCatalogEntry(clientContext->getTx(), tableID);
switch (tableEntry->getTableType()) {
case TableType::NODE: {
// Check node table is not referenced by rel table.
- for (auto& relTableEntry : catalog.getRelTableEntries(clientContext->getTx())) {
+ for (auto& relTableEntry : catalog->getRelTableEntries(clientContext->getTx())) {
if (relTableEntry->isParent(tableID)) {
throw BinderException(stringFormat("Cannot delete node table {} because it is "
"referenced by relationship table {}.",
@@ -191,7 +193,7 @@ std::unique_ptr<BoundStatement> Binder::bindDropTable(const Statement& statement
}
}
// Check node table is not referenced by rdf graph
- for (auto& rdfEntry : catalog.getRdfGraphEntries(clientContext->getTx())) {
+ for (auto& rdfEntry : catalog->getRdfGraphEntries(clientContext->getTx())) {
if (rdfEntry->isParent(tableID)) {
throw BinderException(stringFormat(
"Cannot delete node table {} because it is referenced by rdfGraph {}.",
@@ -201,7 +203,7 @@ std::unique_ptr<BoundStatement> Binder::bindDropTable(const Statement& statement
} break;
case TableType::REL: {
// Check rel table is not referenced by rel group.
- for (auto& relTableGroupEntry : catalog.getRelTableGroupEntries(clientContext->getTx())) {
+ for (auto& relTableGroupEntry : catalog->getRelTableGroupEntries(clientContext->getTx())) {
if (relTableGroupEntry->isParent(tableID)) {
throw BinderException(stringFormat("Cannot delete relationship table {} because it "
"is referenced by relationship group {}.",
@@ -209,7 +211,7 @@ std::unique_ptr<BoundStatement> Binder::bindDropTable(const Statement& statement
}
}
// Check rel table is not referenced by rdf graph.
- for (auto& rdfGraphEntry : catalog.getRdfGraphEntries(clientContext->getTx())) {
+ for (auto& rdfGraphEntry : catalog->getRdfGraphEntries(clientContext->getTx())) {
if (rdfGraphEntry->isParent(tableID)) {
throw BinderException(stringFormat(
"Cannot delete relationship table {} because it is referenced by rdfGraph {}.",
@@ -220,7 +222,7 @@ std::unique_ptr<BoundStatement> Binder::bindDropTable(const Statement& statement
case TableType::RDF: {
auto rdfGraphEntry = ku_dynamic_cast<TableCatalogEntry*, RDFGraphCatalogEntry*>(tableEntry);
// Check resource table is not referenced by rel table other than its triple table.
- for (auto& relTableEntry : catalog.getRelTableEntries(clientContext->getTx())) {
+ for (auto& relTableEntry : catalog->getRelTableEntries(clientContext->getTx())) {
if (relTableEntry->getTableID() == rdfGraphEntry->getResourceTripleTableID() ||
relTableEntry->getTableID() == rdfGraphEntry->getLiteralTripleTableID()) {
continue;
@@ -232,7 +234,7 @@ std::unique_ptr