diff --git a/.github/CONTRIBUTING.md b/.github/CONTRIBUTING.md index 81c2ceabb93..8ca9413821a 100644 --- a/.github/CONTRIBUTING.md +++ b/.github/CONTRIBUTING.md @@ -1 +1 @@ -Please review the [guidelines for contributing](http://netty.io/wiki/developer-guide.html) for this repository. +Please review the [guidelines for contributing](https://netty.io/wiki/developer-guide.html) for this repository. diff --git a/.github/scripts/build_affected_only.sh b/.github/scripts/build_affected_only.sh new file mode 100755 index 00000000000..3b468b066bd --- /dev/null +++ b/.github/scripts/build_affected_only.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# ---------------------------------------------------------------------------- +set -e + +if [ "$#" -lt 2 ]; then + echo "Expected branch and maven arguments" + exit 1 +fi + +MODULES=$(git diff --name-only "$1" | cut -d '/' -f 1 | sort -u | sed -n -e 'H;${x;s/\n/,/g;s/^,//;p;}') +MAVEN_ARGUMENTS=${*:2} +if [ -z "$MODULES" ]; then + echo "No changes detected, skipping build" + exit 0 +fi +echo "Changes detected, start the build" +echo "./mvnw -pl $MODULES -amd $MAVEN_ARGUMENTS" +./mvnw -pl "$MODULES" -amd "${@:2}" + diff --git a/.github/scripts/check_build_result.sh b/.github/scripts/check_build_result.sh new file mode 100755 index 00000000000..9ef6fcce449 --- /dev/null +++ b/.github/scripts/check_build_result.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# ---------------------------------------------------------------------------- +set -e + +if [ "$#" -ne 1 ]; then + echo "Expected build log as argument" + exit 1 +fi + +if grep -q 'BUILD FAILURE' "$1" ; then + echo "Build failure detected, please inspect build log" + exit 1 +else + echo "Build successful" + exit 0 +fi diff --git a/.github/scripts/check_leak.sh b/.github/scripts/check_leak.sh new file mode 100755 index 00000000000..7a1a39d3636 --- /dev/null +++ b/.github/scripts/check_leak.sh @@ -0,0 +1,31 @@ +#!/bin/bash +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# ---------------------------------------------------------------------------- +set -e + +if [ "$#" -ne 1 ]; then + echo "Expected build log as argument" + exit 1 +fi + +if grep -q 'LEAK:' "$1" ; then + echo "Leak detected, please inspect build log" + exit 1 +else + echo "No Leak detected" + exit 0 +fi + diff --git a/.github/scripts/merge_local_staging.sh b/.github/scripts/merge_local_staging.sh new file mode 100755 index 00000000000..324f6f74da6 --- /dev/null +++ b/.github/scripts/merge_local_staging.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +set -e +if [ "$#" -lt 2 ]; then + echo "Expected target directory and at least one local staging directory" + exit 1 +fi +TARGET=$1 + +for ((i=2; i<=$#; i++)) +do + DIR="${!i}" + SUB_DIR=$(ls -d "${DIR}"/* | awk -F / '{print $NF}') + + if [ ! 
-d "${TARGET}/${SUB_DIR}" ] + then + mkdir -p "${TARGET}/${SUB_DIR}" + fi + cat "${DIR}"/"${SUB_DIR}"/.index >> "${TARGET}/${SUB_DIR}"/.index + cp -r "${DIR}"/"${SUB_DIR}"/* "${TARGET}/${SUB_DIR}"/ +done diff --git a/.github/scripts/release_checkout_tag.sh b/.github/scripts/release_checkout_tag.sh new file mode 100755 index 00000000000..73972b8e85f --- /dev/null +++ b/.github/scripts/release_checkout_tag.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +set -e + +if [ "$#" -ne 1 ]; then + echo "Expected release.properties file" + exit 1 +fi + +TAG=$(grep scm.tag= "$1" | cut -d'=' -f2) + +echo "Checkout tag $TAG" +git checkout "$TAG" +exit 0 diff --git a/.github/scripts/release_rollback.sh b/.github/scripts/release_rollback.sh new file mode 100755 index 00000000000..52cd9a1dc69 --- /dev/null +++ b/.github/scripts/release_rollback.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +set -e + +if [ "$#" -ne 3 ]; then + echo "Expected release.properties file, repository name and branch" + exit 1 +fi + +TAG=$(grep scm.tag= "$1" | cut -d'=' -f2) +git remote set-url origin git@github.com:"$2".git +git fetch +git checkout "$3" +./mvnw -B --file pom.xml release:rollback +git push origin :"$TAG" diff --git a/.github/workflows/ci-build.yml b/.github/workflows/ci-build.yml new file mode 100644 index 00000000000..22d02c73a4b --- /dev/null +++ b/.github/workflows/ci-build.yml @@ -0,0 +1,79 @@ +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +name: Build project + +on: + push: + branches: [ "main"] + + schedule: + - cron: '30 1 * * 1' # At 01:30 on Monday, every Monday. 
+ + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryhandler.count=5 -Dmaven.wagon.httpconnectionManager.ttlSeconds=240 + +jobs: + build: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - setup: linux-x86_64-java11 + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml run build" + + name: ${{ matrix.setup }} + steps: + - uses: actions/checkout@v2 + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ matrix.setup }}-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-${{ matrix.setup }}- + ${{ runner.os }}-maven- + + # Enable caching of Docker layers + - uses: satackey/action-docker-layer-caching@v0.0.11 + continue-on-error: true + with: + key: build-docker-cache-${{ matrix.setup }}-{hash} + restore-keys: | + build-docker-cache-${{ matrix.setup }}- + build-docker-cache- + + - name: Build docker image + run: docker-compose ${{ matrix.docker-compose-build }} + + - name: Build project without leak detection + run: docker-compose ${{ matrix.docker-compose-run }} | tee build.output + + - name: Checking for test failures + run: ./.github/scripts/check_build_result.sh build.output + + - uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: target + path: | + **/target/surefire-reports/ + **/hs_err*.log diff --git a/.github/workflows/ci-deploy.yml b/.github/workflows/ci-deploy.yml new file mode 100644 index 00000000000..93dbbf45211 --- /dev/null +++ b/.github/workflows/ci-deploy.yml @@ -0,0 +1,143 @@ +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project 
licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +name: Deploy project + +on: + push: + branches: [ "main" ] + + schedule: + - cron: '30 1 * * 1' # At 01:30 on Monday, every Monday. + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryhandler.count=5 -Dmaven.wagon.httpconnectionManager.ttlSeconds=240 + +jobs: + stage-snapshot: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - setup: linux-x86_64-java11 + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml run stage-snapshot" + - setup: linux-aarch64 + docker-compose-build: "-f docker/docker-compose.centos-7.yaml build" + docker-compose-run: "-f docker/docker-compose.centos-7.yaml run cross-compile-aarch64-stage-snapshot" + + name: stage-snapshot-${{ matrix.setup }} + steps: + - uses: actions/checkout@v2 + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ matrix.setup }}-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-${{ matrix.setup }}- + ${{ runner.os }}-maven- + + # Enable caching of Docker layers + - uses: 
satackey/action-docker-layer-caching@v0.0.11 + env: + docker-cache-name: staging-${{ matrix.setup }}-cache-docker + continue-on-error: true + with: + key: ${{ runner.os }}-staging-${{ env.docker-cache-name }}-{hash} + restore-keys: | + ${{ runner.os }}-staging-${{ env.docker-cache-name }}- + + - name: Create local staging directory + run: mkdir -p ~/local-staging + + - name: Build docker image + run: docker-compose ${{ matrix.docker-compose-build }} + + - name: Stage snapshots to local staging directory + run: docker-compose ${{ matrix.docker-compose-run }} + + - name: Upload local staging directory + uses: actions/upload-artifact@v2 + with: + name: ${{ matrix.setup }}-local-staging + path: ~/local-staging + if-no-files-found: error + + deploy-staged-snapshots: + runs-on: ubuntu-18.04 + # Wait until we have staged everything + needs: stage-snapshot + steps: + - uses: actions/checkout@v2 + + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-deploy-staged-snapshots-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-deploy-staged-snapshots- + ${{ runner.os }}-maven- + + # Setup some env to re-use later. + - name: Prepare environment variables + run: | + echo "LOCAL_STAGING_DIR=$HOME/local-staging" >> $GITHUB_ENV + + # Hardcode the staging artifacts that need to be downloaded. + # These must match the matrix setups. There is currently no way to pull this out of the config. 
+ - name: Download linux-aarch64 staging directory + uses: actions/download-artifact@v2 + with: + name: linux-aarch64-local-staging + path: ~/linux-aarch64-local-staging + + - name: Download linux-x86_64-java11 staging directory + uses: actions/download-artifact@v2 + with: + name: linux-x86_64-java11-local-staging + path: ~/linux-x86_64-java11-local-staging + + - name: Merge staging repositories + run: | + mkdir -p ~/local-staging/deferred + cat ~/linux-aarch64-local-staging/deferred/.index >> ~/local-staging/deferred/.index + cp -r ~/linux-aarch64-local-staging/deferred/* ~/local-staging/deferred/ + cat ~/linux-x86_64-java11-local-staging/deferred/.index >> ~/local-staging/deferred/.index + cp -r ~/linux-x86_64-java11-local-staging/deferred/* ~/local-staging/deferred/ + + - uses: s4u/maven-settings-action@v2.2.0 + with: + servers: | + [{ + "id": "sonatype-nexus-snapshots", + "username": "${{ secrets.SONATYPE_USERNAME }}", + "password": "${{ secrets.SONATYPE_PASSWORD }}" + }] + + - name: Deploy local staged artifacts + run: ./mvnw -B --file pom.xml org.sonatype.plugins:nexus-staging-maven-plugin:deploy-staged -DaltStagingDirectory=$LOCAL_STAGING_DIR \ No newline at end of file diff --git a/.github/workflows/ci-pr-reports.yml b/.github/workflows/ci-pr-reports.yml new file mode 100644 index 00000000000..acbb719ca41 --- /dev/null +++ b/.github/workflows/ci-pr-reports.yml @@ -0,0 +1,54 @@ +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +name: PR Reports +on: + workflow_run: + workflows: [ "Build PR" ] + types: + - completed +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryhandler.count=5 -Dmaven.wagon.httpconnectionManager.ttlSeconds=240 + +jobs: + tests: + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + ignore-if-missing: [false] + include: + - setup: linux-x86_64-java11 + - setup: linux-x86_64-java16 + - setup: linux-x86_64-java11-boringssl + - setup: windows-x86_64-java11-boringssl + continue-on-error: ${{ matrix.ignore-if-missing }} + steps: + - name: Download Artifacts + uses: dawidd6/action-download-artifact@v2.14.1 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + workflow: ${{ github.event.workflow_run.workflow_id }} + workflow_conclusion: completed + commit: ${{ github.event.workflow_run.head_commit.id }} + # File location set in ci-pr.yml and must be coordinated. 
+ name: test-results-${{ matrix.setup }} + - name: Publish Test Report + uses: scacap/action-surefire-report@v1.0.13 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + report_paths: '**/target/surefire-reports/TEST-*.xml' + commit: ${{ github.event.workflow_run.head_commit.id }} + check_name: ${{ matrix.setup }} test reports \ No newline at end of file diff --git a/.github/workflows/ci-pr.yml b/.github/workflows/ci-pr.yml new file mode 100644 index 00000000000..17c1104088a --- /dev/null +++ b/.github/workflows/ci-pr.yml @@ -0,0 +1,206 @@ +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# ---------------------------------------------------------------------------- +name: Build PR + +on: + pull_request: + branches: [ "main"] + + # Allows you to run this workflow manually from the Actions tab + workflow_dispatch: + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryhandler.count=5 -Dmaven.wagon.httpconnectionManager.ttlSeconds=240 + +jobs: + verify-pr: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-verify-pr-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-verify-pr- + ${{ runner.os }}-maven- + - name: Verify with Maven + run: ./mvnw -B -ntp --file pom.xml verify -DskipTests=true + + build-pr-windows: + runs-on: windows-2016 + name: windows-x86_64-java11-boringssl + needs: verify-pr + steps: + - uses: actions/checkout@v2 + + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: 11 + + # Cache .m2/repository + # Caching of maven dependencies + - uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: pr-windows-x86_64-maven-cache-${{ hashFiles('**/pom.xml') }} + restore-keys: | + pr-windows-x86_64-maven-cache- + + - name: Build project + run: ./mvnw.cmd -B -ntp --file pom.xml clean package -Pboringssl -DskipHttp2Testsuite=true -DskipAutobahnTestsuite=true + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v2 + with: + name: test-results-windows-x86_64-java11-boringssl + path: '**/target/surefire-reports/TEST-*.xml' + + - uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: build-pr-windows-target + path: | + **/target/surefire-reports/ + **/hs_err*.log + + build-pr-aarch64: + name: linux-aarch64-verify-native + # The host should always be Linux + runs-on: 
ubuntu-20.04 + needs: verify-pr + steps: + - uses: actions/checkout@v2 + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-build-pr-aarch64-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-build-pr-aarch64- + ${{ runner.os }}-maven- + + - uses: uraimo/run-on-arch-action@v2.0.9 + name: Run commands + id: runcmd + with: + arch: aarch64 + distro: ubuntu20.04 + + # Not required, but speeds up builds by storing container images in + # a GitHub package registry. + githubToken: ${{ github.token }} + + # Mount the .m2/repository + dockerRunArgs: | + --volume "/home/runner/.m2/repository/:/root/.m2/repository" + + # Install dependencies + install: | + apt-get update -q -y + apt-get install -q -y openjdk-11-jdk autoconf automake libtool make tar maven git + + # Compile native code and the modules it depend on and run NativeLoadingTest. This is enough to ensure + # we can load the native module on aarch64 + # + # Use tcnative.classifier that is empty as we don't support using the shared lib version on ubuntu. 
+ run: | + JAVA_HOME=/usr/lib/jvm/java-11-openjdk-arm64 ./mvnw -B -ntp -pl testsuite-native -am clean package -DskipTests=true -Dcheckstyle.skip=true -DskipNativeTestsuite=false -Dtcnative.classifier= + + build-pr: + runs-on: ubuntu-latest + strategy: + matrix: + include: + - setup: linux-x86_64-java11 + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml run build-leak" + - setup: linux-x86_64-java11-graal + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.graalvm111.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.graalvm111.yaml run build-leak" + - setup: linux-x86_64-java16 + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.116.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.116.yaml run build-leak" + - setup: linux-x86_64-java17 + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.117.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.117.yaml run build-leak" + - setup: linux-x86_64-java11-boringssl + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml run build-leak-boringssl-static" + - setup: linux-x86_64-java11-unsafe-buffer + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.111.yaml run build-unsafe-buffer" + + name: ${{ matrix.setup }} build + needs: verify-pr + steps: + - uses: actions/checkout@v2 + + # Cache .m2/repository + - name: Cache local Maven repository + uses: 
actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ matrix.setup }}-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-${{ matrix.setup }}- + ${{ runner.os }}-maven- + + # Enable caching of Docker layers + - uses: satackey/action-docker-layer-caching@v0.0.11 + continue-on-error: true + with: + key: build-docker-cache-${{ matrix.setup }}-{hash} + restore-keys: | + build-docker-cache-${{ matrix.setup }}- + build-docker-cache- + + - name: Build docker image + run: docker-compose ${{ matrix.docker-compose-build }} + + - name: Build project with leak detection + run: docker-compose ${{ matrix.docker-compose-run }} | tee build-leak.output + + - name: Checking for test failures + run: ./.github/scripts/check_build_result.sh build-leak.output + + - name: Checking for detected leak + run: ./.github/scripts/check_leak.sh build-leak.output + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v2 + with: + name: test-results-${{ matrix.setup }} + path: '**/target/surefire-reports/TEST-*.xml' + + - uses: actions/upload-artifact@v2 + if: ${{ failure() }} + with: + name: build-${{ matrix.setup }}-target + path: | + **/target/surefire-reports/ + **/hs_err*.log diff --git a/.github/workflows/ci-release.yml b/.github/workflows/ci-release.yml new file mode 100644 index 00000000000..aa431372c72 --- /dev/null +++ b/.github/workflows/ci-release.yml @@ -0,0 +1,249 @@ +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +name: Release + +on: + + # Releases can only be triggered via the action tab + workflow_dispatch: + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryhandler.count=5 -Dmaven.wagon.httpconnectionManager.ttlSeconds=240 + +jobs: + prepare-release: + runs-on: ubuntu-18.04 + steps: + - uses: actions/checkout@v2 + with: + ref: main + + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + + - name: Setup git configuration + run: | + git config --global user.email "netty-project-bot@users.noreply.github.com" + git config --global user.name "Netty Project Bot" + + - name: Install SSH key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.SSH_PRIVATE_KEY_PEM }} + known_hosts: ${{ secrets.SSH_KNOWN_HOSTS }} + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-prepare-release-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-prepare-release- + ${{ runner.os }}-maven- + + - name: Prepare release with Maven + run: | + ./mvnw -B -ntp --file pom.xml release:prepare -DpreparationGoals=clean -DskipTests=true + ./mvnw -B -ntp clean + + - name: Checkout tag + run: ./.github/scripts/release_checkout_tag.sh release.properties + + - name: Upload workspace + uses: actions/upload-artifact@v2 + with: + name: prepare-release-workspace + path: ${{ github.workspace }}/** + + stage-release-linux: + 
runs-on: ubuntu-latest + needs: prepare-release + strategy: + matrix: + include: + - setup: linux-x86_64-java11 + docker-compose-build: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.11.yaml build" + docker-compose-run: "-f docker/docker-compose.yaml -f docker/docker-compose.centos-6.11.yaml run stage-release" + - setup: linux-aarch64 + docker-compose-build: "-f docker/docker-compose.centos-7.yaml build" + docker-compose-run: "-f docker/docker-compose.centos-7.yaml run cross-compile-aarch64-stage-release" + + name: stage-release-${{ matrix.setup }} + + steps: + - name: Download release-workspace + uses: actions/download-artifact@v2 + with: + name: prepare-release-workspace + path: ./prepare-release-workspace/ + + - name: Adjust mvnw permissions + run: chmod 755 ./prepare-release-workspace/mvnw + + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + + - name: Setup git configuration + run: | + git config --global user.email "netty-project-bot@users.noreply.github.com" + git config --global user.name "Netty Project Bot" + + - name: Install SSH key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.SSH_PRIVATE_KEY_PEM }} + known_hosts: ${{ secrets.SSH_KNOWN_HOSTS }} + + # Enable caching of Docker layers + - uses: satackey/action-docker-layer-caching@v0.0.11 + continue-on-error: true + with: + key: ${{ runner.os }}-staging-docker-cache-${{ matrix.setup }}-{hash} + restore-keys: | + ${{ runner.os }}-staging-docker-cache-${{ matrix.setup }}- + ${{ runner.os }}-staging-docker-cache- + + - uses: s4u/maven-settings-action@v2.2.0 + with: + servers: | + [{ + "id": "sonatype-nexus-staging", + "username": "${{ secrets.SONATYPE_USERNAME }}", + "password": "${{ secrets.SONATYPE_PASSWORD }}" + }] + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ matrix.setup }}-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ 
runner.os }}-maven-${{ matrix.setup }}- + ${{ runner.os }}-maven- + + - name: Create local staging directory + run: mkdir -p ~/local-staging + + - name: Build docker image + working-directory: ./prepare-release-workspace/ + run: docker-compose ${{ matrix.docker-compose-build }} + + - name: Stage release to local staging directory + working-directory: ./prepare-release-workspace/ + env: + GPG_PRIVATE_KEY: ${{ secrets.GPG_PRIVATE_KEY }} + GPG_KEYNAME: ${{ secrets.GPG_KEYNAME }} + GPG_PASSPHRASE: ${{ secrets.GPG_PASSPHRASE }} + run: docker-compose ${{ matrix.docker-compose-run }} + + - name: Upload local staging directory + uses: actions/upload-artifact@v2 + with: + name: ${{ matrix.setup }}-local-staging + path: ~/local-staging + if-no-files-found: error + + - name: Rollback release on failure + working-directory: ./prepare-release-workspace/ + if: ${{ failure() }} + # Rollback the release in case of an failure + run: bash ./.github/scripts/release_rollback.sh release.properties netty/netty main + + deploy-staged-release: + runs-on: ubuntu-18.04 + # Wait until we have staged everything + needs: stage-release-linux + steps: + - name: Download release-workspace + uses: actions/download-artifact@v2 + with: + name: prepare-release-workspace + path: ./prepare-release-workspace/ + + - name: Adjust mvnw permissions + run: chmod 755 ./prepare-release-workspace/mvnw + + - name: Set up JDK 11 + uses: actions/setup-java@v1 + with: + java-version: '11' + + - name: Setup git configuration + run: | + git config --global user.email "netty-project-bot@users.noreply.github.com" + git config --global user.name "Netty Project Bot" + + - name: Install SSH key + uses: shimataro/ssh-key-action@v2 + with: + key: ${{ secrets.SSH_PRIVATE_KEY_PEM }} + known_hosts: ${{ secrets.SSH_KNOWN_HOSTS }} + + # Hardcode the staging artifacts that need to be downloaded. + # These must match the matrix setups. There is currently no way to pull this out of the config. 
+ - name: Download linux-aarch64 staging directory + uses: actions/download-artifact@v2 + with: + name: linux-aarch64-local-staging + path: ~/linux-aarch64-local-staging + + - name: Download linux-x86_64-java11 staging directory + uses: actions/download-artifact@v2 + with: + name: linux-x86_64-java11-local-staging + path: ~/linux-x86_64-java11-local-staging + + # This step takes care of merging all the previous staged repositories in a way that will allow us to deploy + # all together with one maven command. + - name: Merge staging repositories + working-directory: ./prepare-release-workspace/ + run: bash ./.github/scripts/merge_local_staging.sh /home/runner/local-staging/staging ~/linux-aarch64-local-staging/staging ~/linux-x86_64-java11-local-staging/staging + + - uses: s4u/maven-settings-action@v2.2.0 + with: + servers: | + [{ + "id": "sonatype-nexus-staging", + "username": "${{ secrets.SONATYPE_USERNAME }}", + "password": "${{ secrets.SONATYPE_PASSWORD }}" + }] + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-deploy-staged-release-${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-deploy-staged-release- + ${{ runner.os }}-maven- + + - name: Deploy local staged artifacts + working-directory: ./prepare-release-workspace/ + # If we don't want to close the repository we can add -DskipStagingRepositoryClose=true + run: ./mvnw -B -ntp --file pom.xml org.sonatype.plugins:nexus-staging-maven-plugin:deploy-staged -DnexusUrl=https://oss.sonatype.org -DserverId=sonatype-nexus-staging -DaltStagingDirectory=/home/runner/local-staging -DskipStagingRepositoryClose=true + + - name: Rollback release on failure + working-directory: ./prepare-release-workspace/ + if: ${{ failure() }} + # Rollback the release in case of an failure + run: bash ./.github/scripts/release_rollback.sh release.properties netty/netty main diff --git 
a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml new file mode 100644 index 00000000000..be5da1671c8 --- /dev/null +++ b/.github/workflows/codeql-analysis.yml @@ -0,0 +1,102 @@ +# ---------------------------------------------------------------------------- +# Copyright 2021 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# ---------------------------------------------------------------------------- +# For most projects, this workflow file will not need changing; you simply need +# to commit it to your repository. +# +# You may wish to alter this file to override the set of languages analyzed, +# or to provide custom queries or build logic. +name: "CodeQL" + +on: + push: + branches: ["4.1", main] + pull_request: + # The branches below must be a subset of the branches above + branches: ["4.1", main] + schedule: + - cron: '0 13 * * 3' + +env: + MAVEN_OPTS: -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryhandler.count=5 -Dmaven.wagon.httpconnectionManager.ttlSeconds=240 + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + # Override automatic language detection by changing the below list + # Supported options are ['csharp', 'cpp', 'go', 'java', 'javascript', 'python'] + language: ['java', 'cpp' ] + # Learn more... 
+ # https://docs.github.com/en/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#overriding-automatic-language-detection + + steps: + - name: Checkout repository + uses: actions/checkout@v2 + with: + # We must fetch at least the immediate parents so that if this is + # a pull request then we can checkout the head. + fetch-depth: 2 + + # Cache .m2/repository + - name: Cache local Maven repository + uses: actions/cache@v2 + with: + path: ~/.m2/repository + key: ${{ runner.os }}-maven-${{ matrix.language }} ${{ hashFiles('**/pom.xml') }} + restore-keys: | + ${{ runner.os }}-maven-${{ matrix.language }} + ${{ runner.os }}-maven- + + # If this run was triggered by a pull request event, then checkout + # the head of the pull request instead of the merge commit. + - run: git checkout HEAD^2 + if: ${{ github.event_name == 'pull_request' }} + + # Initializes the CodeQL tools for scanning. + - name: Initialize CodeQL + uses: github/codeql-action/init@v1 + with: + languages: ${{ matrix.language }} + # If you wish to specify custom queries, you can do so here or in a config file. + # By default, queries listed here will override any specified in a config file. + # Prefix the list here with "+" to use these queries and those in the config file. + # queries: ./path/to/local/query, your-org/your-repo/queries@main + + + # Autobuild attempts to build any compiled languages (C/C++, C#, or Java). + # If this step fails, then you should remove it and run the build manually (see below) + # - name: Autobuild + # uses: github/codeql-action/autobuild@v1 + + # â„šī¸ Command-line programs to run using the OS shell. + # 📚 https://git.io/JvXDl + + # âœī¸ If the Autobuild fails above, remove it and uncomment the following three lines + # and modify them (or add more) to build your code if your project + # uses a compiled language + - uses: actions/setup-java@v1 + with: + java-version: '11' # The JDK version to make available on the path. 
+ + - name: Compile project + run: ./mvnw -B -ntp clean package -DskipTests=true + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v1 diff --git a/.gitignore b/.gitignore index a3c75313a21..5188d6518b5 100644 --- a/.gitignore +++ b/.gitignore @@ -8,6 +8,7 @@ *.ipr *.iws .idea/ +.shelf/ # Geany project file .geany @@ -31,3 +32,17 @@ hs_err_pid*.log dependency-reduced-pom.xml + +*/.unison.* + +# exclude mainframer files +mainframer +.mainframer + +# exclude docker-sync stuff +.docker-sync +*/.docker-sync + +# exclude vscode files +.vscode/ +*.factorypath diff --git a/.lgtm.yml b/.lgtm.yml new file mode 100644 index 00000000000..9aa74b180be --- /dev/null +++ b/.lgtm.yml @@ -0,0 +1,13 @@ +extraction: + java: + prepare: + packages: + - "autoconf" + - "automake" + - "libtool" + - "make" + - "tar" + - "libaio-dev" + - "libssl-dev" + - "libapr1-dev" + - "lksctp-tools" diff --git a/.mvn/wrapper/maven-wrapper.properties b/.mvn/wrapper/maven-wrapper.properties index a447c9fa812..08e7e646f3b 100644 --- a/.mvn/wrapper/maven-wrapper.properties +++ b/.mvn/wrapper/maven-wrapper.properties @@ -1 +1 @@ -distributionUrl=https://repo1.maven.org/maven2/org/apache/maven/apache-maven/3.5.2/apache-maven-3.5.2-bin.zip \ No newline at end of file +distributionUrl=https://downloads.apache.org/maven/maven-3/3.8.1/binaries/apache-maven-3.8.1-bin.zip diff --git a/.travis.yml b/.travis.yml deleted file mode 100644 index b35e822464d..00000000000 --- a/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: java -jdk: - - oraclejdk7 - - openjdk7 -branches: - only: - - master - - 3 - - 3.5 -before_install: 'mvn -version' -install: 'mvn clean install -Pfull -DskipTests' - diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1a8b4d84cd5..a18232f285e 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -42,5 +42,5 @@ My system has IPv6 disabled. 
## How to contribute your work -Before submitting a pull request or push a commit, please read [our developer guide](http://netty.io/wiki/developer-guide.html). +Before submitting a pull request or push a commit, please read [our developer guide](https://netty.io/wiki/developer-guide.html). diff --git a/LICENSE.txt b/LICENSE.txt index d6456956733..62589edd12a 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -1,7 +1,7 @@ Apache License Version 2.0, January 2004 - http://www.apache.org/licenses/ + https://www.apache.org/licenses/ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION @@ -193,7 +193,7 @@ you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/NOTICE.txt b/NOTICE.txt index f973663670b..5ce10823857 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -4,7 +4,7 @@ Please visit the Netty web site for more information: - * http://netty.io/ + * https://netty.io/ Copyright 2014 The Netty Project @@ -12,7 +12,7 @@ The Netty Project licenses this file to you under the Apache License, version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at: - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -56,7 +56,7 @@ facade for Java, which can be obtained at: * LICENSE: * license/LICENSE.slf4j.txt (MIT License) * HOMEPAGE: - * http://www.slf4j.org/ + * https://www.slf4j.org/ This product contains a modified portion of 'Apache Harmony', an open source Java SE, which can be obtained at: @@ -66,7 +66,7 @@ Java SE, which can be obtained at: * LICENSE: * license/LICENSE.harmony.txt (Apache License 2.0) * HOMEPAGE: - * http://archive.apache.org/dist/harmony/ + * https://archive.apache.org/dist/harmony/ This product contains a modified portion of 'jbzip2', a Java bzip2 compression and decompression library written by Matthew J. Francis. It can be obtained at: @@ -125,6 +125,14 @@ and decompression library, which can be obtained at: * HOMEPAGE: * https://github.com/jponge/lzma-java +This product optionally depends on 'zstd-jni', a zstd-jni Java compression +and decompression library, which can be obtained at: + + * LICENSE: + * license/LICENSE.zstd-jni.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/luben/zstd-jni + This product contains a modified portion of 'jfastlz', a Java port of FastLZ compression and decompression library written by William Kinney. It can be obtained at: @@ -148,7 +156,7 @@ equivalent functionality. 
It can be obtained at: * LICENSE: * license/LICENSE.bouncycastle.txt (MIT License) * HOMEPAGE: - * http://www.bouncycastle.org/ + * https://www.bouncycastle.org/ This product optionally depends on 'Snappy', a compression library produced by Google Inc, which can be obtained at: @@ -162,9 +170,9 @@ This product optionally depends on 'JBoss Marshalling', an alternative Java serialization API, which can be obtained at: * LICENSE: - * license/LICENSE.jboss-marshalling.txt (GNU LGPL 2.1) + * license/LICENSE.jboss-marshalling.txt (Apache License 2.0) * HOMEPAGE: - * http://www.jboss.org/jbossmarshalling + * https://github.com/jboss-remoting/jboss-marshalling This product optionally depends on 'Caliper', Google's micro- benchmarking framework, which can be obtained at: @@ -180,7 +188,7 @@ framework, which can be obtained at: * LICENSE: * license/LICENSE.commons-logging.txt (Apache License 2.0) * HOMEPAGE: - * http://commons.apache.org/logging/ + * https://commons.apache.org/logging/ This product optionally depends on 'Apache Log4J', a logging framework, which can be obtained at: @@ -188,7 +196,7 @@ can be obtained at: * LICENSE: * license/LICENSE.log4j.txt (Apache License 2.0) * HOMEPAGE: - * http://logging.apache.org/log4j/ + * https://logging.apache.org/log4j/ This product optionally depends on 'Aalto XML', an ultra-high performance non-blocking XML processor, which can be obtained at: @@ -196,7 +204,7 @@ non-blocking XML processor, which can be obtained at: * LICENSE: * license/LICENSE.aalto-xml.txt (Apache License 2.0) * HOMEPAGE: - * http://wiki.fasterxml.com/AaltoHome + * https://wiki.fasterxml.com/AaltoHome This product contains a modified version of 'HPACK', a Java implementation of the HTTP/2 HPACK algorithm written by Twitter. It can be obtained at: @@ -205,6 +213,22 @@ the HTTP/2 HPACK algorithm written by Twitter. 
It can be obtained at: * license/LICENSE.hpack.txt (Apache License 2.0) * HOMEPAGE: * https://github.com/twitter/hpack + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Cory Benfield. It can be obtained at: + + * LICENSE: + * license/LICENSE.hyper-hpack.txt (MIT License) + * HOMEPAGE: + * https://github.com/python-hyper/hpack/ + +This product contains a modified version of 'HPACK', a Java implementation of +the HTTP/2 HPACK algorithm written by Tatsuhiro Tsujikawa. It can be obtained at: + + * LICENSE: + * license/LICENSE.nghttp2-hpack.txt (MIT License) + * HOMEPAGE: + * https://github.com/nghttp2/nghttp2/ This product contains a modified portion of 'Apache Commons Lang', a Java library provides utilities for the java.lang API, which can be obtained at: @@ -221,3 +245,20 @@ This product contains the Maven wrapper scripts from 'Maven Wrapper', that provi * license/LICENSE.mvn-wrapper.txt (Apache License 2.0) * HOMEPAGE: * https://github.com/takari/maven-wrapper + +This product contains the dnsinfo.h header file, that provides a way to retrieve the system DNS configuration on MacOS. +This private header is also used by Apple's open source + mDNSResponder (https://opensource.apple.com/tarballs/mDNSResponder/). 
+ + * LICENSE: + * license/LICENSE.dnsinfo.txt (Apple Public Source License 2.0) + * HOMEPAGE: + * https://www.opensource.apple.com/source/configd/configd-453.19/dnsinfo/dnsinfo.h + +This product optionally depends on 'Brotli4j', Brotli compression and +decompression for Java., which can be obtained at: + + * LICENSE: + * license/LICENSE.brotli4j.txt (Apache License 2.0) + * HOMEPAGE: + * https://github.com/hyperxpro/Brotli4j diff --git a/README.md b/README.md index 5c4c4b5afd8..9176ac5eaa9 100644 --- a/README.md +++ b/README.md @@ -1,33 +1,35 @@ +![Build project](https://github.com/netty/netty/workflows/Build%20project/badge.svg) + # Netty Project Netty is an asynchronous event-driven network application framework for rapid development of maintainable high performance protocol servers & clients. ## Links -* [Web Site](http://netty.io/) -* [Downloads](http://netty.io/downloads.html) -* [Documentation](http://netty.io/wiki/) +* [Web Site](https://netty.io/) +* [Downloads](https://netty.io/downloads.html) +* [Documentation](https://netty.io/wiki/) * [@netty_project](https://twitter.com/netty_project) ## How to build -For the detailed information about building and developing Netty, please visit [the developer guide](http://netty.io/wiki/developer-guide.html). This page only gives very basic information. +For the detailed information about building and developing Netty, please visit [the developer guide](https://netty.io/wiki/developer-guide.html). This page only gives very basic information. You require the following to build Netty: -* Latest stable [Oracle JDK 7](http://www.oracle.com/technetwork/java/) -* Latest stable [Apache Maven](http://maven.apache.org/) -* If you are on Linux, you need [additional development packages](http://netty.io/wiki/native-transports.html) installed on your system, because you'll build the native transport. 
+* Latest stable [OpenJDK 8](https://adoptopenjdk.net) +* Latest stable [Apache Maven](https://maven.apache.org/) +* If you are on Linux, you need [additional development packages](https://netty.io/wiki/native-transports.html) installed on your system, because you'll build the native transport. -Note that this is build-time requirement. JDK 5 (for 3.x) or 6 (for 4.0+) is enough to run your Netty-based application. +Note that this is build-time requirement. JDK 5 (for 3.x) or 6 (for 4.0+ / 4.1+) is enough to run your Netty-based application. ## Branches to look -Development of all versions takes place in each branch whose name is identical to `.`. For example, the development of 3.9 and 4.0 resides in [the branch '3.9'](https://github.com/netty/netty/tree/3.9) and [the branch '4.0'](https://github.com/netty/netty/tree/4.0) respectively. +Development of all versions takes place in each branch whose name is identical to `.`. For example, the development of 3.9 and 4.1 resides in [the branch '3.9'](https://github.com/netty/netty/tree/3.9) and [the branch '4.1'](https://github.com/netty/netty/tree/4.1) respectively. -## Usage with JDK 9 +## Usage with JDK 9+ -Netty can be used in modular JDK9 applications as a collection of automatic modules. The module names follow the +Netty can be used in modular JDK9+ applications as a collection of automatic modules. The module names follow the reverse-DNS style, and are derived from subproject names rather than root packages due to historical reasons. They are listed below: diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 00000000000..12aa95ea99a --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,5 @@ +# Reporting a security issue + +If you think the bug you found is likely to make Netty-based applications vulnerable to an attack, +please do not use our public issue tracker +but report it to [the dedicated private Google Group](https://groups.google.com/d/forum/netty-security). 
diff --git a/all/pom.xml b/all/pom.xml index 31bb267bc04..0afc0ab2970 100644 --- a/all/pom.xml +++ b/all/pom.xml @@ -6,7 +6,7 @@ ~ version 2.0 (the "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at: ~ - ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ https://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,13 +14,13 @@ ~ License for the specific language governing permissions and limitations ~ under the License. --> - + 4.0.0 io.netty netty-parent - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT netty-all @@ -31,8 +31,21 @@ ${project.build.directory}/src ${project.build.directory}/versions + true + + + + io.netty + netty-bom + ${project.version} + pom + import + + + + @@ -51,15 +64,27 @@ ${project.groupId} netty-transport-native-epoll - ${project.version} linux-x86_64 compile true + + ${project.groupId} + netty-transport-native-epoll + linux-aarch_64 + compile + true + ${project.groupId} netty-transport-native-kqueue - ${project.version} + osx-x86_64 + compile + true + + + ${project.groupId} + netty-resolver-dns-native-macos osx-x86_64 compile true @@ -75,15 +100,27 @@ ${project.groupId} netty-transport-native-epoll - ${project.version} linux-x86_64 compile true + + ${project.groupId} + netty-transport-native-epoll + linux-aarch_64 + compile + true + ${project.groupId} netty-transport-native-kqueue - ${project.version} + osx-x86_64 + compile + true + + + ${project.groupId} + netty-resolver-dns-native-macos osx-x86_64 compile true @@ -111,6 +148,19 @@ compile true + + + ${project.groupId} + netty-transport-native-kqueue + compile + true + + + ${project.groupId} + netty-resolver-dns-native-macos + compile + true + + + ${project.groupId} + netty-transport-native-epoll + compile + true + @@ -153,6 +218,19 @@ compile true + + ${project.groupId} + 
netty-resolver-dns-native-macos + compile + true + + + + ${project.groupId} + netty-transport-native-epoll + compile + true + @@ -173,331 +251,145 @@ compile true + + ${project.groupId} + netty-resolver-dns-native-macos + compile + true + + + + ${project.groupId} + netty-transport-native-epoll + compile + true + - - - full - - - - - - - maven-jxr-plugin - - - generate-xref - package - - jxr - - - - - true - ${project.build.directory}/xref - ${project.build.directory}/api - Netty Source Xref (${project.version}) - Netty Source Xref (${project.version}) - - - - ${project.groupId} - netty-build - 19 - - - - - com.puppycrawl.tools - checkstyle - - - - - com.puppycrawl.tools - checkstyle - 7.3 - - - - - - - maven-javadoc-plugin - - - attach-javadocs - - jar - - - - - ${quickbuild} - *.internal,*.example - true - ${project.build.directory}/api - ${project.basedir}/src/javadoc/overview.html - Netty API Reference (${project.version}) - Netty API Reference (${project.version}) - false - - -link http://docs.oracle.com/javase/7/docs/api/ - -link https://developers.google.com/protocol-buffers/docs/reference/java/ - -link http://docs.oracle.com/javaee/6/api/ - -link http://www.slf4j.org/apidocs/ - -link https://commons.apache.org/proper/commons-logging/apidocs/ - -link http://logging.apache.org/log4j/1.2/apidocs/ - - -group "Low-level data representation" io.netty.buffer* - -group "Central interface for all I/O operations" io.netty.channel* - -group "Client & Server bootstrapping utilities" io.netty.bootstrap* - -group "Reusable I/O event interceptors" io.netty.handler* - -group "Miscellaneous" io.netty.util* - - en_US - - - - - - - coverage - - - - org.jacoco - jacoco-maven-plugin - - - jacoco-merge - prepare-package - - merge - - - - - ${project.parent.build.directory}/.. 
- - **/target/jacoco.exec - - - - - - - jacoco-report - prepare-package - - report - - - ${project.build.directory}/jacoco-report - - - - - - - - ${project.groupId} netty-buffer - ${project.version} compile true ${project.groupId} netty-codec - ${project.version} compile true ${project.groupId} netty-codec-dns - ${project.version} compile true ${project.groupId} netty-codec-haproxy - ${project.version} compile true ${project.groupId} netty-codec-http - ${project.version} compile true ${project.groupId} netty-codec-http2 - ${project.version} compile true ${project.groupId} netty-codec-memcache - ${project.version} compile true ${project.groupId} netty-codec-mqtt - ${project.version} compile true ${project.groupId} netty-codec-redis - ${project.version} compile true ${project.groupId} netty-codec-smtp - ${project.version} compile true ${project.groupId} netty-codec-socks - ${project.version} compile true ${project.groupId} netty-codec-stomp - ${project.version} compile true ${project.groupId} netty-codec-xml - ${project.version} compile true ${project.groupId} netty-common - ${project.version} compile true ${project.groupId} netty-handler - ${project.version} compile true ${project.groupId} netty-handler-proxy - ${project.version} compile true ${project.groupId} netty-resolver - ${project.version} compile true ${project.groupId} netty-resolver-dns - ${project.version} compile true ${project.groupId} netty-transport - ${project.version} - compile - true - - - ${project.groupId} - netty-transport-rxtx - ${project.version} compile true ${project.groupId} netty-transport-sctp - ${project.version} - compile - true - - - ${project.groupId} - netty-transport-udt - ${project.version} - compile - true - - - ${project.groupId} - netty-example - ${project.version} - compile - true - - - - - com.google.protobuf - protobuf-java - compile - true - - - org.jboss.marshalling - jboss-marshalling compile true - - org.slf4j - slf4j-api - true - - - commons-logging - commons-logging - 
true - - - log4j - log4j - true - @@ -552,7 +444,7 @@ io/netty/internal/tcnative/**,io/netty/example/**,META-INF/native/libnetty_tcnative*,META-INF/native/include/**,META-INF/native/**/*.a - io/netty/**,META-INF/native/** + io/netty/**,META-INF/native/**,META-INF/native-image/** runtime ${project.groupId} ${project.build.outputDirectory} diff --git a/bom/pom.xml b/bom/pom.xml index 21a54ac545b..e22b1c7ba53 100644 --- a/bom/pom.xml +++ b/bom/pom.xml @@ -6,7 +6,7 @@ ~ version 2.0 (the "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at: ~ - ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ https://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,7 +14,7 @@ ~ License for the specific language governing permissions and limitations ~ under the License. --> - + 4.0.0 org.sonatype.oss @@ -25,22 +25,22 @@ io.netty netty-bom - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT pom Netty/BOM Netty (Bill of Materials) - http://netty.io/ + https://netty.io/ The Netty Project - http://netty.io/ + https://netty.io/ Apache License, Version 2.0 - http://www.apache.org/licenses/LICENSE-2.0 + https://www.apache.org/licenses/LICENSE-2.0 2008 @@ -57,9 +57,9 @@ netty.io The Netty Project Contributors netty@googlegroups.com - http://netty.io/ + https://netty.io/ The Netty Project - http://netty.io/ + https://netty.io/ @@ -69,167 +69,234 @@ io.netty netty-buffer - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-dns - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-haproxy - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-http - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-http2 - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty 
netty-codec-memcache - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-mqtt - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-redis - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-smtp - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-socks - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-stomp - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-codec-xml - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-common - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-dev-tools - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-handler - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-handler-proxy - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-resolver - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-resolver-dns - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-transport - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty - netty-transport-rxtx - 4.1.23.Final-SNAPSHOT + netty-transport-sctp + 5.0.0.Final-SNAPSHOT io.netty - netty-transport-sctp - 4.1.23.Final-SNAPSHOT + netty-example + 5.0.0.Final-SNAPSHOT io.netty - netty-transport-udt - 4.1.23.Final-SNAPSHOT + netty-all + 5.0.0.Final-SNAPSHOT io.netty - netty-example - 4.1.23.Final-SNAPSHOT + netty-resolver-dns-native-macos + 5.0.0.Final-SNAPSHOT io.netty - netty-all - 4.1.23.Final-SNAPSHOT + netty-resolver-dns-native-macos + 5.0.0.Final-SNAPSHOT + osx-x86_64 + + + io.netty + netty-transport-native-unix-common + 5.0.0.Final-SNAPSHOT io.netty netty-transport-native-unix-common - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT + linux-aarch_64 io.netty netty-transport-native-unix-common - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT linux-x86_64 io.netty netty-transport-native-unix-common - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT osx-x86_64 io.netty netty-transport-native-epoll - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT 
io.netty netty-transport-native-epoll - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT + linux-aarch_64 + + + io.netty + netty-transport-native-epoll + 5.0.0.Final-SNAPSHOT linux-x86_64 io.netty netty-transport-native-kqueue - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT io.netty netty-transport-native-kqueue - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT + osx-x86_64 + + + + + io.netty + netty-tcnative + ${tcnative.version} + + + io.netty + netty-tcnative + ${tcnative.version} + linux-x86_64 + + + io.netty + netty-tcnative + ${tcnative.version} + linux-aarch_64 + + + io.netty + netty-tcnative + ${tcnative.version} osx-x86_64 + + io.netty + netty-tcnative-boringssl-static + ${tcnative.version} + + + io.netty + netty-tcnative-boringssl-static + ${tcnative.version} + linux-x86_64 + + + io.netty + netty-tcnative-boringssl-static + ${tcnative.version} + linux-aarch_64 + + + io.netty + netty-tcnative-boringssl-static + ${tcnative.version} + osx_64 + + + io.netty + netty-tcnative-boringssl-static + ${tcnative.version} + windows_64 + diff --git a/buffer/pom.xml b/buffer/pom.xml index 70d49195472..6c038d750c3 100644 --- a/buffer/pom.xml +++ b/buffer/pom.xml @@ -6,7 +6,7 @@ ~ version 2.0 (the "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at: ~ - ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ https://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,13 +14,13 @@ ~ License for the specific language governing permissions and limitations ~ under the License. 
--> - + 4.0.0 io.netty netty-parent - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT netty-buffer diff --git a/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java index 7a42a011dfb..34e7fd7cbe7 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,12 +15,12 @@ */ package io.netty.buffer; +import io.netty.util.AsciiString; import io.netty.util.ByteProcessor; import io.netty.util.CharsetUtil; import io.netty.util.IllegalReferenceCountException; import io.netty.util.ResourceLeakDetector; import io.netty.util.ResourceLeakDetectorFactory; -import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import io.netty.util.internal.SystemPropertyUtil; import io.netty.util.internal.logging.InternalLogger; @@ -37,19 +37,25 @@ import java.nio.charset.Charset; import static io.netty.util.internal.MathUtil.isOutOfBounds; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; /** * A skeletal implementation of a buffer. 
*/ public abstract class AbstractByteBuf extends ByteBuf { private static final InternalLogger logger = InternalLoggerFactory.getInstance(AbstractByteBuf.class); - private static final String PROP_MODE = "io.netty.buffer.bytebuf.checkAccessible"; - private static final boolean checkAccessible; + private static final String PROP_CHECK_ACCESSIBLE = "io.netty.buffer.checkAccessible"; + static final boolean checkAccessible; // accessed from CompositeByteBuf + private static final String PROP_CHECK_BOUNDS = "io.netty.buffer.checkBounds"; + private static final boolean checkBounds; static { - checkAccessible = SystemPropertyUtil.getBoolean(PROP_MODE, true); + checkAccessible = SystemPropertyUtil.getBoolean(PROP_CHECK_ACCESSIBLE, true); + checkBounds = SystemPropertyUtil.getBoolean(PROP_CHECK_BOUNDS, true); if (logger.isDebugEnabled()) { - logger.debug("-D{}: {}", PROP_MODE, checkAccessible); + logger.debug("-D{}: {}", PROP_CHECK_ACCESSIBLE, checkAccessible); + logger.debug("-D{}: {}", PROP_CHECK_BOUNDS, checkBounds); } } @@ -58,14 +64,10 @@ public abstract class AbstractByteBuf extends ByteBuf { int readerIndex; int writerIndex; - private int markedReaderIndex; - private int markedWriterIndex; private int maxCapacity; protected AbstractByteBuf(int maxCapacity) { - if (maxCapacity < 0) { - throw new IllegalArgumentException("maxCapacity: " + maxCapacity + " (expected: >= 0)"); - } + checkPositiveOrZero(maxCapacity, "maxCapacity"); this.maxCapacity = maxCapacity; } @@ -97,11 +99,18 @@ public int readerIndex() { return readerIndex; } + private static void checkIndexBounds(final int readerIndex, final int writerIndex, final int capacity) { + if (readerIndex < 0 || readerIndex > writerIndex || writerIndex > capacity) { + throw new IndexOutOfBoundsException(String.format( + "readerIndex: %d, writerIndex: %d (expected: 0 <= readerIndex <= writerIndex <= capacity(%d))", + readerIndex, writerIndex, capacity)); + } + } + @Override public ByteBuf readerIndex(int readerIndex) { - if 
(readerIndex < 0 || readerIndex > writerIndex) { - throw new IndexOutOfBoundsException(String.format( - "readerIndex: %d (expected: 0 <= readerIndex <= writerIndex(%d))", readerIndex, writerIndex)); + if (checkBounds) { + checkIndexBounds(readerIndex, writerIndex, capacity()); } this.readerIndex = readerIndex; return this; @@ -114,10 +123,8 @@ public int writerIndex() { @Override public ByteBuf writerIndex(int writerIndex) { - if (writerIndex < readerIndex || writerIndex > capacity()) { - throw new IndexOutOfBoundsException(String.format( - "writerIndex: %d (expected: readerIndex(%d) <= writerIndex <= capacity(%d))", - writerIndex, readerIndex, capacity())); + if (checkBounds) { + checkIndexBounds(readerIndex, writerIndex, capacity()); } this.writerIndex = writerIndex; return this; @@ -125,10 +132,8 @@ public ByteBuf writerIndex(int writerIndex) { @Override public ByteBuf setIndex(int readerIndex, int writerIndex) { - if (readerIndex < 0 || readerIndex > writerIndex || writerIndex > capacity()) { - throw new IndexOutOfBoundsException(String.format( - "readerIndex: %d, writerIndex: %d (expected: 0 <= readerIndex <= writerIndex <= capacity(%d))", - readerIndex, writerIndex, capacity())); + if (checkBounds) { + checkIndexBounds(readerIndex, writerIndex, capacity()); } setIndex0(readerIndex, writerIndex); return this; @@ -175,44 +180,19 @@ public int maxWritableBytes() { return maxCapacity() - writerIndex; } - @Override - public ByteBuf markReaderIndex() { - markedReaderIndex = readerIndex; - return this; - } - - @Override - public ByteBuf resetReaderIndex() { - readerIndex(markedReaderIndex); - return this; - } - - @Override - public ByteBuf markWriterIndex() { - markedWriterIndex = writerIndex; - return this; - } - - @Override - public ByteBuf resetWriterIndex() { - writerIndex(markedWriterIndex); - return this; - } - @Override public ByteBuf discardReadBytes() { - ensureAccessible(); if (readerIndex == 0) { + ensureAccessible(); return this; } if (readerIndex != 
writerIndex) { setBytes(0, this, readerIndex, writerIndex - readerIndex); writerIndex -= readerIndex; - adjustMarkers(readerIndex); readerIndex = 0; } else { - adjustMarkers(readerIndex); + ensureAccessible(); writerIndex = readerIndex = 0; } return this; @@ -220,66 +200,56 @@ public ByteBuf discardReadBytes() { @Override public ByteBuf discardSomeReadBytes() { - ensureAccessible(); - if (readerIndex == 0) { - return this; - } - - if (readerIndex == writerIndex) { - adjustMarkers(readerIndex); - writerIndex = readerIndex = 0; - return this; - } + if (readerIndex > 0) { + if (readerIndex == writerIndex) { + ensureAccessible(); + writerIndex = readerIndex = 0; + return this; + } - if (readerIndex >= capacity() >>> 1) { - setBytes(0, this, readerIndex, writerIndex - readerIndex); - writerIndex -= readerIndex; - adjustMarkers(readerIndex); - readerIndex = 0; + if (readerIndex >= capacity() >>> 1) { + setBytes(0, this, readerIndex, writerIndex - readerIndex); + writerIndex -= readerIndex; + readerIndex = 0; + return this; + } } + ensureAccessible(); return this; } - protected final void adjustMarkers(int decrement) { - int markedReaderIndex = this.markedReaderIndex; - if (markedReaderIndex <= decrement) { - this.markedReaderIndex = 0; - int markedWriterIndex = this.markedWriterIndex; - if (markedWriterIndex <= decrement) { - this.markedWriterIndex = 0; - } else { - this.markedWriterIndex = markedWriterIndex - decrement; - } - } else { - this.markedReaderIndex = markedReaderIndex - decrement; - markedWriterIndex -= decrement; + // Called after a capacity reduction + protected final void trimIndicesToCapacity(int newCapacity) { + if (writerIndex() > newCapacity) { + setIndex0(Math.min(readerIndex(), newCapacity), newCapacity); } } @Override public ByteBuf ensureWritable(int minWritableBytes) { - if (minWritableBytes < 0) { - throw new IllegalArgumentException(String.format( - "minWritableBytes: %d (expected: >= 0)", minWritableBytes)); - } - 
ensureWritable0(minWritableBytes); + ensureWritable0(checkPositiveOrZero(minWritableBytes, "minWritableBytes")); return this; } final void ensureWritable0(int minWritableBytes) { - ensureAccessible(); - if (minWritableBytes <= writableBytes()) { + final int writerIndex = writerIndex(); + final int targetCapacity = writerIndex + minWritableBytes; + // using non-short-circuit & to reduce branching - this is a hot path and targetCapacity should rarely overflow + if (targetCapacity >= 0 & targetCapacity <= capacity()) { + ensureAccessible(); return; } - - if (minWritableBytes > maxCapacity - writerIndex) { + if (checkBounds && (targetCapacity < 0 || targetCapacity > maxCapacity)) { + ensureAccessible(); throw new IndexOutOfBoundsException(String.format( "writerIndex(%d) + minWritableBytes(%d) exceeds maxCapacity(%d): %s", writerIndex, minWritableBytes, maxCapacity, this)); } - // Normalize the current capacity to the power of 2. - int newCapacity = alloc().calculateNewCapacity(writerIndex + minWritableBytes, maxCapacity); + // Normalize the target capacity to the power of 2. + final int fastWritable = maxFastWritableBytes(); + int newCapacity = fastWritable >= minWritableBytes ? writerIndex + fastWritable + : alloc().calculateNewCapacity(targetCapacity, maxCapacity); // Adjust to the new capacity. capacity(newCapacity); @@ -288,10 +258,7 @@ final void ensureWritable0(int minWritableBytes) { @Override public int ensureWritable(int minWritableBytes, boolean force) { ensureAccessible(); - if (minWritableBytes < 0) { - throw new IllegalArgumentException(String.format( - "minWritableBytes: %d (expected: >= 0)", minWritableBytes)); - } + checkPositiveOrZero(minWritableBytes, "minWritableBytes"); if (minWritableBytes <= writableBytes()) { return 0; @@ -308,8 +275,9 @@ public int ensureWritable(int minWritableBytes, boolean force) { return 3; } - // Normalize the current capacity to the power of 2. 
- int newCapacity = alloc().calculateNewCapacity(writerIndex + minWritableBytes, maxCapacity); + int fastWritable = maxFastWritableBytes(); + int newCapacity = fastWritable >= minWritableBytes ? writerIndex + fastWritable + : alloc().calculateNewCapacity(writerIndex + minWritableBytes, maxCapacity); // Adjust to the new capacity. capacity(newCapacity); @@ -318,12 +286,10 @@ public int ensureWritable(int minWritableBytes, boolean force) { @Override public ByteBuf order(ByteOrder endianness) { - if (endianness == null) { - throw new NullPointerException("endianness"); - } if (endianness == order()) { return this; } + requireNonNull(endianness, "endianness"); return newSwappedByteBuf(); } @@ -490,7 +456,10 @@ public ByteBuf getBytes(int index, ByteBuf dst, int length) { @Override public CharSequence getCharSequence(int index, int length, Charset charset) { - // TODO: We could optimize this for UTF8 and US_ASCII + if (CharsetUtil.US_ASCII.equals(charset) || CharsetUtil.ISO_8859_1.equals(charset)) { + // ByteBufUtil.getBytes(...) 
will return a new copy which the AsciiString uses directly + return new AsciiString(ByteBufUtil.getBytes(this, index, length, true), false); + } return toString(index, length, charset); } @@ -618,16 +587,20 @@ public ByteBuf setBytes(int index, ByteBuf src) { return this; } - @Override - public ByteBuf setBytes(int index, ByteBuf src, int length) { - checkIndex(index, length); - if (src == null) { - throw new NullPointerException("src"); - } + private static void checkReadableBounds(final ByteBuf src, final int length) { if (length > src.readableBytes()) { throw new IndexOutOfBoundsException(String.format( "length(%d) exceeds src.readableBytes(%d) where src is: %s", length, src.readableBytes(), src)); } + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src, int length) { + checkIndex(index, length); + requireNonNull(src, "src"); + if (checkBounds) { + checkReadableBounds(src, length); + } setBytes(index, src, src.readerIndex(), length); src.readerIndex(src.readerIndex() + length); @@ -681,7 +654,7 @@ private int setCharSequence0(int index, CharSequence sequence, Charset charset, } else { checkIndex(index, length); } - return ByteBufUtil.writeUtf8(this, index, sequence, sequence.length()); + return ByteBufUtil.writeUtf8(this, index, length, sequence, sequence.length()); } if (charset.equals(CharsetUtil.US_ASCII) || charset.equals(CharsetUtil.ISO_8859_1)) { int length = sequence.length(); @@ -889,9 +862,11 @@ public ByteBuf readBytes(ByteBuf dst) { @Override public ByteBuf readBytes(ByteBuf dst, int length) { - if (length > dst.writableBytes()) { - throw new IndexOutOfBoundsException(String.format( - "length(%d) exceeds dst.writableBytes(%d) where dst is: %s", length, dst.writableBytes(), dst)); + if (checkBounds) { + if (length > dst.writableBytes()) { + throw new IndexOutOfBoundsException(String.format( + "length(%d) exceeds dst.writableBytes(%d) where dst is: %s", length, dst.writableBytes(), dst)); + } } readBytes(dst, dst.writerIndex(), length); 
dst.writerIndex(dst.writerIndex() + length); @@ -1065,9 +1040,8 @@ public ByteBuf writeBytes(ByteBuf src) { @Override public ByteBuf writeBytes(ByteBuf src, int length) { - if (length > src.readableBytes()) { - throw new IndexOutOfBoundsException(String.format( - "length(%d) exceeds src.readableBytes(%d) where src is: %s", length, src.readableBytes(), src)); + if (checkBounds) { + checkReadableBounds(src, length); } writeBytes(src, src.readerIndex(), length); src.readerIndex(src.readerIndex() + length); @@ -1172,6 +1146,7 @@ public ByteBuf copy() { @Override public ByteBuf duplicate() { + ensureAccessible(); return new UnpooledDuplicatedByteBuf(this); } @@ -1192,6 +1167,7 @@ public ByteBuf retainedSlice() { @Override public ByteBuf slice(int index, int length) { + ensureAccessible(); return new UnpooledSlicedByteBuf(this, index, length); } @@ -1222,7 +1198,10 @@ public String toString(int index, int length, Charset charset) { @Override public int indexOf(int fromIndex, int toIndex, byte value) { - return ByteBufUtil.indexOf(this, fromIndex, toIndex, value); + if (fromIndex <= toIndex) { + return ByteBufUtil.firstIndexOf(this, fromIndex, toIndex, value); + } + return ByteBufUtil.lastIndexOf(this, fromIndex, toIndex, value); } @Override @@ -1248,58 +1227,37 @@ public int bytesBefore(int index, int length, byte value) { @Override public int forEachByte(ByteProcessor processor) { ensureAccessible(); - try { - return forEachByteAsc0(readerIndex, writerIndex, processor); - } catch (Exception e) { - PlatformDependent.throwException(e); - return -1; - } + return forEachByteAsc0(readerIndex, writerIndex, processor); } @Override public int forEachByte(int index, int length, ByteProcessor processor) { checkIndex(index, length); - try { - return forEachByteAsc0(index, index + length, processor); - } catch (Exception e) { - PlatformDependent.throwException(e); - return -1; - } + return forEachByteAsc0(index, index + length, processor); } - private int forEachByteAsc0(int start, 
int end, ByteProcessor processor) throws Exception { + int forEachByteAsc0(int start, int end, ByteProcessor processor) { for (; start < end; ++start) { if (!processor.process(_getByte(start))) { return start; } } - return -1; } @Override public int forEachByteDesc(ByteProcessor processor) { ensureAccessible(); - try { - return forEachByteDesc0(writerIndex - 1, readerIndex, processor); - } catch (Exception e) { - PlatformDependent.throwException(e); - return -1; - } + return forEachByteDesc0(writerIndex - 1, readerIndex, processor); } @Override public int forEachByteDesc(int index, int length, ByteProcessor processor) { checkIndex(index, length); - try { - return forEachByteDesc0(index + length - 1, index, processor); - } catch (Exception e) { - PlatformDependent.throwException(e); - return -1; - } + return forEachByteDesc0(index + length - 1, index, processor); } - private int forEachByteDesc0(int rStart, final int rEnd, ByteProcessor processor) throws Exception { + int forEachByteDesc0(int rStart, final int rEnd, ByteProcessor processor) { for (; rStart >= rEnd; --rStart) { if (!processor.process(_getByte(rStart))) { return rStart; @@ -1315,7 +1273,7 @@ public int hashCode() { @Override public boolean equals(Object o) { - return this == o || (o instanceof ByteBuf && ByteBufUtil.equals(this, (ByteBuf) o)); + return o instanceof ByteBuf && ByteBufUtil.equals(this, (ByteBuf) o); } @Override @@ -1355,26 +1313,38 @@ protected final void checkIndex(int index, int fieldLength) { checkIndex0(index, fieldLength); } - final void checkIndex0(int index, int fieldLength) { - if (isOutOfBounds(index, fieldLength, capacity())) { + private static void checkRangeBounds(final String indexName, final int index, + final int fieldLength, final int capacity) { + if (isOutOfBounds(index, fieldLength, capacity)) { throw new IndexOutOfBoundsException(String.format( - "index: %d, length: %d (expected: range(0, %d))", index, fieldLength, capacity())); + "%s: %d, length: %d (expected: 
range(0, %d))", indexName, index, fieldLength, capacity)); + } + } + + final void checkIndex0(int index, int fieldLength) { + if (checkBounds) { + checkRangeBounds("index", index, fieldLength, capacity()); } } protected final void checkSrcIndex(int index, int length, int srcIndex, int srcCapacity) { checkIndex(index, length); - if (isOutOfBounds(srcIndex, length, srcCapacity)) { - throw new IndexOutOfBoundsException(String.format( - "srcIndex: %d, length: %d (expected: range(0, %d))", srcIndex, length, srcCapacity)); + if (checkBounds) { + checkRangeBounds("srcIndex", srcIndex, length, srcCapacity); } } protected final void checkDstIndex(int index, int length, int dstIndex, int dstCapacity) { checkIndex(index, length); - if (isOutOfBounds(dstIndex, length, dstCapacity)) { - throw new IndexOutOfBoundsException(String.format( - "dstIndex: %d, length: %d (expected: range(0, %d))", dstIndex, length, dstCapacity)); + if (checkBounds) { + checkRangeBounds("dstIndex", dstIndex, length, dstCapacity); + } + } + + protected final void checkDstIndex(int length, int dstIndex, int dstCapacity) { + checkReadableBytes(length); + if (checkBounds) { + checkRangeBounds("dstIndex", dstIndex, length, dstCapacity); } } @@ -1384,22 +1354,20 @@ protected final void checkDstIndex(int index, int length, int dstIndex, int dstC * than the specified value. 
*/ protected final void checkReadableBytes(int minimumReadableBytes) { - if (minimumReadableBytes < 0) { - throw new IllegalArgumentException("minimumReadableBytes: " + minimumReadableBytes + " (expected: >= 0)"); - } - checkReadableBytes0(minimumReadableBytes); + checkReadableBytes0(checkPositiveOrZero(minimumReadableBytes, "minimumReadableBytes")); } protected final void checkNewCapacity(int newCapacity) { ensureAccessible(); - if (newCapacity < 0 || newCapacity > maxCapacity()) { - throw new IllegalArgumentException("newCapacity: " + newCapacity + " (expected: 0-" + maxCapacity() + ')'); + if (checkBounds && (newCapacity < 0 || newCapacity > maxCapacity())) { + throw new IllegalArgumentException("newCapacity: " + newCapacity + + " (expected: 0-" + maxCapacity() + ')'); } } private void checkReadableBytes0(int minimumReadableBytes) { ensureAccessible(); - if (readerIndex > writerIndex - minimumReadableBytes) { + if (checkBounds && readerIndex > writerIndex - minimumReadableBytes) { throw new IndexOutOfBoundsException(String.format( "readerIndex(%d) + length(%d) exceeds writerIndex(%d): %s", readerIndex, minimumReadableBytes, writerIndex, this)); @@ -1411,7 +1379,7 @@ private void checkReadableBytes0(int minimumReadableBytes) { * if the buffer was released before. 
*/ protected final void ensureAccessible() { - if (checkAccessible && refCnt() == 0) { + if (checkAccessible && !isAccessible()) { throw new IllegalReferenceCountException(0); } } @@ -1420,8 +1388,4 @@ final void setIndex0(int readerIndex, int writerIndex) { this.readerIndex = readerIndex; this.writerIndex = writerIndex; } - - final void discardMarks() { - markedReaderIndex = markedWriterIndex = 0; - } } diff --git a/buffer/src/main/java/io/netty/buffer/AbstractByteBufAllocator.java b/buffer/src/main/java/io/netty/buffer/AbstractByteBufAllocator.java index 40525144e3f..2d880a76ff3 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractByteBufAllocator.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractByteBufAllocator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,6 +16,8 @@ package io.netty.buffer; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; + import io.netty.util.ResourceLeakDetector; import io.netty.util.ResourceLeakTracker; import io.netty.util.internal.PlatformDependent; @@ -125,7 +127,7 @@ public ByteBuf buffer(int initialCapacity, int maxCapacity) { @Override public ByteBuf ioBuffer() { - if (PlatformDependent.hasUnsafe()) { + if (PlatformDependent.hasUnsafe() || isDirectBufferPooled()) { return directBuffer(DEFAULT_INITIAL_CAPACITY); } return heapBuffer(DEFAULT_INITIAL_CAPACITY); @@ -133,7 +135,7 @@ public ByteBuf ioBuffer() { @Override public ByteBuf ioBuffer(int initialCapacity) { - if (PlatformDependent.hasUnsafe()) { + if (PlatformDependent.hasUnsafe() || isDirectBufferPooled()) { return directBuffer(initialCapacity); } return heapBuffer(initialCapacity); @@ 
-141,7 +143,7 @@ public ByteBuf ioBuffer(int initialCapacity) { @Override public ByteBuf ioBuffer(int initialCapacity, int maxCapacity) { - if (PlatformDependent.hasUnsafe()) { + if (PlatformDependent.hasUnsafe() || isDirectBufferPooled()) { return directBuffer(initialCapacity, maxCapacity); } return heapBuffer(initialCapacity, maxCapacity); @@ -222,9 +224,7 @@ public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) { } private static void validate(int initialCapacity, int maxCapacity) { - if (initialCapacity < 0) { - throw new IllegalArgumentException("initialCapacity: " + initialCapacity + " (expected: 0+)"); - } + checkPositiveOrZero(initialCapacity, "initialCapacity"); if (initialCapacity > maxCapacity) { throw new IllegalArgumentException(String.format( "initialCapacity: %d (expected: not greater than maxCapacity(%d)", @@ -249,9 +249,7 @@ public String toString() { @Override public int calculateNewCapacity(int minNewCapacity, int maxCapacity) { - if (minNewCapacity < 0) { - throw new IllegalArgumentException("minNewCapacity: " + minNewCapacity + " (expected: 0+)"); - } + checkPositiveOrZero(minNewCapacity, "minNewCapacity"); if (minNewCapacity > maxCapacity) { throw new IllegalArgumentException(String.format( "minNewCapacity: %d (expected: not greater than maxCapacity(%d)", diff --git a/buffer/src/main/java/io/netty/buffer/AbstractDerivedByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractDerivedByteBuf.java index 58f1d907a15..c3765c80143 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractDerivedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractDerivedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -31,6 +31,15 @@ protected AbstractDerivedByteBuf(int maxCapacity) { super(maxCapacity); } + @Override + final boolean isAccessible() { + return isAccessible0(); + } + + boolean isAccessible0() { + return unwrap().isAccessible(); + } + @Override public final int refCnt() { return refCnt0(); @@ -112,4 +121,9 @@ public ByteBuffer internalNioBuffer(int index, int length) { public ByteBuffer nioBuffer(int index, int length) { return unwrap().nioBuffer(index, length); } + + @Override + public boolean isContiguous() { + return unwrap().isContiguous(); + } } diff --git a/buffer/src/main/java/io/netty/buffer/AbstractPooledDerivedByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractPooledDerivedByteBuf.java index 80f33914d7f..c143dde0723 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractPooledDerivedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractPooledDerivedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,8 +16,7 @@ package io.netty.buffer; -import io.netty.util.Recycler.Handle; -import io.netty.util.ReferenceCounted; +import io.netty.util.internal.ObjectPool.Handle; import java.nio.ByteBuffer; import java.nio.ByteOrder; @@ -63,7 +62,7 @@ final U init( try { maxCapacity(maxCapacity); setIndex0(readerIndex, writerIndex); // It is assumed the bounds checking is done by the caller. 
- setRefCnt(1); + resetRefCnt(); @SuppressWarnings("unchecked") final U castThis = (U) this; @@ -123,6 +122,11 @@ public boolean hasMemoryAddress() { return unwrap().hasMemoryAddress(); } + @Override + public boolean isContiguous() { + return unwrap().isContiguous(); + } + @Override public final int nioBufferCount() { return unwrap().nioBufferCount(); @@ -141,23 +145,30 @@ public final ByteBuf retainedSlice() { @Override public ByteBuf slice(int index, int length) { + ensureAccessible(); // All reference count methods should be inherited from this object (this is the "parent"). return new PooledNonRetainedSlicedByteBuf(this, unwrap(), index, length); } final ByteBuf duplicate0() { + ensureAccessible(); // All reference count methods should be inherited from this object (this is the "parent"). return new PooledNonRetainedDuplicateByteBuf(this, unwrap()); } private static final class PooledNonRetainedDuplicateByteBuf extends UnpooledDuplicatedByteBuf { - private final ReferenceCounted referenceCountDelegate; + private final ByteBuf referenceCountDelegate; - PooledNonRetainedDuplicateByteBuf(ReferenceCounted referenceCountDelegate, AbstractByteBuf buffer) { + PooledNonRetainedDuplicateByteBuf(ByteBuf referenceCountDelegate, AbstractByteBuf buffer) { super(buffer); this.referenceCountDelegate = referenceCountDelegate; } + @Override + boolean isAccessible0() { + return referenceCountDelegate.isAccessible(); + } + @Override int refCnt0() { return referenceCountDelegate.refCnt(); @@ -199,6 +210,7 @@ boolean release0(int decrement) { @Override public ByteBuf duplicate() { + ensureAccessible(); return new PooledNonRetainedDuplicateByteBuf(referenceCountDelegate, this); } @@ -209,7 +221,7 @@ public ByteBuf retainedDuplicate() { @Override public ByteBuf slice(int index, int length) { - checkIndex0(index, length); + checkIndex(index, length); return new PooledNonRetainedSlicedByteBuf(referenceCountDelegate, unwrap(), index, length); } @@ -226,14 +238,19 @@ public ByteBuf 
retainedSlice(int index, int length) { } private static final class PooledNonRetainedSlicedByteBuf extends UnpooledSlicedByteBuf { - private final ReferenceCounted referenceCountDelegate; + private final ByteBuf referenceCountDelegate; - PooledNonRetainedSlicedByteBuf(ReferenceCounted referenceCountDelegate, + PooledNonRetainedSlicedByteBuf(ByteBuf referenceCountDelegate, AbstractByteBuf buffer, int index, int length) { super(buffer, index, length); this.referenceCountDelegate = referenceCountDelegate; } + @Override + boolean isAccessible0() { + return referenceCountDelegate.isAccessible(); + } + @Override int refCnt0() { return referenceCountDelegate.refCnt(); @@ -275,6 +292,7 @@ boolean release0(int decrement) { @Override public ByteBuf duplicate() { + ensureAccessible(); return new PooledNonRetainedDuplicateByteBuf(referenceCountDelegate, unwrap()) .setIndex(idx(readerIndex()), idx(writerIndex())); } @@ -286,7 +304,7 @@ public ByteBuf retainedDuplicate() { @Override public ByteBuf slice(int index, int length) { - checkIndex0(index, length); + checkIndex(index, length); return new PooledNonRetainedSlicedByteBuf(referenceCountDelegate, unwrap(), idx(index), length); } diff --git a/buffer/src/main/java/io/netty/buffer/AbstractReferenceCountedByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractReferenceCountedByteBuf.java index d3058c0fb62..05751cbef71 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractReferenceCountedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractReferenceCountedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,57 +16,73 @@ package io.netty.buffer; -import io.netty.util.IllegalReferenceCountException; - import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; -import static io.netty.util.internal.ObjectUtil.checkPositive; +import io.netty.util.internal.ReferenceCountUpdater; /** * Abstract base class for {@link ByteBuf} implementations that count references. */ public abstract class AbstractReferenceCountedByteBuf extends AbstractByteBuf { - - private static final AtomicIntegerFieldUpdater refCntUpdater = + private static final long REFCNT_FIELD_OFFSET = + ReferenceCountUpdater.getUnsafeOffset(AbstractReferenceCountedByteBuf.class, "refCnt"); + private static final AtomicIntegerFieldUpdater AIF_UPDATER = AtomicIntegerFieldUpdater.newUpdater(AbstractReferenceCountedByteBuf.class, "refCnt"); - private volatile int refCnt; + private static final ReferenceCountUpdater updater = + new ReferenceCountUpdater() { + @Override + protected AtomicIntegerFieldUpdater updater() { + return AIF_UPDATER; + } + @Override + protected long unsafeOffset() { + return REFCNT_FIELD_OFFSET; + } + }; + + // Value might not equal "real" reference count, all access should be via the updater + @SuppressWarnings({"unused", "FieldMayBeFinal"}) + private volatile int refCnt = updater.initialValue(); protected AbstractReferenceCountedByteBuf(int maxCapacity) { super(maxCapacity); - refCntUpdater.set(this, 1); + } + + @Override + boolean isAccessible() { + // Try to do non-volatile read for performance as the ensureAccessible() is racy anyway and only provide + // a best-effort guard. 
+ return updater.isLiveNonVolatile(this); } @Override public int refCnt() { - return refCnt; + return updater.refCnt(this); } /** * An unsafe operation intended for use by a subclass that sets the reference count of the buffer directly */ protected final void setRefCnt(int refCnt) { - refCntUpdater.set(this, refCnt); + updater.setRefCnt(this, refCnt); + } + + /** + * An unsafe operation intended for use by a subclass that resets the reference count of the buffer to 1 + */ + protected final void resetRefCnt() { + updater.resetRefCnt(this); } @Override public ByteBuf retain() { - return retain0(1); + return updater.retain(this); } @Override public ByteBuf retain(int increment) { - return retain0(checkPositive(increment, "increment")); - } - - private ByteBuf retain0(final int increment) { - int oldRef = refCntUpdater.getAndAdd(this, increment); - if (oldRef <= 0 || oldRef + increment < oldRef) { - // Ensure we don't resurrect (which means the refCnt was 0) and also that we encountered an overflow. - refCntUpdater.getAndAdd(this, -increment); - throw new IllegalReferenceCountException(oldRef, increment); - } - return this; + return updater.retain(this, increment); } @Override @@ -81,26 +97,21 @@ public ByteBuf touch(Object hint) { @Override public boolean release() { - return release0(1); + return handleRelease(updater.release(this)); } @Override public boolean release(int decrement) { - return release0(checkPositive(decrement, "decrement")); + return handleRelease(updater.release(this, decrement)); } - private boolean release0(int decrement) { - int oldRef = refCntUpdater.getAndAdd(this, -decrement); - if (oldRef == decrement) { + private boolean handleRelease(boolean result) { + if (result) { deallocate(); - return true; - } else if (oldRef < decrement || oldRef - decrement > oldRef) { - // Ensure we don't over-release, and avoid underflow. 
- refCntUpdater.getAndAdd(this, decrement); - throw new IllegalReferenceCountException(oldRef, decrement); } - return false; + return result; } + /** * Called once {@link #refCnt()} is equals 0. */ diff --git a/buffer/src/main/java/io/netty/buffer/AbstractUnpooledSlicedByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractUnpooledSlicedByteBuf.java index a9a88ac7b51..f1863ff3203 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractUnpooledSlicedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractUnpooledSlicedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/AbstractUnsafeSwappedByteBuf.java b/buffer/src/main/java/io/netty/buffer/AbstractUnsafeSwappedByteBuf.java index d5e9239cabc..3ebcef367ae 100644 --- a/buffer/src/main/java/io/netty/buffer/AbstractUnsafeSwappedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AbstractUnsafeSwappedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -64,7 +64,7 @@ public final long getUnsignedInt(int index) { @Override public final int getInt(int index) { - wrapped.checkIndex0(index, 4); + wrapped.checkIndex(index, 4); int v = _getInt(wrapped, index); return nativeByteOrder ? 
v : Integer.reverseBytes(v); } @@ -76,21 +76,21 @@ public final int getUnsignedShort(int index) { @Override public final short getShort(int index) { - wrapped.checkIndex0(index, 2); + wrapped.checkIndex(index, 2); short v = _getShort(wrapped, index); return nativeByteOrder ? v : Short.reverseBytes(v); } @Override public final ByteBuf setShort(int index, int value) { - wrapped.checkIndex0(index, 2); + wrapped.checkIndex(index, 2); _setShort(wrapped, index, nativeByteOrder ? (short) value : Short.reverseBytes((short) value)); return this; } @Override public final ByteBuf setInt(int index, int value) { - wrapped.checkIndex0(index, 4); + wrapped.checkIndex(index, 4); _setInt(wrapped, index, nativeByteOrder ? value : Integer.reverseBytes(value)); return this; } diff --git a/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareByteBuf.java b/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareByteBuf.java index 2d537bd6b7e..e8d96d0c917 100644 --- a/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -35,6 +35,7 @@ final class AdvancedLeakAwareByteBuf extends SimpleLeakAwareByteBuf { + // If set to true we will only record stacktraces for touch(...), release(...) and retain(...) calls. 
private static final String PROP_ACQUIRE_AND_RELEASE_ONLY = "io.netty.leakDetection.acquireAndReleaseOnly"; private static final boolean ACQUIRE_AND_RELEASE_ONLY; diff --git a/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBuf.java b/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBuf.java index ff93de665c2..3eb404e2fd3 100644 --- a/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -939,6 +939,12 @@ public CompositeByteBuf addComponent(boolean increaseWriterIndex, int cIndex, By return super.addComponent(increaseWriterIndex, cIndex, buffer); } + @Override + public CompositeByteBuf addFlattenedComponents(boolean increaseWriterIndex, ByteBuf buffer) { + recordLeakNonRefCountingOperation(leak); + return super.addFlattenedComponents(increaseWriterIndex, buffer); + } + @Override public CompositeByteBuf removeComponent(int cIndex) { recordLeakNonRefCountingOperation(leak); diff --git a/buffer/src/main/java/io/netty/buffer/ByteBuf.java b/buffer/src/main/java/io/netty/buffer/ByteBuf.java index c04340a0d15..2adb7e64389 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -43,7 +43,7 @@ *

Random Access Indexing

* * Just like an ordinary primitive byte array, {@link ByteBuf} uses - * zero-based indexing. + * zero-based indexing. * It means the index of the first byte is always {@code 0} and the index of the last byte is * always {@link #capacity() capacity - 1}. For example, to iterate all bytes of a buffer, you * can do the following, regardless of its internal implementation: @@ -183,15 +183,6 @@ * For complicated searches, use {@link #forEachByte(int, int, ByteProcessor)} with a {@link ByteProcessor} * implementation. * - *

Mark and reset

- * - * There are two marker indexes in every buffer. One is for storing - * {@link #readerIndex() readerIndex} and the other is for storing - * {@link #writerIndex() writerIndex}. You can always reposition one of the - * two indexes by calling a reset method. It works in a similar fashion to - * the mark and reset methods in {@link InputStream} except that there's no - * {@code readlimit}. - * *

Derived buffers

* * You can create a view of an existing buffer by calling one of the following methods: @@ -206,7 +197,7 @@ *
  • {@link #readRetainedSlice(int)}
  • * * A derived buffer will have an independent {@link #readerIndex() readerIndex}, - * {@link #writerIndex() writerIndex} and marker indexes, while it shares + * {@link #writerIndex() writerIndex}, while it shares * other internal data representation, just like a NIO buffer does. *

    * In case a completely fresh copy of an existing buffer is required, please @@ -245,8 +236,7 @@ * Please refer to {@link ByteBufInputStream} and * {@link ByteBufOutputStream}. */ -@SuppressWarnings("ClassMayBeInterface") -public abstract class ByteBuf implements ReferenceCounted, Comparable { +public abstract class ByteBuf implements ReferenceCounted, Comparable, ByteBufConvertible { /** * Returns the number of bytes (octets) this buffer can contain. @@ -258,14 +248,14 @@ public abstract class ByteBuf implements ReferenceCounted, Comparable { * capacity, the content of this buffer is truncated. If the {@code newCapacity} is greater * than the current capacity, the buffer is appended with unspecified data whose length is * {@code (newCapacity - currentCapacity)}. + * + * @throws IllegalArgumentException if the {@code newCapacity} is greater than {@link #maxCapacity()} */ public abstract ByteBuf capacity(int newCapacity); /** - * Returns the maximum allowed capacity of this buffer. If a user attempts to increase the - * capacity of this buffer beyond the maximum capacity using {@link #capacity(int)} or - * {@link #ensureWritable(int)}, those methods will raise an - * {@link IllegalArgumentException}. + * Returns the maximum allowed capacity of this buffer. This value provides an upper + * bound on {@link #capacity()}. */ public abstract int maxCapacity(); @@ -275,7 +265,7 @@ public abstract class ByteBuf implements ReferenceCounted, Comparable { public abstract ByteBufAllocator alloc(); /** - * Returns the endianness + * Returns the endianness * of this buffer. * * @deprecated use the Little Endian accessors, e.g. {@code getShortLE}, {@code getIntLE} @@ -286,8 +276,8 @@ public abstract class ByteBuf implements ReferenceCounted, Comparable { /** * Returns a buffer with the specified {@code endianness} which shares the whole region, - * indexes, and marks of this buffer. 
Modifying the content, the indexes, or the marks of the - * returned buffer or this buffer affects each other's content, indexes, and marks. If the + * indexes of this buffer. Modifying the content, the indexes of the + * returned buffer or this buffer affects each other's content, and indexes. If the * specified {@code endianness} is identical to this buffer's byte order, this method can * return {@code this}. This method does not modify {@code readerIndex} or {@code writerIndex} * of this buffer. @@ -405,27 +395,37 @@ public abstract class ByteBuf implements ReferenceCounted, Comparable { public abstract ByteBuf setIndex(int readerIndex, int writerIndex); /** - * Returns the number of readable bytes which is equal to - * {@code (this.writerIndex - this.readerIndex)}. + * Returns the number of readable bytes which is logically equivalent to + * {@code (this.writerIndex - this.readerIndex)}, but may be overridden to accommodate + * specialized behavior (e.g. write only). */ public abstract int readableBytes(); /** - * Returns the number of writable bytes which is equal to - * {@code (this.capacity - this.writerIndex)}. + * Returns the number of writable bytes which is logically equivalent to + * {@code (this.capacity - this.writerIndex)}, but may be overridden to accommodate + * specialized behavior (e.g. read only). */ public abstract int writableBytes(); /** - * Returns the maximum possible number of writable bytes, which is equal to - * {@code (this.maxCapacity - this.writerIndex)}. + * Returns the maximum possible number of writable bytes, which is logically equivalent to + * {@code (this.maxCapacity - this.writerIndex)}, but may be overridden to accommodate + * specialized behavior (e.g. read only). */ public abstract int maxWritableBytes(); /** - * Returns {@code true} - * if and only if {@code (this.writerIndex - this.readerIndex)} is greater - * than {@code 0}. 
+ * Returns the maximum number of bytes which can be written for certain without involving + * an internal reallocation or data-copy. The returned value will be ≥ {@link #writableBytes()} + * and ≤ {@link #maxWritableBytes()}. + */ + public int maxFastWritableBytes() { + return writableBytes(); + } + + /** + * Returns {@code true} if and only if {@link #readableBytes()} is greater than {@code 0}. */ public abstract boolean isReadable(); @@ -435,9 +435,7 @@ public abstract class ByteBuf implements ReferenceCounted, Comparable { public abstract boolean isReadable(int size); /** - * Returns {@code true} - * if and only if {@code (this.capacity - this.writerIndex)} is greater - * than {@code 0}. + * Returns {@code true} if and only if {@link #writableBytes()} is greater than {@code 0}. */ public abstract boolean isWritable(); @@ -458,42 +456,6 @@ public abstract class ByteBuf implements ReferenceCounted, Comparable { */ public abstract ByteBuf clear(); - /** - * Marks the current {@code readerIndex} in this buffer. You can - * reposition the current {@code readerIndex} to the marked - * {@code readerIndex} by calling {@link #resetReaderIndex()}. - * The initial value of the marked {@code readerIndex} is {@code 0}. - */ - public abstract ByteBuf markReaderIndex(); - - /** - * Repositions the current {@code readerIndex} to the marked - * {@code readerIndex} in this buffer. - * - * @throws IndexOutOfBoundsException - * if the current {@code writerIndex} is less than the marked - * {@code readerIndex} - */ - public abstract ByteBuf resetReaderIndex(); - - /** - * Marks the current {@code writerIndex} in this buffer. You can - * reposition the current {@code writerIndex} to the marked - * {@code writerIndex} by calling {@link #resetWriterIndex()}. - * The initial value of the marked {@code writerIndex} is {@code 0}. - */ - public abstract ByteBuf markWriterIndex(); - - /** - * Repositions the current {@code writerIndex} to the marked - * {@code writerIndex} in this buffer. 
- * - * @throws IndexOutOfBoundsException - * if the current {@code readerIndex} is greater than the marked - * {@code writerIndex} - */ - public abstract ByteBuf resetWriterIndex(); - /** * Discards the bytes between the 0th index and {@code readerIndex}. * It moves the bytes between {@code readerIndex} and {@code writerIndex} @@ -513,22 +475,23 @@ public abstract class ByteBuf implements ReferenceCounted, Comparable { public abstract ByteBuf discardSomeReadBytes(); /** - * Makes sure the number of {@linkplain #writableBytes() the writable bytes} - * is equal to or greater than the specified value. If there is enough - * writable bytes in this buffer, this method returns with no side effect. - * Otherwise, it raises an {@link IllegalArgumentException}. + * Expands the buffer {@link #capacity()} to make sure the number of + * {@linkplain #writableBytes() writable bytes} is equal to or greater than the + * specified value. If there are enough writable bytes in this buffer, this method + * returns with no side effect. * * @param minWritableBytes * the expected minimum number of writable bytes * @throws IndexOutOfBoundsException - * if {@link #writerIndex()} + {@code minWritableBytes} > {@link #maxCapacity()} + * if {@link #writerIndex()} + {@code minWritableBytes} > {@link #maxCapacity()}. + * @see #capacity(int) */ public abstract ByteBuf ensureWritable(int minWritableBytes); /** - * Tries to make sure the number of {@linkplain #writableBytes() the writable bytes} - * is equal to or greater than the specified value. Unlike {@link #ensureWritable(int)}, - * this method does not raise an exception but returns a code. + * Expands the buffer {@link #capacity()} to make sure the number of + * {@linkplain #writableBytes() writable bytes} is equal to or greater than the + * specified value. Unlike {@link #ensureWritable(int)}, this method returns a status code. 
* * @param minWritableBytes * the expected minimum number of writable bytes @@ -1166,7 +1129,7 @@ public ByteBuf setDoubleLE(int index, double value) { * the number of the transferred bytes while * {@link #setBytes(int, ByteBuf, int, int)} does not. * This method does not modify {@code readerIndex} or {@code writerIndex} of - * the source buffer (i.e. {@code this}). + * this buffer (i.e. {@code this}). * * @throws IndexOutOfBoundsException * if the specified {@code index} is less than {@code 0} or @@ -1183,7 +1146,7 @@ public ByteBuf setDoubleLE(int index, double value) { * the number of the transferred bytes while * {@link #setBytes(int, ByteBuf, int, int)} does not. * This method does not modify {@code readerIndex} or {@code writerIndex} of - * the source buffer (i.e. {@code this}). + * this buffer (i.e. {@code this}). * * @param length the number of bytes to transfer * @@ -1756,9 +1719,8 @@ public double readDoubleLE() { /** * Sets the specified boolean at the current {@code writerIndex} * and increases the {@code writerIndex} by {@code 1} in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 1} + * If {@code this.writableBytes} is less than {@code 1}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeBoolean(boolean value); @@ -1766,9 +1728,8 @@ public double readDoubleLE() { * Sets the specified byte at the current {@code writerIndex} * and increases the {@code writerIndex} by {@code 1} in this buffer. * The 24 high-order bits of the specified value are ignored. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 1} + * If {@code this.writableBytes} is less than {@code 1}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. 
*/ public abstract ByteBuf writeByte(int value); @@ -1776,9 +1737,8 @@ public double readDoubleLE() { * Sets the specified 16-bit short integer at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 2} * in this buffer. The 16 high-order bits of the specified value are ignored. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 2} + * If {@code this.writableBytes} is less than {@code 2}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeShort(int value); @@ -1787,9 +1747,8 @@ public double readDoubleLE() { * Order at the current {@code writerIndex} and increases the * {@code writerIndex} by {@code 2} in this buffer. * The 16 high-order bits of the specified value are ignored. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 2} + * If {@code this.writableBytes} is less than {@code 2}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeShortLE(int value); @@ -1797,9 +1756,8 @@ public double readDoubleLE() { * Sets the specified 24-bit medium integer at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 3} * in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 3} + * If {@code this.writableBytes} is less than {@code 3}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeMedium(int value); @@ -1808,18 +1766,16 @@ public double readDoubleLE() { * {@code writerIndex} in the Little Endian Byte Order and * increases the {@code writerIndex} by {@code 3} in this * buffer. 
- * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 3} + * If {@code this.writableBytes} is less than {@code 3}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeMediumLE(int value); /** * Sets the specified 32-bit integer at the current {@code writerIndex} * and increases the {@code writerIndex} by {@code 4} in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 4} + * If {@code this.writableBytes} is less than {@code 4}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeInt(int value); @@ -1827,9 +1783,8 @@ public double readDoubleLE() { * Sets the specified 32-bit integer at the current {@code writerIndex} * in the Little Endian Byte Order and increases the {@code writerIndex} * by {@code 4} in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 4} + * If {@code this.writableBytes} is less than {@code 4}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeIntLE(int value); @@ -1837,9 +1792,8 @@ public double readDoubleLE() { * Sets the specified 64-bit long integer at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 8} * in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 8} + * If {@code this.writableBytes} is less than {@code 8}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeLong(long value); @@ -1848,9 +1802,8 @@ public double readDoubleLE() { * {@code writerIndex} in the Little Endian Byte Order and * increases the {@code writerIndex} by {@code 8} * in this buffer. 
- * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 8} + * If {@code this.writableBytes} is less than {@code 8}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeLongLE(long value); @@ -1858,9 +1811,8 @@ public double readDoubleLE() { * Sets the specified 2-byte UTF-16 character at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 2} * in this buffer. The 16 high-order bits of the specified value are ignored. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 2} + * If {@code this.writableBytes} is less than {@code 2}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeChar(int value); @@ -1868,9 +1820,8 @@ public double readDoubleLE() { * Sets the specified 32-bit floating point number at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 4} * in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 4} + * If {@code this.writableBytes} is less than {@code 4}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeFloat(float value); @@ -1878,9 +1829,8 @@ public double readDoubleLE() { * Sets the specified 32-bit floating point number at the current * {@code writerIndex} in Little Endian Byte Order and increases * the {@code writerIndex} by {@code 4} in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 4} + * If {@code this.writableBytes} is less than {@code 4}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. 
*/ public ByteBuf writeFloatLE(float value) { return writeIntLE(Float.floatToRawIntBits(value)); @@ -1890,9 +1840,8 @@ public ByteBuf writeFloatLE(float value) { * Sets the specified 64-bit floating point number at the current * {@code writerIndex} and increases the {@code writerIndex} by {@code 8} * in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 8} + * If {@code this.writableBytes} is less than {@code 8}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeDouble(double value); @@ -1900,9 +1849,8 @@ public ByteBuf writeFloatLE(float value) { * Sets the specified 64-bit floating point number at the current * {@code writerIndex} in Little Endian Byte Order and increases * the {@code writerIndex} by {@code 8} in this buffer. - * - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is less than {@code 8} + * If {@code this.writableBytes} is less than {@code 8}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public ByteBuf writeDoubleLE(double value) { return writeLongLE(Double.doubleToRawLongBits(value)); @@ -1917,10 +1865,9 @@ public ByteBuf writeDoubleLE(double value) { * increases the {@code readerIndex} of the source buffer by the number of * the transferred bytes while {@link #writeBytes(ByteBuf, int, int)} * does not. - * - * @throws IndexOutOfBoundsException - * if {@code src.readableBytes} is greater than - * {@code this.writableBytes} + * If {@code this.writableBytes} is less than {@code src.readableBytes}, + * {@link #ensureWritable(int)} will be called in an attempt to expand + * capacity to accommodate. 
*/ public abstract ByteBuf writeBytes(ByteBuf src); @@ -1932,12 +1879,11 @@ public ByteBuf writeDoubleLE(double value) { * except that this method increases the {@code readerIndex} of the source * buffer by the number of the transferred bytes (= {@code length}) while * {@link #writeBytes(ByteBuf, int, int)} does not. + * If {@code this.writableBytes} is less than {@code length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. * * @param length the number of bytes to transfer - * - * @throws IndexOutOfBoundsException - * if {@code length} is greater than {@code this.writableBytes} or - * if {@code length} is greater then {@code src.readableBytes} + * @throws IndexOutOfBoundsException if {@code length} is greater than {@code src.readableBytes} */ public abstract ByteBuf writeBytes(ByteBuf src, int length); @@ -1945,15 +1891,15 @@ public ByteBuf writeDoubleLE(double value) { * Transfers the specified source buffer's data to this buffer starting at * the current {@code writerIndex} and increases the {@code writerIndex} * by the number of the transferred bytes (= {@code length}). + * If {@code this.writableBytes} is less than {@code length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. 
* * @param srcIndex the first index of the source * @param length the number of bytes to transfer * * @throws IndexOutOfBoundsException - * if the specified {@code srcIndex} is less than {@code 0}, - * if {@code srcIndex + length} is greater than - * {@code src.capacity}, or - * if {@code length} is greater than {@code this.writableBytes} + * if the specified {@code srcIndex} is less than {@code 0}, or + * if {@code srcIndex + length} is greater than {@code src.capacity} */ public abstract ByteBuf writeBytes(ByteBuf src, int srcIndex, int length); @@ -1961,9 +1907,8 @@ public ByteBuf writeDoubleLE(double value) { * Transfers the specified source array's data to this buffer starting at * the current {@code writerIndex} and increases the {@code writerIndex} * by the number of the transferred bytes (= {@code src.length}). - * - * @throws IndexOutOfBoundsException - * if {@code src.length} is greater than {@code this.writableBytes} + * If {@code this.writableBytes} is less than {@code src.length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. */ public abstract ByteBuf writeBytes(byte[] src); @@ -1971,15 +1916,15 @@ public ByteBuf writeDoubleLE(double value) { * Transfers the specified source array's data to this buffer starting at * the current {@code writerIndex} and increases the {@code writerIndex} * by the number of the transferred bytes (= {@code length}). + * If {@code this.writableBytes} is less than {@code length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. 
* * @param srcIndex the first index of the source * @param length the number of bytes to transfer * * @throws IndexOutOfBoundsException - * if the specified {@code srcIndex} is less than {@code 0}, - * if {@code srcIndex + length} is greater than - * {@code src.length}, or - * if {@code length} is greater than {@code this.writableBytes} + * if the specified {@code srcIndex} is less than {@code 0}, or + * if {@code srcIndex + length} is greater than {@code src.length} */ public abstract ByteBuf writeBytes(byte[] src, int srcIndex, int length); @@ -1988,10 +1933,9 @@ public ByteBuf writeDoubleLE(double value) { * the current {@code writerIndex} until the source buffer's position * reaches its limit, and increases the {@code writerIndex} by the * number of the transferred bytes. - * - * @throws IndexOutOfBoundsException - * if {@code src.remaining()} is greater than - * {@code this.writableBytes} + * If {@code this.writableBytes} is less than {@code src.remaining()}, + * {@link #ensureWritable(int)} will be called in an attempt to expand + * capacity to accommodate. */ public abstract ByteBuf writeBytes(ByteBuffer src); @@ -1999,29 +1943,28 @@ public ByteBuf writeDoubleLE(double value) { * Transfers the content of the specified stream to this buffer * starting at the current {@code writerIndex} and increases the * {@code writerIndex} by the number of the transferred bytes. + * If {@code this.writableBytes} is less than {@code length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. 
* * @param length the number of bytes to transfer * * @return the actual number of bytes read in from the specified stream * - * @throws IndexOutOfBoundsException - * if {@code length} is greater than {@code this.writableBytes} - * @throws IOException - * if the specified stream threw an exception during I/O + * @throws IOException if the specified stream threw an exception during I/O */ - public abstract int writeBytes(InputStream in, int length) throws IOException; + public abstract int writeBytes(InputStream in, int length) throws IOException; /** * Transfers the content of the specified channel to this buffer * starting at the current {@code writerIndex} and increases the * {@code writerIndex} by the number of the transferred bytes. + * If {@code this.writableBytes} is less than {@code length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. * * @param length the maximum number of bytes to transfer * * @return the actual number of bytes read in from the specified channel * - * @throws IndexOutOfBoundsException - * if {@code length} is greater than {@code this.writableBytes} * @throws IOException * if the specified channel threw an exception during I/O */ @@ -2032,14 +1975,14 @@ public ByteBuf writeDoubleLE(double value) { * to this buffer starting at the current {@code writerIndex} and increases the * {@code writerIndex} by the number of the transferred bytes. * This method does not modify the channel's position. + * If {@code this.writableBytes} is less than {@code length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. 
* * @param position the file position at which the transfer is to begin * @param length the maximum number of bytes to transfer * * @return the actual number of bytes read in from the specified channel * - * @throws IndexOutOfBoundsException - * if {@code length} is greater than {@code this.writableBytes} * @throws IOException * if the specified channel threw an exception during I/O */ @@ -2049,11 +1992,10 @@ public ByteBuf writeDoubleLE(double value) { * Fills this buffer with NUL (0x00) starting at the current * {@code writerIndex} and increases the {@code writerIndex} by the * specified {@code length}. + * If {@code this.writableBytes} is less than {@code length}, {@link #ensureWritable(int)} + * will be called in an attempt to expand capacity to accommodate. * * @param length the number of NULs to write to the buffer - * - * @throws IndexOutOfBoundsException - * if {@code length} is greater than {@code this.writableBytes} */ public abstract ByteBuf writeZero(int length); @@ -2061,22 +2003,25 @@ public ByteBuf writeDoubleLE(double value) { * Writes the specified {@link CharSequence} at the current {@code writerIndex} and increases * the {@code writerIndex} by the written bytes. * in this buffer. + * If {@code this.writableBytes} is not large enough to write the whole sequence, + * {@link #ensureWritable(int)} will be called in an attempt to expand capacity to accommodate. * * @param sequence to write * @param charset that should be used * @return the written number of bytes - * @throws IndexOutOfBoundsException - * if {@code this.writableBytes} is not large enough to write the whole sequence */ public abstract int writeCharSequence(CharSequence sequence, Charset charset); /** * Locates the first occurrence of the specified {@code value} in this - * buffer. The search takes place from the specified {@code fromIndex} - * (inclusive) to the specified {@code toIndex} (exclusive). + * buffer. 
The search takes place from the specified {@code fromIndex} + * (inclusive) to the specified {@code toIndex} (exclusive). *

    * If {@code fromIndex} is greater than {@code toIndex}, the search is - * performed in a reversed order. + * performed in a reversed order from {@code fromIndex} (exclusive) + * down to {@code toIndex} (inclusive). + *

    + * Note that the lower index is always included and higher always excluded. *

    * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. @@ -2186,7 +2131,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Returns a slice of this buffer's readable bytes. Modifying the content * of the returned buffer or this buffer affects each other's content - * while they maintain separate indexes and marks. This method is + * while they maintain separate indexes. This method is * identical to {@code buf.slice(buf.readerIndex(), buf.readableBytes())}. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. @@ -2199,7 +2144,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Returns a retained slice of this buffer's readable bytes. Modifying the content * of the returned buffer or this buffer affects each other's content - * while they maintain separate indexes and marks. This method is + * while they maintain separate indexes. This method is * identical to {@code buf.slice(buf.readerIndex(), buf.readableBytes())}. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. @@ -2213,7 +2158,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Returns a slice of this buffer's sub-region. Modifying the content of * the returned buffer or this buffer affects each other's content while - * they maintain separate indexes and marks. + * they maintain separate indexes. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. *

    @@ -2225,7 +2170,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Returns a retained slice of this buffer's sub-region. Modifying the content of * the returned buffer or this buffer affects each other's content while - * they maintain separate indexes and marks. + * they maintain separate indexes. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. *

    @@ -2238,11 +2183,11 @@ public ByteBuf writeDoubleLE(double value) { /** * Returns a buffer which shares the whole region of this buffer. * Modifying the content of the returned buffer or this buffer affects - * each other's content while they maintain separate indexes and marks. + * each other's content while they maintain separate indexes. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. *

    - * The reader and writer marks will not be duplicated. Also be aware that this method will + * Be aware that this method will * NOT call {@link #retain()} and so the reference count will NOT be increased. * @return A buffer whose readable content is equivalent to the buffer returned by {@link #slice()}. * However this buffer will share the capacity of the underlying buffer, and therefore allows access to all of the @@ -2253,7 +2198,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Returns a retained buffer which shares the whole region of this buffer. * Modifying the content of the returned buffer or this buffer affects - * each other's content while they maintain separate indexes and marks. + * each other's content while they maintain separate indexes. * This method is identical to {@code buf.slice(0, buf.capacity())}. * This method does not modify {@code readerIndex} or {@code writerIndex} of * this buffer. @@ -2282,7 +2227,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Exposes this buffer's readable bytes as an NIO {@link ByteBuffer}. The returned buffer * either share or contains the copied content of this buffer, while changing the position - * and limit of the returned NIO buffer does not affect the indexes and marks of this buffer. + * and limit of the returned NIO buffer does not affect the indexes of this buffer. * This method is identical to {@code buf.nioBuffer(buf.readerIndex(), buf.readableBytes())}. * This method does not modify {@code readerIndex} or {@code writerIndex} of this buffer. * Please note that the returned NIO buffer will not see the changes of this buffer if this buffer @@ -2300,7 +2245,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Exposes this buffer's sub-region as an NIO {@link ByteBuffer}. The returned buffer * either share or contains the copied content of this buffer, while changing the position - * and limit of the returned NIO buffer does not affect the indexes and marks of this buffer. 
+ * and limit of the returned NIO buffer does not affect the indexes of this buffer. * This method does not modify {@code readerIndex} or {@code writerIndex} of this buffer. * Please note that the returned NIO buffer will not see the changes of this buffer if this buffer * is a dynamic buffer and it adjusted its capacity. @@ -2322,7 +2267,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Exposes this buffer's readable bytes as an NIO {@link ByteBuffer}'s. The returned buffer * either share or contains the copied content of this buffer, while changing the position - * and limit of the returned NIO buffer does not affect the indexes and marks of this buffer. + * and limit of the returned NIO buffer does not affect the indexes of this buffer. * This method does not modify {@code readerIndex} or {@code writerIndex} of this buffer. * Please note that the returned NIO buffer will not see the changes of this buffer if this buffer * is a dynamic buffer and it adjusted its capacity. @@ -2340,7 +2285,7 @@ public ByteBuf writeDoubleLE(double value) { /** * Exposes this buffer's bytes as an NIO {@link ByteBuffer}'s for the specified index and length * The returned buffer either share or contains the copied content of this buffer, while changing - * the position and limit of the returned NIO buffer does not affect the indexes and marks of this buffer. + * the position and limit of the returned NIO buffer does not affect the indexes of this buffer. * This method does not modify {@code readerIndex} or {@code writerIndex} of this buffer. Please note that the * returned NIO buffer will not see the changes of this buffer if this buffer is a dynamic * buffer and it adjusted its capacity. @@ -2392,6 +2337,28 @@ public ByteBuf writeDoubleLE(double value) { */ public abstract long memoryAddress(); + /** + * Returns {@code true} if this {@link ByteBuf} implementation is backed by a single memory region. 
+ * Composite buffer implementations must return false even if they currently hold ≤ 1 components. + * For buffers that return {@code true}, it's guaranteed that a successful call to {@link #discardReadBytes()} + * will increase the value of {@link #maxFastWritableBytes()} by the current {@code readerIndex}. + *

    + * This method will return {@code false} by default, and a {@code false} return value does not necessarily + * mean that the implementation is composite or that it is not backed by a single memory region. + */ + public boolean isContiguous() { + return false; + } + + /** + * A {@code ByteBuf} can turn into itself. + * @return This {@code ByteBuf} instance. + */ + @Override + public final ByteBuf asByteBuf() { + return this; + } + /** * Decodes this buffer's readable bytes into a string with the specified * character set name. This method is identical to @@ -2465,4 +2432,12 @@ public ByteBuf writeDoubleLE(double value) { @Override public abstract ByteBuf touch(Object hint); + + /** + * Used internally by {@link AbstractByteBuf#ensureAccessible()} to try to guard + * against using the buffer after it was released (best-effort). + */ + boolean isAccessible() { + return refCnt() != 0; + } } diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufAllocator.java b/buffer/src/main/java/io/netty/buffer/ByteBufAllocator.java index c30d9620462..802f5c85d33 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufAllocator.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufAllocator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetric.java b/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetric.java index 058d4a793ff..7f3ffbdcde7 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetric.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetric.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetricProvider.java b/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetricProvider.java index a8befa71986..84b01848c51 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetricProvider.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufAllocatorMetricProvider.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufConvertible.java b/buffer/src/main/java/io/netty/buffer/ByteBufConvertible.java new file mode 100644 index 00000000000..b2559f9c3b1 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/ByteBufConvertible.java @@ -0,0 +1,32 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer; + +/** + * An interface that can be implemented by any object that know how to turn itself into a {@link ByteBuf}. + * All {@link ByteBuf} classes implement this interface, and return themselves. + */ +public interface ByteBufConvertible { + /** + * Turn this object into a {@link ByteBuf}. + * This does not increment the reference count of the {@link ByteBuf} instance. + * The conversion or exposure of the {@link ByteBuf} must be idempotent, so that this method can be called + * either once, or multiple times, without causing any change in program behaviour. + * + * @return A {@link ByteBuf} instance from this object. 
+ */ + ByteBuf asByteBuf(); +} diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufHolder.java b/buffer/src/main/java/io/netty/buffer/ByteBufHolder.java index 9a4f5976958..c506dd95a31 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufHolder.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufHolder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufInputStream.java b/buffer/src/main/java/io/netty/buffer/ByteBufInputStream.java index 2d8d34d3231..c23fb9d91cd 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufInputStream.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufInputStream.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,11 @@ */ package io.netty.buffer; +import static java.util.Objects.requireNonNull; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; + import io.netty.util.ReferenceCounted; +import io.netty.util.internal.StringUtil; import java.io.DataInput; import java.io.DataInputStream; @@ -44,6 +48,8 @@ public class ByteBufInputStream extends InputStream implements DataInput { private final int startIndex; private final int endIndex; private boolean closed; + private int markReaderIndex; + /** * To preserve backwards compatibility (which didn't transfer ownership) we support a conditional flag which * indicates if {@link #buffer} should be released when this {@link InputStream} is closed. 
@@ -101,14 +107,12 @@ public ByteBufInputStream(ByteBuf buffer, boolean releaseOnClose) { * {@code writerIndex} */ public ByteBufInputStream(ByteBuf buffer, int length, boolean releaseOnClose) { - if (buffer == null) { - throw new NullPointerException("buffer"); - } + requireNonNull(buffer, "buffer"); if (length < 0) { if (releaseOnClose) { buffer.release(); } - throw new IllegalArgumentException("length: " + length); + checkPositiveOrZero(length, "length"); } if (length > buffer.readableBytes()) { if (releaseOnClose) { @@ -122,7 +126,7 @@ public ByteBufInputStream(ByteBuf buffer, int length, boolean releaseOnClose) { this.buffer = buffer; startIndex = buffer.readerIndex(); endIndex = startIndex + length; - buffer.markReaderIndex(); + markReaderIndex = startIndex; } /** @@ -150,9 +154,10 @@ public int available() throws IOException { return endIndex - buffer.readerIndex(); } + // Suppress a warning since the class is not thread-safe @Override - public void mark(int readlimit) { - buffer.markReaderIndex(); + public void mark(int readlimit) { // lgtm[java/non-sync-override] + markReaderIndex = buffer.readerIndex(); } @Override @@ -162,7 +167,8 @@ public boolean markSupported() { @Override public int read() throws IOException { - if (!buffer.isReadable()) { + int available = available(); + if (available == 0) { return -1; } return buffer.readByte() & 0xff; @@ -180,9 +186,10 @@ public int read(byte[] b, int off, int len) throws IOException { return len; } + // Suppress a warning since the class is not thread-safe @Override - public void reset() throws IOException { - buffer.resetReaderIndex(); + public void reset() throws IOException { // lgtm[java/non-sync-override] + buffer.readerIndex(markReaderIndex); } @Override @@ -202,7 +209,8 @@ public boolean readBoolean() throws IOException { @Override public byte readByte() throws IOException { - if (!buffer.isReadable()) { + int available = available(); + if (available == 0) { throw new EOFException(); } return 
buffer.readByte(); @@ -240,34 +248,42 @@ public int readInt() throws IOException { return buffer.readInt(); } - private final StringBuilder lineBuf = new StringBuilder(); + private StringBuilder lineBuf; @Override public String readLine() throws IOException { - lineBuf.setLength(0); + int available = available(); + if (available == 0) { + return null; + } - loop: while (true) { - if (!buffer.isReadable()) { - return lineBuf.length() > 0 ? lineBuf.toString() : null; - } + if (lineBuf != null) { + lineBuf.setLength(0); + } + loop: do { int c = buffer.readUnsignedByte(); + --available; switch (c) { case '\n': break loop; case '\r': - if (buffer.isReadable() && (char) buffer.getUnsignedByte(buffer.readerIndex()) == '\n') { + if (available > 0 && (char) buffer.getUnsignedByte(buffer.readerIndex()) == '\n') { buffer.skipBytes(1); + --available; } break loop; default: + if (lineBuf == null) { + lineBuf = new StringBuilder(); + } lineBuf.append((char) c); } - } + } while (available > 0); - return lineBuf.toString(); + return lineBuf != null && lineBuf.length() > 0 ? lineBuf.toString() : StringUtil.EMPTY_STRING; } @Override diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufOutputStream.java b/buffer/src/main/java/io/netty/buffer/ByteBufOutputStream.java index c4f78053694..ff184928cb5 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufOutputStream.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufOutputStream.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.buffer; +import static java.util.Objects.requireNonNull; + import io.netty.util.CharsetUtil; import java.io.DataOutput; @@ -39,15 +41,14 @@ public class ByteBufOutputStream extends OutputStream implements DataOutput { private final ByteBuf buffer; private final int startIndex; - private final DataOutputStream utf8out = new DataOutputStream(this); + private DataOutputStream utf8out; // lazily-instantiated + private boolean closed; /** * Creates a new stream which writes data to the specified {@code buffer}. */ public ByteBufOutputStream(ByteBuf buffer) { - if (buffer == null) { - throw new NullPointerException("buffer"); - } + requireNonNull(buffer, "buffer"); this.buffer = buffer; startIndex = buffer.writerIndex(); } @@ -133,7 +134,15 @@ public void writeShort(int v) throws IOException { @Override public void writeUTF(String s) throws IOException { - utf8out.writeUTF(s); + DataOutputStream out = utf8out; + if (out == null) { + if (closed) { + throw new IOException("The stream is closed"); + } + // Suppress a warning since the stream is closed in the close() method + utf8out = out = new DataOutputStream(this); // lgtm[java/output-resource-leak] + } + out.writeUTF(s); } /** @@ -142,4 +151,20 @@ public void writeUTF(String s) throws IOException { public ByteBuf buffer() { return buffer; } + + @Override + public void close() throws IOException { + if (closed) { + return; + } + closed = true; + + try { + super.close(); + } finally { + if (utf8out != null) { + utf8out.close(); + } + } + } } diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufProcessor.java b/buffer/src/main/java/io/netty/buffer/ByteBufProcessor.java index 4e9ce6d488d..acc917fc068 
100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufProcessor.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufProcessor.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -28,109 +28,59 @@ public interface ByteBufProcessor extends ByteProcessor { * @deprecated Use {@link ByteProcessor#FIND_NUL}. */ @Deprecated - ByteBufProcessor FIND_NUL = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value != 0; - } - }; + ByteBufProcessor FIND_NUL = value -> value != 0; /** * @deprecated Use {@link ByteProcessor#FIND_NON_NUL}. */ @Deprecated - ByteBufProcessor FIND_NON_NUL = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value == 0; - } - }; + ByteBufProcessor FIND_NON_NUL = value -> value == 0; /** * @deprecated Use {@link ByteProcessor#FIND_CR}. */ @Deprecated - ByteBufProcessor FIND_CR = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value != '\r'; - } - }; + ByteBufProcessor FIND_CR = value -> value != '\r'; /** * @deprecated Use {@link ByteProcessor#FIND_NON_CR}. */ @Deprecated - ByteBufProcessor FIND_NON_CR = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value == '\r'; - } - }; + ByteBufProcessor FIND_NON_CR = value -> value == '\r'; /** * @deprecated Use {@link ByteProcessor#FIND_LF}. 
*/ @Deprecated - ByteBufProcessor FIND_LF = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value != '\n'; - } - }; + ByteBufProcessor FIND_LF = value -> value != '\n'; /** * @deprecated Use {@link ByteProcessor#FIND_NON_LF}. */ @Deprecated - ByteBufProcessor FIND_NON_LF = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value == '\n'; - } - }; + ByteBufProcessor FIND_NON_LF = value -> value == '\n'; /** * @deprecated Use {@link ByteProcessor#FIND_CRLF}. */ @Deprecated - ByteBufProcessor FIND_CRLF = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value != '\r' && value != '\n'; - } - }; + ByteBufProcessor FIND_CRLF = value -> value != '\r' && value != '\n'; /** * @deprecated Use {@link ByteProcessor#FIND_NON_CRLF}. */ @Deprecated - ByteBufProcessor FIND_NON_CRLF = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value == '\r' || value == '\n'; - } - }; + ByteBufProcessor FIND_NON_CRLF = value -> value == '\r' || value == '\n'; /** * @deprecated Use {@link ByteProcessor#FIND_LINEAR_WHITESPACE}. */ @Deprecated - ByteBufProcessor FIND_LINEAR_WHITESPACE = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value != ' ' && value != '\t'; - } - }; + ByteBufProcessor FIND_LINEAR_WHITESPACE = value -> value != ' ' && value != '\t'; /** * @deprecated Use {@link ByteProcessor#FIND_NON_LINEAR_WHITESPACE}. 
*/ @Deprecated - ByteBufProcessor FIND_NON_LINEAR_WHITESPACE = new ByteBufProcessor() { - @Override - public boolean process(byte value) throws Exception { - return value == ' ' || value == '\t'; - } - }; + ByteBufProcessor FIND_NON_LINEAR_WHITESPACE = value -> value == ' ' || value == '\t'; } diff --git a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java index b0ecb35cace..68f1737b129 100644 --- a/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java +++ b/buffer/src/main/java/io/netty/buffer/ByteBufUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,15 +18,19 @@ import io.netty.util.AsciiString; import io.netty.util.ByteProcessor; import io.netty.util.CharsetUtil; -import io.netty.util.Recycler; -import io.netty.util.Recycler.Handle; +import io.netty.util.IllegalReferenceCountException; import io.netty.util.concurrent.FastThreadLocal; +import io.netty.util.internal.MathUtil; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import io.netty.util.internal.SystemPropertyUtil; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; +import java.io.IOException; +import java.io.OutputStream; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.CharBuffer; @@ -40,9 +44,10 @@ import java.util.Locale; import static io.netty.util.internal.MathUtil.isOutOfBounds; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static 
io.netty.util.internal.ObjectUtil.checkPositiveOrZero; import static io.netty.util.internal.StringUtil.NEWLINE; import static io.netty.util.internal.StringUtil.isSurrogate; +import static java.util.Objects.requireNonNull; /** * A collection of utility methods that is related with handling {@link ByteBuf}, @@ -51,10 +56,10 @@ public final class ByteBufUtil { private static final InternalLogger logger = InternalLoggerFactory.getInstance(ByteBufUtil.class); - private static final FastThreadLocal CHAR_BUFFERS = new FastThreadLocal() { + private static final FastThreadLocal BYTE_ARRAYS = new FastThreadLocal() { @Override - protected CharBuffer initialValue() throws Exception { - return CharBuffer.allocate(1024); + protected byte[] initialValue() throws Exception { + return PlatformDependent.allocateUninitializedArray(MAX_TL_ARRAY_LEN); } }; @@ -64,6 +69,7 @@ protected CharBuffer initialValue() throws Exception { private static final int MAX_BYTES_PER_CHAR_UTF8 = (int) CharsetUtil.encoder(CharsetUtil.UTF_8).maxBytesPerChar(); + static final int WRITE_CHUNK_SIZE = 8192; static final ByteBufAllocator DEFAULT_ALLOCATOR; static { @@ -92,8 +98,36 @@ protected CharBuffer initialValue() throws Exception { logger.debug("-Dio.netty.maxThreadLocalCharBufferSize: {}", MAX_CHAR_BUFFER_SIZE); } + static final int MAX_TL_ARRAY_LEN = 1024; + + /** + * Allocates a new array if minLength > {@link ByteBufUtil#MAX_TL_ARRAY_LEN} + */ + static byte[] threadLocalTempArray(int minLength) { + return minLength <= MAX_TL_ARRAY_LEN ? 
BYTE_ARRAYS.get() + : PlatformDependent.allocateUninitializedArray(minLength); + } + + /** + * @return whether the specified buffer has a nonzero ref count + */ + public static boolean isAccessible(ByteBuf buffer) { + return buffer.isAccessible(); + } + /** - * Returns a hex dump + * @throws IllegalReferenceCountException if the buffer has a zero ref count + * @return the passed in buffer + */ + public static ByteBuf ensureAccessible(ByteBuf buffer) { + if (!buffer.isAccessible()) { + throw new IllegalReferenceCountException(buffer.refCnt()); + } + return buffer; + } + + /** + * Returns a hex dump * of the specified buffer's readable bytes. */ public static String hexDump(ByteBuf buffer) { @@ -101,7 +135,7 @@ public static String hexDump(ByteBuf buffer) { } /** - * Returns a hex dump + * Returns a hex dump * of the specified buffer's sub-region. */ public static String hexDump(ByteBuf buffer, int fromIndex, int length) { @@ -109,7 +143,7 @@ public static String hexDump(ByteBuf buffer, int fromIndex, int length) { } /** - * Returns a hex dump + * Returns a hex dump * of the specified byte array. */ public static String hexDump(byte[] array) { @@ -117,7 +151,7 @@ public static String hexDump(byte[] array) { } /** - * Returns a hex dump + * Returns a hex dump * of the specified byte array's sub-region. */ public static String hexDump(byte[] array, int fromIndex, int length) { @@ -165,7 +199,7 @@ public static int hashCode(ByteBuf buffer) { final int intCount = aLen >>> 2; final int byteCount = aLen & 3; - int hashCode = 1; + int hashCode = EmptyByteBuf.EMPTY_BYTE_BUF_HASH_CODE; int arrayIndex = buffer.readerIndex(); if (buffer.order() == ByteOrder.BIG_ENDIAN) { for (int i = intCount; i > 0; i --) { @@ -192,20 +226,125 @@ public static int hashCode(ByteBuf buffer) { /** * Returns the reader index of needle in haystack, or -1 if needle is not in haystack. 
+ * This method uses the Two-Way + * string matching algorithm, which yields O(1) space complexity and excellent performance. */ public static int indexOf(ByteBuf needle, ByteBuf haystack) { - // TODO: maybe use Boyer Moore for efficiency. - int attempts = haystack.readableBytes() - needle.readableBytes() + 1; - for (int i = 0; i < attempts; i++) { - if (equals(needle, needle.readerIndex(), - haystack, haystack.readerIndex() + i, - needle.readableBytes())) { - return haystack.readerIndex() + i; + if (haystack == null || needle == null) { + return -1; + } + + if (needle.readableBytes() > haystack.readableBytes()) { + return -1; + } + + int n = haystack.readableBytes(); + int m = needle.readableBytes(); + if (m == 0) { + return 0; + } + + // When the needle has only one byte that can be read, + // the firstIndexOf method needs to be called + if (m == 1) { + return firstIndexOf((AbstractByteBuf) haystack, haystack.readerIndex(), + haystack.writerIndex(), needle.getByte(needle.readerIndex())); + } + + int i; + int j = 0; + int aStartIndex = needle.readerIndex(); + int bStartIndex = haystack.readerIndex(); + long suffixes = maxSuf(needle, m, aStartIndex, true); + long prefixes = maxSuf(needle, m, aStartIndex, false); + int ell = Math.max((int) (suffixes >> 32), (int) (prefixes >> 32)); + int per = Math.max((int) suffixes, (int) prefixes); + int memory; + int length = Math.min(m - per, ell + 1); + + if (equals(needle, aStartIndex, needle, aStartIndex + per, length)) { + memory = -1; + while (j <= n - m) { + i = Math.max(ell, memory) + 1; + while (i < m && needle.getByte(i + aStartIndex) == haystack.getByte(i + j + bStartIndex)) { + ++i; + } + if (i > n) { + return -1; + } + if (i >= m) { + i = ell; + while (i > memory && needle.getByte(i + aStartIndex) == haystack.getByte(i + j + bStartIndex)) { + --i; + } + if (i <= memory) { + return j; + } + j += per; + memory = m - per - 1; + } else { + j += i - ell; + memory = -1; + } + } + } else { + per = Math.max(ell + 1, m - ell 
- 1) + 1; + while (j <= n - m) { + i = ell + 1; + while (i < m && needle.getByte(i + aStartIndex) == haystack.getByte(i + j + bStartIndex)) { + ++i; + } + if (i > n) { + return -1; + } + if (i >= m) { + i = ell; + while (i >= 0 && needle.getByte(i + aStartIndex) == haystack.getByte(i + j + bStartIndex)) { + --i; + } + if (i < 0) { + return j; + } + j += per; + } else { + j += i - ell; + } } } return -1; } + private static long maxSuf(ByteBuf x, int m, int start, boolean isSuffix) { + int p = 1; + int ms = -1; + int j = start; + int k = 1; + byte a; + byte b; + while (j + k < m) { + a = x.getByte(j + k); + b = x.getByte(ms + k); + boolean suffix = isSuffix ? a < b : a > b; + if (suffix) { + j += k; + k = 1; + p = j - ms; + } else if (a == b) { + if (k != p) { + ++k; + } else { + j += p; + k = 1; + } + } else { + ms = j; + j = ms + 1; + k = p = 1; + } + } + return ((long) ms << 32) + p; + } + /** * Returns {@code true} if and only if the two specified buffers are * identical to each other for {@code length} bytes starting at {@code aStartIndex} @@ -215,9 +354,13 @@ public static int indexOf(ByteBuf needle, ByteBuf haystack) { * {@code a[aStartIndex : aStartIndex + length] == b[bStartIndex : bStartIndex + length]} */ public static boolean equals(ByteBuf a, int aStartIndex, ByteBuf b, int bStartIndex, int length) { - if (aStartIndex < 0 || bStartIndex < 0 || length < 0) { - throw new IllegalArgumentException("All indexes and lengths must be non-negative"); - } + requireNonNull(a, "a"); + requireNonNull(b, "b"); + // All indexes and lengths must be non-negative + checkPositiveOrZero(aStartIndex, "aStartIndex"); + checkPositiveOrZero(bStartIndex, "bStartIndex"); + checkPositiveOrZero(length, "length"); + if (a.writerIndex() - length < aStartIndex || b.writerIndex() - length < bStartIndex) { return false; } @@ -260,6 +403,9 @@ public static boolean equals(ByteBuf a, int aStartIndex, ByteBuf b, int bStartIn * This method is useful when implementing a new buffer type. 
*/ public static boolean equals(ByteBuf bufferA, ByteBuf bufferB) { + if (bufferA == bufferB) { + return true; + } final int aLen = bufferA.readableBytes(); if (aLen != bufferB.readableBytes()) { return false; @@ -272,6 +418,9 @@ public static boolean equals(ByteBuf bufferA, ByteBuf bufferB) { * This method is useful when implementing a new buffer type. */ public static int compare(ByteBuf bufferA, ByteBuf bufferB) { + if (bufferA == bufferB) { + return 0; + } final int aLen = bufferA.readableBytes(); final int bLen = bufferB.readableBytes(); final int minLength = Math.min(aLen, bLen); @@ -354,16 +503,124 @@ private static long compareUintBigEndianB( return 0; } + private static final class SWARByteSearch { + + private static long compilePattern(byte byteToFind) { + return (byteToFind & 0xFFL) * 0x101010101010101L; + } + + private static int firstAnyPattern(long word, long pattern, boolean leading) { + long input = word ^ pattern; + long tmp = (input & 0x7F7F7F7F7F7F7F7FL) + 0x7F7F7F7F7F7F7F7FL; + tmp = ~(tmp | input | 0x7F7F7F7F7F7F7F7FL); + final int binaryPosition = leading? 
Long.numberOfLeadingZeros(tmp) : Long.numberOfTrailingZeros(tmp); + return binaryPosition >>> 3; + } + } + + private static int unrolledFirstIndexOf(AbstractByteBuf buffer, int fromIndex, int byteCount, byte value) { + assert byteCount > 0 && byteCount < 8; + if (buffer._getByte(fromIndex) == value) { + return fromIndex; + } + if (byteCount == 1) { + return -1; + } + if (buffer._getByte(fromIndex + 1) == value) { + return fromIndex + 1; + } + if (byteCount == 2) { + return -1; + } + if (buffer._getByte(fromIndex + 2) == value) { + return fromIndex + 2; + } + if (byteCount == 3) { + return -1; + } + if (buffer._getByte(fromIndex + 3) == value) { + return fromIndex + 3; + } + if (byteCount == 4) { + return -1; + } + if (buffer._getByte(fromIndex + 4) == value) { + return fromIndex + 4; + } + if (byteCount == 5) { + return -1; + } + if (buffer._getByte(fromIndex + 5) == value) { + return fromIndex + 5; + } + if (byteCount == 6) { + return -1; + } + if (buffer._getByte(fromIndex + 6) == value) { + return fromIndex + 6; + } + return -1; + } + + /** + * This is using a SWAR (SIMD Within A Register) batch read technique to minimize bound-checks and improve memory + * usage while searching for {@code value}. 
+ */ + static int firstIndexOf(AbstractByteBuf buffer, int fromIndex, int toIndex, byte value) { + fromIndex = Math.max(fromIndex, 0); + if (fromIndex >= toIndex || buffer.capacity() == 0) { + return -1; + } + final int length = toIndex - fromIndex; + buffer.checkIndex(fromIndex, length); + if (!PlatformDependent.isUnaligned()) { + return linearFirstIndexOf(buffer, fromIndex, toIndex, value); + } + assert PlatformDependent.isUnaligned(); + int offset = fromIndex; + final int byteCount = length & 7; + if (byteCount > 0) { + final int index = unrolledFirstIndexOf(buffer, fromIndex, byteCount, value); + if (index != -1) { + return index; + } + offset += byteCount; + if (offset == toIndex) { + return -1; + } + } + final int longCount = length >>> 3; + final ByteOrder nativeOrder = ByteOrder.nativeOrder(); + final boolean isNative = nativeOrder == buffer.order(); + final boolean useLE = nativeOrder == ByteOrder.LITTLE_ENDIAN; + final long pattern = SWARByteSearch.compilePattern(value); + for (int i = 0; i < longCount; i++) { + // use the faster available getLong + final long word = useLE? buffer._getLongLE(offset) : buffer._getLong(offset); + int index = SWARByteSearch.firstAnyPattern(word, pattern, isNative); + if (index < Long.BYTES) { + return offset + index; + } + offset += Long.BYTES; + } + return -1; + } + + private static int linearFirstIndexOf(AbstractByteBuf buffer, int fromIndex, int toIndex, byte value) { + for (int i = fromIndex; i < toIndex; i++) { + if (buffer._getByte(i) == value) { + return i; + } + } + return -1; + } + /** * The default implementation of {@link ByteBuf#indexOf(int, int, byte)}. * This method is useful when implementing a new buffer type. 
*/ public static int indexOf(ByteBuf buffer, int fromIndex, int toIndex, byte value) { - if (fromIndex <= toIndex) { - return firstIndexOf(buffer, fromIndex, toIndex, value); - } else { - return lastIndexOf(buffer, fromIndex, toIndex, value); - } + return buffer.indexOf(fromIndex, toIndex, value); } /** @@ -403,7 +660,8 @@ public static long swapLong(long value) { */ @SuppressWarnings("deprecation") public static ByteBuf writeShortBE(ByteBuf buf, int shortValue) { - return buf.order() == ByteOrder.BIG_ENDIAN? buf.writeShort(shortValue) : buf.writeShortLE(shortValue); + return buf.order() == ByteOrder.BIG_ENDIAN? buf.writeShort(shortValue) : + buf.writeShort(swapShort((short) shortValue)); } /** @@ -411,7 +669,8 @@ public static ByteBuf writeShortBE(ByteBuf buf, int shortValue) { */ @SuppressWarnings("deprecation") public static ByteBuf setShortBE(ByteBuf buf, int index, int shortValue) { - return buf.order() == ByteOrder.BIG_ENDIAN? buf.setShort(index, shortValue) : buf.setShortLE(index, shortValue); + return buf.order() == ByteOrder.BIG_ENDIAN? buf.setShort(index, shortValue) : + buf.setShort(index, swapShort((short) shortValue)); } /** @@ -419,7 +678,8 @@ public static ByteBuf setShortBE(ByteBuf buf, int index, int shortValue) { */ @SuppressWarnings("deprecation") public static ByteBuf writeMediumBE(ByteBuf buf, int mediumValue) { - return buf.order() == ByteOrder.BIG_ENDIAN? buf.writeMedium(mediumValue) : buf.writeMediumLE(mediumValue); + return buf.order() == ByteOrder.BIG_ENDIAN? 
buf.writeMedium(mediumValue) : + buf.writeMedium(swapMedium(mediumValue)); } /** @@ -439,30 +699,37 @@ public static ByteBuf readBytes(ByteBufAllocator alloc, ByteBuf buffer, int leng } } - private static int firstIndexOf(ByteBuf buffer, int fromIndex, int toIndex, byte value) { - fromIndex = Math.max(fromIndex, 0); - if (fromIndex >= toIndex || buffer.capacity() == 0) { + static int lastIndexOf(AbstractByteBuf buffer, int fromIndex, int toIndex, byte value) { + assert fromIndex > toIndex; + final int capacity = buffer.capacity(); + fromIndex = Math.min(fromIndex, capacity); + if (fromIndex < 0 || capacity == 0) { return -1; } + buffer.checkIndex(toIndex, fromIndex - toIndex); + for (int i = fromIndex - 1; i >= toIndex; i--) { + if (buffer._getByte(i) == value) { + return i; + } + } - return buffer.forEachByte(fromIndex, toIndex - fromIndex, new ByteProcessor.IndexOfProcessor(value)); + return -1; } - private static int lastIndexOf(ByteBuf buffer, int fromIndex, int toIndex, byte value) { - fromIndex = Math.min(fromIndex, buffer.capacity()); - if (fromIndex < 0 || buffer.capacity() == 0) { - return -1; + private static CharSequence checkCharSequenceBounds(CharSequence seq, int start, int end) { + if (MathUtil.isOutOfBounds(start, end - start, seq.length())) { + throw new IndexOutOfBoundsException("expected: 0 <= start(" + start + ") <= end (" + end + + ") <= seq.length(" + seq.length() + ')'); } - - return buffer.forEachByteDesc(toIndex, fromIndex - toIndex, new ByteProcessor.IndexOfProcessor(value)); + return seq; } /** - * Encode a {@link CharSequence} in UTF-8 and write + * Encode a {@link CharSequence} in UTF-8 and write * it to a {@link ByteBuf} allocated with {@code alloc}. * @param alloc The allocator used to allocate a new {@link ByteBuf}. * @param seq The characters to write into a buffer. - * @return The {@link ByteBuf} which contains the UTF-8 encoded + * @return The {@link ByteBuf} which contains the UTF-8 encoded * result. 
*/ public static ByteBuf writeUtf8(ByteBufAllocator alloc, CharSequence seq) { @@ -473,7 +740,7 @@ public static ByteBuf writeUtf8(ByteBufAllocator alloc, CharSequence seq) { } /** - * Encode a {@link CharSequence} in UTF-8 and write + * Encode a {@link CharSequence} in UTF-8 and write * it to a {@link ByteBuf}. *

    * It behaves like {@link #reserveAndWriteUtf8(ByteBuf, CharSequence, int)} with {@code reserveBytes} @@ -481,11 +748,21 @@ public static ByteBuf writeUtf8(ByteBufAllocator alloc, CharSequence seq) { * This method returns the actual number of bytes written. */ public static int writeUtf8(ByteBuf buf, CharSequence seq) { - return reserveAndWriteUtf8(buf, seq, utf8MaxBytes(seq)); + int seqLength = seq.length(); + return reserveAndWriteUtf8Seq(buf, seq, 0, seqLength, utf8MaxBytes(seqLength)); } /** - * Encode a {@link CharSequence} in UTF-8 and write + * Equivalent to {@link #writeUtf8(ByteBuf, CharSequence) writeUtf8(buf, seq.subSequence(start, end))} + * but avoids subsequence object allocation. + */ + public static int writeUtf8(ByteBuf buf, CharSequence seq, int start, int end) { + checkCharSequenceBounds(seq, start, end); + return reserveAndWriteUtf8Seq(buf, seq, start, end, utf8MaxBytes(end - start)); + } + + /** + * Encode a {@link CharSequence} in UTF-8 and write * it into {@code reserveBytes} of a {@link ByteBuf}. *

    * The {@code reserveBytes} must be computed (ie eagerly using {@link #utf8MaxBytes(CharSequence)} @@ -494,31 +771,152 @@ public static int writeUtf8(ByteBuf buf, CharSequence seq) { * This method returns the actual number of bytes written. */ public static int reserveAndWriteUtf8(ByteBuf buf, CharSequence seq, int reserveBytes) { + return reserveAndWriteUtf8Seq(buf, seq, 0, seq.length(), reserveBytes); + } + + /** + * Equivalent to {@link #reserveAndWriteUtf8(ByteBuf, CharSequence, int) + * reserveAndWriteUtf8(buf, seq.subSequence(start, end), reserveBytes)} but avoids + * subsequence object allocation if possible. + * + * @return actual number of bytes written + */ + public static int reserveAndWriteUtf8(ByteBuf buf, CharSequence seq, int start, int end, int reserveBytes) { + return reserveAndWriteUtf8Seq(buf, checkCharSequenceBounds(seq, start, end), start, end, reserveBytes); + } + + private static int reserveAndWriteUtf8Seq(ByteBuf buf, CharSequence seq, int start, int end, int reserveBytes) { for (;;) { - if (buf instanceof AbstractByteBuf) { + if (buf instanceof WrappedCompositeByteBuf) { + // WrappedCompositeByteBuf is a sub-class of AbstractByteBuf so it needs special handling. + buf = buf.unwrap(); + } else if (buf instanceof AbstractByteBuf) { AbstractByteBuf byteBuf = (AbstractByteBuf) buf; byteBuf.ensureWritable0(reserveBytes); - int written = writeUtf8(byteBuf, byteBuf.writerIndex, seq, seq.length()); + int written = writeUtf8(byteBuf, byteBuf.writerIndex, reserveBytes, seq, start, end); byteBuf.writerIndex += written; return written; } else if (buf instanceof WrappedByteBuf) { // Unwrap as the wrapped buffer may be an AbstractByteBuf and so we can use fast-path. 
buf = buf.unwrap(); } else { - byte[] bytes = seq.toString().getBytes(CharsetUtil.UTF_8); + byte[] bytes = seq.subSequence(start, end).toString().getBytes(CharsetUtil.UTF_8); buf.writeBytes(bytes); return bytes.length; } } } + static int writeUtf8(AbstractByteBuf buffer, int writerIndex, int reservedBytes, CharSequence seq, int len) { + return writeUtf8(buffer, writerIndex, reservedBytes, seq, 0, len); + } + // Fast-Path implementation - static int writeUtf8(AbstractByteBuf buffer, int writerIndex, CharSequence seq, int len) { + static int writeUtf8(AbstractByteBuf buffer, int writerIndex, int reservedBytes, + CharSequence seq, int start, int end) { + if (seq instanceof AsciiString) { + writeAsciiString(buffer, writerIndex, (AsciiString) seq, start, end); + return end - start; + } + if (PlatformDependent.hasUnsafe()) { + if (buffer.hasArray()) { + return unsafeWriteUtf8(buffer.array(), PlatformDependent.byteArrayBaseOffset(), + buffer.arrayOffset() + writerIndex, seq, start, end); + } + if (buffer.hasMemoryAddress()) { + return unsafeWriteUtf8(null, buffer.memoryAddress(), writerIndex, seq, start, end); + } + } else { + if (buffer.hasArray()) { + return safeArrayWriteUtf8(buffer.array(), buffer.arrayOffset() + writerIndex, seq, start, end); + } + if (buffer.isDirect()) { + assert buffer.nioBufferCount() == 1; + final ByteBuffer internalDirectBuffer = buffer.internalNioBuffer(writerIndex, reservedBytes); + final int bufferPosition = internalDirectBuffer.position(); + return safeDirectWriteUtf8(internalDirectBuffer, bufferPosition, seq, start, end); + } + } + return safeWriteUtf8(buffer, writerIndex, seq, start, end); + } + + // AsciiString Fast-Path implementation - no explicit bound-checks + static void writeAsciiString(AbstractByteBuf buffer, int writerIndex, AsciiString seq, int start, int end) { + final int begin = seq.arrayOffset() + start; + final int length = end - start; + if (PlatformDependent.hasUnsafe()) { + if (buffer.hasArray()) { + 
PlatformDependent.copyMemory(seq.array(), begin, + buffer.array(), buffer.arrayOffset() + writerIndex, length); + return; + } + if (buffer.hasMemoryAddress()) { + PlatformDependent.copyMemory(seq.array(), begin, buffer.memoryAddress() + writerIndex, length); + return; + } + } + if (buffer.hasArray()) { + System.arraycopy(seq.array(), begin, buffer.array(), buffer.arrayOffset() + writerIndex, length); + return; + } + buffer.setBytes(writerIndex, seq.array(), begin, length); + } + + // Safe off-heap Fast-Path implementation + private static int safeDirectWriteUtf8(ByteBuffer buffer, int writerIndex, CharSequence seq, int start, int end) { + assert !(seq instanceof AsciiString); int oldWriterIndex = writerIndex; // We can use the _set methods as these not need to do any index checks and reference checks. // This is possible as we called ensureWritable(...) before. - for (int i = 0; i < len; i++) { + for (int i = start; i < end; i++) { + char c = seq.charAt(i); + if (c < 0x80) { + buffer.put(writerIndex++, (byte) c); + } else if (c < 0x800) { + buffer.put(writerIndex++, (byte) (0xc0 | (c >> 6))); + buffer.put(writerIndex++, (byte) (0x80 | (c & 0x3f))); + } else if (isSurrogate(c)) { + if (!Character.isHighSurrogate(c)) { + buffer.put(writerIndex++, WRITE_UTF_UNKNOWN); + continue; + } + // Surrogate Pair consumes 2 characters. + if (++i == end) { + buffer.put(writerIndex++, WRITE_UTF_UNKNOWN); + break; + } + // Extra method is copied here to NOT allow inlining of writeUtf8 + // and increase the chance to inline CharSequence::charAt instead + char c2 = seq.charAt(i); + if (!Character.isLowSurrogate(c2)) { + buffer.put(writerIndex++, WRITE_UTF_UNKNOWN); + buffer.put(writerIndex++, Character.isHighSurrogate(c2)? WRITE_UTF_UNKNOWN : (byte) c2); + } else { + int codePoint = Character.toCodePoint(c, c2); + // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. 
+ buffer.put(writerIndex++, (byte) (0xf0 | (codePoint >> 18))); + buffer.put(writerIndex++, (byte) (0x80 | ((codePoint >> 12) & 0x3f))); + buffer.put(writerIndex++, (byte) (0x80 | ((codePoint >> 6) & 0x3f))); + buffer.put(writerIndex++, (byte) (0x80 | (codePoint & 0x3f))); + } + } else { + buffer.put(writerIndex++, (byte) (0xe0 | (c >> 12))); + buffer.put(writerIndex++, (byte) (0x80 | ((c >> 6) & 0x3f))); + buffer.put(writerIndex++, (byte) (0x80 | (c & 0x3f))); + } + } + return writerIndex - oldWriterIndex; + } + + // Safe off-heap Fast-Path implementation + private static int safeWriteUtf8(AbstractByteBuf buffer, int writerIndex, CharSequence seq, int start, int end) { + assert !(seq instanceof AsciiString); + int oldWriterIndex = writerIndex; + + // We can use the _set methods as these not need to do any index checks and reference checks. + // This is possible as we called ensureWritable(...) before. + for (int i = start; i < end; i++) { char c = seq.charAt(i); if (c < 0x80) { buffer._setByte(writerIndex++, (byte) c); @@ -530,27 +928,25 @@ static int writeUtf8(AbstractByteBuf buffer, int writerIndex, CharSequence seq, buffer._setByte(writerIndex++, WRITE_UTF_UNKNOWN); continue; } - final char c2; - try { - // Surrogate Pair consumes 2 characters. Optimistically try to get the next character to avoid - // duplicate bounds checking with charAt. If an IndexOutOfBoundsException is thrown we will - // re-throw a more informative exception describing the problem. - c2 = seq.charAt(++i); - } catch (IndexOutOfBoundsException ignored) { + // Surrogate Pair consumes 2 characters. + if (++i == end) { buffer._setByte(writerIndex++, WRITE_UTF_UNKNOWN); break; } + // Extra method is copied here to NOT allow inlining of writeUtf8 + // and increase the chance to inline CharSequence::charAt instead + char c2 = seq.charAt(i); if (!Character.isLowSurrogate(c2)) { buffer._setByte(writerIndex++, WRITE_UTF_UNKNOWN); - buffer._setByte(writerIndex++, Character.isHighSurrogate(c2) ? 
WRITE_UTF_UNKNOWN : c2); - continue; + buffer._setByte(writerIndex++, Character.isHighSurrogate(c2)? WRITE_UTF_UNKNOWN : c2); + } else { + int codePoint = Character.toCodePoint(c, c2); + // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. + buffer._setByte(writerIndex++, (byte) (0xf0 | (codePoint >> 18))); + buffer._setByte(writerIndex++, (byte) (0x80 | ((codePoint >> 12) & 0x3f))); + buffer._setByte(writerIndex++, (byte) (0x80 | ((codePoint >> 6) & 0x3f))); + buffer._setByte(writerIndex++, (byte) (0x80 | (codePoint & 0x3f))); } - int codePoint = Character.toCodePoint(c, c2); - // See http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. - buffer._setByte(writerIndex++, (byte) (0xf0 | (codePoint >> 18))); - buffer._setByte(writerIndex++, (byte) (0x80 | ((codePoint >> 12) & 0x3f))); - buffer._setByte(writerIndex++, (byte) (0x80 | ((codePoint >> 6) & 0x3f))); - buffer._setByte(writerIndex++, (byte) (0x80 | (codePoint & 0x3f))); } else { buffer._setByte(writerIndex++, (byte) (0xe0 | (c >> 12))); buffer._setByte(writerIndex++, (byte) (0x80 | ((c >> 6) & 0x3f))); @@ -560,6 +956,96 @@ static int writeUtf8(AbstractByteBuf buffer, int writerIndex, CharSequence seq, return writerIndex - oldWriterIndex; } + // safe byte[] Fast-Path implementation + private static int safeArrayWriteUtf8(byte[] buffer, int writerIndex, CharSequence seq, int start, int end) { + int oldWriterIndex = writerIndex; + for (int i = start; i < end; i++) { + char c = seq.charAt(i); + if (c < 0x80) { + buffer[writerIndex++] = (byte) c; + } else if (c < 0x800) { + buffer[writerIndex++] = (byte) (0xc0 | (c >> 6)); + buffer[writerIndex++] = (byte) (0x80 | (c & 0x3f)); + } else if (isSurrogate(c)) { + if (!Character.isHighSurrogate(c)) { + buffer[writerIndex++] = WRITE_UTF_UNKNOWN; + continue; + } + // Surrogate Pair consumes 2 characters. 
+ if (++i == end) { + buffer[writerIndex++] = WRITE_UTF_UNKNOWN; + break; + } + char c2 = seq.charAt(i); + // Extra method is copied here to NOT allow inlining of writeUtf8 + // and increase the chance to inline CharSequence::charAt instead + if (!Character.isLowSurrogate(c2)) { + buffer[writerIndex++] = WRITE_UTF_UNKNOWN; + buffer[writerIndex++] = (byte) (Character.isHighSurrogate(c2)? WRITE_UTF_UNKNOWN : c2); + } else { + int codePoint = Character.toCodePoint(c, c2); + // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. + buffer[writerIndex++] = (byte) (0xf0 | (codePoint >> 18)); + buffer[writerIndex++] = (byte) (0x80 | ((codePoint >> 12) & 0x3f)); + buffer[writerIndex++] = (byte) (0x80 | ((codePoint >> 6) & 0x3f)); + buffer[writerIndex++] = (byte) (0x80 | (codePoint & 0x3f)); + } + } else { + buffer[writerIndex++] = (byte) (0xe0 | (c >> 12)); + buffer[writerIndex++] = (byte) (0x80 | ((c >> 6) & 0x3f)); + buffer[writerIndex++] = (byte) (0x80 | (c & 0x3f)); + } + } + return writerIndex - oldWriterIndex; + } + + // unsafe Fast-Path implementation + private static int unsafeWriteUtf8(byte[] buffer, long memoryOffset, int writerIndex, + CharSequence seq, int start, int end) { + assert !(seq instanceof AsciiString); + long writerOffset = memoryOffset + writerIndex; + final long oldWriterOffset = writerOffset; + for (int i = start; i < end; i++) { + char c = seq.charAt(i); + if (c < 0x80) { + PlatformDependent.putByte(buffer, writerOffset++, (byte) c); + } else if (c < 0x800) { + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0xc0 | (c >> 6))); + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0x80 | (c & 0x3f))); + } else if (isSurrogate(c)) { + if (!Character.isHighSurrogate(c)) { + PlatformDependent.putByte(buffer, writerOffset++, WRITE_UTF_UNKNOWN); + continue; + } + // Surrogate Pair consumes 2 characters. 
+ if (++i == end) { + PlatformDependent.putByte(buffer, writerOffset++, WRITE_UTF_UNKNOWN); + break; + } + char c2 = seq.charAt(i); + // Extra method is copied here to NOT allow inlining of writeUtf8 + // and increase the chance to inline CharSequence::charAt instead + if (!Character.isLowSurrogate(c2)) { + PlatformDependent.putByte(buffer, writerOffset++, WRITE_UTF_UNKNOWN); + PlatformDependent.putByte(buffer, writerOffset++, + (byte) (Character.isHighSurrogate(c2)? WRITE_UTF_UNKNOWN : c2)); + } else { + int codePoint = Character.toCodePoint(c, c2); + // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0xf0 | (codePoint >> 18))); + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0x80 | ((codePoint >> 12) & 0x3f))); + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0x80 | ((codePoint >> 6) & 0x3f))); + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0x80 | (codePoint & 0x3f))); + } + } else { + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0xe0 | (c >> 12))); + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0x80 | ((c >> 6) & 0x3f))); + PlatformDependent.putByte(buffer, writerOffset++, (byte) (0x80 | (c & 0x3f))); + } + } + return (int) (writerOffset - oldWriterOffset); + } + /** * Returns max bytes length of UTF8 character sequence of the given length. */ @@ -582,22 +1068,35 @@ public static int utf8MaxBytes(CharSequence seq) { * This method is producing the exact length according to {@link #writeUtf8(ByteBuf, CharSequence)}. */ public static int utf8Bytes(final CharSequence seq) { + return utf8ByteCount(seq, 0, seq.length()); + } + + /** + * Equivalent to {@link #utf8Bytes(CharSequence) utf8Bytes(seq.subSequence(start, end))} + * but avoids subsequence object allocation. + *

    + * This method is producing the exact length according to {@link #writeUtf8(ByteBuf, CharSequence, int, int)}. + */ + public static int utf8Bytes(final CharSequence seq, int start, int end) { + return utf8ByteCount(checkCharSequenceBounds(seq, start, end), start, end); + } + + private static int utf8ByteCount(final CharSequence seq, int start, int end) { if (seq instanceof AsciiString) { - return seq.length(); + return end - start; } - int seqLength = seq.length(); - int i = 0; + int i = start; // ASCII fast path - while (i < seqLength && seq.charAt(i) < 0x80) { + while (i < end && seq.charAt(i) < 0x80) { ++i; } // !ASCII is packed in a separate method to let the ASCII case be smaller - return i < seqLength ? i + utf8Bytes(seq, i, seqLength) : i; + return i < end ? (i - start) + utf8BytesNonAscii(seq, i, end) : i - start; } - private static int utf8Bytes(final CharSequence seq, final int start, final int length) { + private static int utf8BytesNonAscii(final CharSequence seq, final int start, final int end) { int encodedLength = 0; - for (int i = start; i < length; i++) { + for (int i = start; i < end; i++) { final char c = seq.charAt(i); // making it 100% branchless isn't rewarding due to the many bit operations necessary! if (c < 0x800) { @@ -609,22 +1108,18 @@ private static int utf8Bytes(final CharSequence seq, final int start, final int // WRITE_UTF_UNKNOWN continue; } - final char c2; - try { - // Surrogate Pair consumes 2 characters. Optimistically try to get the next character to avoid - // duplicate bounds checking with charAt. - c2 = seq.charAt(++i); - } catch (IndexOutOfBoundsException ignored) { + // Surrogate Pair consumes 2 characters. + if (++i == end) { encodedLength++; // WRITE_UTF_UNKNOWN break; } - if (!Character.isLowSurrogate(c2)) { + if (!Character.isLowSurrogate(seq.charAt(i))) { // WRITE_UTF_UNKNOWN + (Character.isHighSurrogate(c2) ? 
WRITE_UTF_UNKNOWN : c2) encodedLength += 2; continue; } - // See http://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. + // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. encodedLength += 4; } else { encodedLength += 3; @@ -634,11 +1129,11 @@ private static int utf8Bytes(final CharSequence seq, final int start, final int } /** - * Encode a {@link CharSequence} in ASCII and write + * Encode a {@link CharSequence} in ASCII and write * it to a {@link ByteBuf} allocated with {@code alloc}. * @param alloc The allocator used to allocate a new {@link ByteBuf}. * @param seq The characters to write into a buffer. - * @return The {@link ByteBuf} which contains the ASCII encoded + * @return The {@link ByteBuf} which contains the ASCII encoded * result. */ public static ByteBuf writeAscii(ByteBufAllocator alloc, CharSequence seq) { @@ -649,36 +1144,38 @@ public static ByteBuf writeAscii(ByteBufAllocator alloc, CharSequence seq) { } /** - * Encode a {@link CharSequence} in ASCII and write it + * Encode a {@link CharSequence} in ASCII and write it * to a {@link ByteBuf}. * * This method returns the actual number of bytes written. */ public static int writeAscii(ByteBuf buf, CharSequence seq) { // ASCII uses 1 byte per char - final int len = seq.length(); - if (seq instanceof AsciiString) { - AsciiString asciiString = (AsciiString) seq; - buf.writeBytes(asciiString.array(), asciiString.arrayOffset(), len); - } else { - for (;;) { - if (buf instanceof AbstractByteBuf) { - AbstractByteBuf byteBuf = (AbstractByteBuf) buf; - byteBuf.ensureWritable0(len); - int written = writeAscii(byteBuf, byteBuf.writerIndex, seq, len); - byteBuf.writerIndex += written; - return written; - } else if (buf instanceof WrappedByteBuf) { - // Unwrap as the wrapped buffer may be an AbstractByteBuf and so we can use fast-path. 
- buf = buf.unwrap(); + for (;;) { + if (buf instanceof WrappedCompositeByteBuf) { + // WrappedCompositeByteBuf is a sub-class of AbstractByteBuf so it needs special handling. + buf = buf.unwrap(); + } else if (buf instanceof AbstractByteBuf) { + final int len = seq.length(); + AbstractByteBuf byteBuf = (AbstractByteBuf) buf; + byteBuf.ensureWritable0(len); + if (seq instanceof AsciiString) { + writeAsciiString(byteBuf, byteBuf.writerIndex, (AsciiString) seq, 0, len); } else { - byte[] bytes = seq.toString().getBytes(CharsetUtil.US_ASCII); - buf.writeBytes(bytes); - return bytes.length; + final int written = writeAscii(byteBuf, byteBuf.writerIndex, seq, len); + assert written == len; } + byteBuf.writerIndex += len; + return len; + } else if (buf instanceof WrappedByteBuf) { + // Unwrap as the wrapped buffer may be an AbstractByteBuf and so we can use fast-path. + buf = buf.unwrap(); + } else { + byte[] bytes = seq.toString().getBytes(CharsetUtil.US_ASCII); + buf.writeBytes(bytes); + return bytes.length; } } - return len; } // Fast-Path implementation @@ -747,52 +1244,27 @@ static ByteBuf encodeString0(ByteBufAllocator alloc, boolean enforceHeap, CharBu } } + @SuppressWarnings("deprecation") static String decodeString(ByteBuf src, int readerIndex, int len, Charset charset) { if (len == 0) { return StringUtil.EMPTY_STRING; } - final CharsetDecoder decoder = CharsetUtil.decoder(charset); - final int maxLength = (int) ((double) len * decoder.maxCharsPerByte()); - CharBuffer dst = CHAR_BUFFERS.get(); - if (dst.length() < maxLength) { - dst = CharBuffer.allocate(maxLength); - if (maxLength <= MAX_CHAR_BUFFER_SIZE) { - CHAR_BUFFERS.set(dst); - } - } else { - dst.clear(); - } - if (src.nioBufferCount() == 1) { - decodeString(decoder, src.nioBuffer(readerIndex, len), dst); + final byte[] array; + final int offset; + + if (src.hasArray()) { + array = src.array(); + offset = src.arrayOffset() + readerIndex; } else { - // We use a heap buffer as CharsetDecoder is most likely 
able to use a fast-path if src and dst buffers - // are both backed by a byte array. - ByteBuf buffer = src.alloc().heapBuffer(len); - try { - buffer.writeBytes(src, readerIndex, len); - // Use internalNioBuffer(...) to reduce object creation. - decodeString(decoder, buffer.internalNioBuffer(buffer.readerIndex(), len), dst); - } finally { - // Release the temporary buffer again. - buffer.release(); - } + array = threadLocalTempArray(len); + offset = 0; + src.getBytes(readerIndex, array, 0, len); } - return dst.flip().toString(); - } - - private static void decodeString(CharsetDecoder decoder, ByteBuffer src, CharBuffer dst) { - try { - CoderResult cr = decoder.decode(src, dst, true); - if (!cr.isUnderflow()) { - cr.throwException(); - } - cr = decoder.flush(dst); - if (!cr.isUnderflow()) { - cr.throwException(); - } - } catch (CharacterCodingException x) { - throw new IllegalStateException(x); + if (CharsetUtil.US_ASCII.equals(charset)) { + // Fast-path for US-ASCII which is used frequently. + return new String(array, 0, offset, len); } + return new String(array, offset, len, charset); } /** @@ -835,23 +1307,25 @@ public static byte[] getBytes(ByteBuf buf, int start, int length) { * If {@code copy} is false the underlying storage will be shared, if possible. 
*/ public static byte[] getBytes(ByteBuf buf, int start, int length, boolean copy) { - if (isOutOfBounds(start, length, buf.capacity())) { + int capacity = buf.capacity(); + if (isOutOfBounds(start, length, capacity)) { throw new IndexOutOfBoundsException("expected: " + "0 <= start(" + start + ") <= start + length(" + length - + ") <= " + "buf.capacity(" + buf.capacity() + ')'); + + ") <= " + "buf.capacity(" + capacity + ')'); } if (buf.hasArray()) { - if (copy || start != 0 || length != buf.capacity()) { - int baseOffset = buf.arrayOffset() + start; - return Arrays.copyOfRange(buf.array(), baseOffset, baseOffset + length); + int baseOffset = buf.arrayOffset() + start; + byte[] bytes = buf.array(); + if (copy || baseOffset != 0 || length != bytes.length) { + return Arrays.copyOfRange(bytes, baseOffset, baseOffset + length); } else { - return buf.array(); + return bytes; } } - byte[] v = new byte[length]; - buf.getBytes(start, v); - return v; + byte[] bytes = PlatformDependent.allocateUninitializedArray(length); + buf.getBytes(start, bytes); + return bytes; } /** @@ -881,7 +1355,7 @@ public static void copy(AsciiString src, int srcIdx, ByteBuf dst, int dstIdx, in + length + ") <= srcLen(" + src.length() + ')'); } - checkNotNull(dst, "dst").setBytes(dstIdx, src.array(), srcIdx + src.arrayOffset(), length); + requireNonNull(dst, "dst").setBytes(dstIdx, src.array(), srcIdx + src.arrayOffset(), length); } /** @@ -898,7 +1372,7 @@ public static void copy(AsciiString src, int srcIdx, ByteBuf dst, int length) { + length + ") <= srcLen(" + src.length() + ')'); } - checkNotNull(dst, "dst").writeBytes(src.array(), srcIdx + src.arrayOffset(), length); + requireNonNull(dst, "dst").writeBytes(src.array(), srcIdx + src.arrayOffset(), length); } /** @@ -998,9 +1472,7 @@ private static final class HexUtil { } private static String hexDump(ByteBuf buffer, int fromIndex, int length) { - if (length < 0) { - throw new IllegalArgumentException("length: " + length); - } + 
checkPositiveOrZero(length, "length"); if (length == 0) { return ""; } @@ -1020,9 +1492,7 @@ private static String hexDump(ByteBuf buffer, int fromIndex, int length) { } private static String hexDump(byte[] array, int fromIndex, int length) { - if (length < 0) { - throw new IllegalArgumentException("length: " + length); - } + checkPositiveOrZero(length, "length"); if (length == 0) { return ""; } @@ -1045,7 +1515,7 @@ private static String prettyHexDump(ByteBuf buffer, int offset, int length) { if (length == 0) { return StringUtil.EMPTY_STRING; } else { - int rows = length / 16 + (length % 15 == 0? 0 : 1) + 4; + int rows = length / 16 + ((length & 15) == 0? 0 : 1) + 4; StringBuilder buf = new StringBuilder(rows * 80); appendPrettyHexDump(buf, buffer, offset, length); return buf.toString(); @@ -1066,13 +1536,12 @@ private static void appendPrettyHexDump(StringBuilder dump, ByteBuf buf, int off NEWLINE + " | 0 1 2 3 4 5 6 7 8 9 a b c d e f |" + NEWLINE + "+--------+-------------------------------------------------+----------------+"); - final int startIndex = offset; final int fullRows = length >>> 4; final int remainder = length & 0xF; // Dump the rows which have 16 bytes. for (int row = 0; row < fullRows; row ++) { - int rowStartIndex = (row << 4) + startIndex; + int rowStartIndex = (row << 4) + offset; // Per-row prefix. appendHexDumpRowPrefix(dump, row, rowStartIndex); @@ -1093,7 +1562,7 @@ private static void appendPrettyHexDump(StringBuilder dump, ByteBuf buf, int off // Dump the last row which has less than 16 bytes. 
if (remainder != 0) { - int rowStartIndex = (fullRows << 4) + startIndex; + int rowStartIndex = (fullRows << 4) + offset; appendHexDumpRowPrefix(dump, fullRows, rowStartIndex); // Hex dump @@ -1130,17 +1599,12 @@ private static void appendHexDumpRowPrefix(StringBuilder dump, int row, int rowS static final class ThreadLocalUnsafeDirectByteBuf extends UnpooledUnsafeDirectByteBuf { - private static final Recycler RECYCLER = - new Recycler() { - @Override - protected ThreadLocalUnsafeDirectByteBuf newObject(Handle handle) { - return new ThreadLocalUnsafeDirectByteBuf(handle); - } - }; + private static final ObjectPool RECYCLER = + ObjectPool.newPool(ThreadLocalUnsafeDirectByteBuf::new); static ThreadLocalUnsafeDirectByteBuf newInstance() { ThreadLocalUnsafeDirectByteBuf buf = RECYCLER.get(); - buf.setRefCnt(1); + buf.resetRefCnt(); return buf; } @@ -1164,16 +1628,12 @@ protected void deallocate() { static final class ThreadLocalDirectByteBuf extends UnpooledDirectByteBuf { - private static final Recycler RECYCLER = new Recycler() { - @Override - protected ThreadLocalDirectByteBuf newObject(Handle handle) { - return new ThreadLocalDirectByteBuf(handle); - } - }; + private static final ObjectPool RECYCLER = ObjectPool.newPool( + ThreadLocalDirectByteBuf::new); static ThreadLocalDirectByteBuf newInstance() { ThreadLocalDirectByteBuf buf = RECYCLER.get(); - buf.setRefCnt(1); + buf.resetRefCnt(); return buf; } @@ -1218,8 +1678,8 @@ public static boolean isText(ByteBuf buf, Charset charset) { * @throws IndexOutOfBoundsException if {@code index} + {@code length} is greater than {@code buf.readableBytes} */ public static boolean isText(ByteBuf buf, int index, int length, Charset charset) { - checkNotNull(buf, "buf"); - checkNotNull(charset, "charset"); + requireNonNull(buf, "buf"); + requireNonNull(charset, "charset"); final int maxIndex = buf.readerIndex() + buf.readableBytes(); if (index < 0 || length < 0 || index > maxIndex - length) { throw new 
IndexOutOfBoundsException("index: " + index + " length: " + length); @@ -1252,12 +1712,7 @@ public static boolean isText(ByteBuf buf, int index, int length, Charset charset /** * Aborts on a byte which is not a valid ASCII character. */ - private static final ByteProcessor FIND_NON_ASCII = new ByteProcessor() { - @Override - public boolean process(byte value) { - return value >= 0; - } - }; + private static final ByteProcessor FIND_NON_ASCII = value -> value >= 0; /** * Returns {@code true} if the specified {@link ByteBuf} starting at {@code index} with {@code length} is valid @@ -1280,7 +1735,7 @@ private static boolean isAscii(ByteBuf buf, int index, int length) { * @param length The length of the specified buffer. * * @see - * UTF-8 Definition + * UTF-8 Definition * *

          * 1. Bytes format of UTF-8
    @@ -1392,5 +1847,43 @@ private static boolean isUtf8(ByteBuf buf, int index, int length) {
             return true;
         }
     
    +    /**
    +     * Read bytes from the given {@link ByteBuffer} into the given {@link OutputStream} using the {@code position} and
    +     * {@code length}. The position and limit of the given {@link ByteBuffer} may be adjusted.
    +     */
    +    static void readBytes(ByteBufAllocator allocator, ByteBuffer buffer, int position, int length, OutputStream out)
    +            throws IOException {
    +        if (buffer.hasArray()) {
    +            out.write(buffer.array(), position + buffer.arrayOffset(), length);
    +        } else {
    +            int chunkLen = Math.min(length, WRITE_CHUNK_SIZE);
    +            buffer.clear().position(position);
    +
    +            if (length <= MAX_TL_ARRAY_LEN || !allocator.isDirectBufferPooled()) {
    +                getBytes(buffer, threadLocalTempArray(chunkLen), 0, chunkLen, out, length);
    +            } else {
    +                // if direct buffers are pooled chances are good that heap buffers are pooled as well.
    +                ByteBuf tmpBuf = allocator.heapBuffer(chunkLen);
    +                try {
    +                    byte[] tmp = tmpBuf.array();
    +                    int offset = tmpBuf.arrayOffset();
    +                    getBytes(buffer, tmp, offset, chunkLen, out, length);
    +                } finally {
    +                    tmpBuf.release();
    +                }
    +            }
    +        }
    +    }
    +
    +    private static void getBytes(ByteBuffer inBuffer, byte[] in, int inOffset, int inLen, OutputStream out, int outLen)
    +            throws IOException {
    +        do {
    +            int len = Math.min(inLen, outLen);
    +            inBuffer.get(in, inOffset, len);
    +            out.write(in, inOffset, len);
    +            outLen -= len;
    +        } while (outLen > 0);
    +    }
    +
         private ByteBufUtil() { }
     }
    diff --git a/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java b/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java
    index 7afff7e7f13..8a32dd27ace 100644
    --- a/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java
    +++ b/buffer/src/main/java/io/netty/buffer/CompositeByteBuf.java
    @@ -5,7 +5,7 @@
      * version 2.0 (the "License"); you may not use this file except in compliance
      * with the License. You may obtain a copy of the License at:
      *
    - *   http://www.apache.org/licenses/LICENSE-2.0
    + *   https://www.apache.org/licenses/LICENSE-2.0
      *
      * Unless required by applicable law or agreed to in writing, software
      * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    @@ -15,7 +15,13 @@
      */
     package io.netty.buffer;
     
    +import static java.util.Objects.requireNonNull;
    +
    +import io.netty.util.ByteProcessor;
    +import io.netty.util.IllegalReferenceCountException;
    +import io.netty.util.ReferenceCountUtil;
     import io.netty.util.internal.EmptyArrays;
    +import io.netty.util.internal.RecyclableArrayList;
     
     import java.io.IOException;
     import java.io.InputStream;
    @@ -26,15 +32,14 @@
     import java.nio.channels.GatheringByteChannel;
     import java.nio.channels.ScatteringByteChannel;
     import java.util.ArrayList;
    +import java.util.Arrays;
     import java.util.Collection;
     import java.util.Collections;
     import java.util.ConcurrentModificationException;
     import java.util.Iterator;
     import java.util.List;
    -import java.util.ListIterator;
     import java.util.NoSuchElementException;
     
    -import static io.netty.util.internal.ObjectUtil.checkNotNull;
     
     /**
      * A virtual buffer which shows multiple buffers as a single merged buffer.  It is recommended to use
    @@ -48,70 +53,92 @@ public class CompositeByteBuf extends AbstractReferenceCountedByteBuf implements
     
         private final ByteBufAllocator alloc;
         private final boolean direct;
    -    private final ComponentList components;
         private final int maxNumComponents;
     
    +    private int componentCount;
    +    private Component[] components; // resized when needed
    +
         private boolean freed;
     
    -    public CompositeByteBuf(ByteBufAllocator alloc, boolean direct, int maxNumComponents) {
    +    private CompositeByteBuf(ByteBufAllocator alloc, boolean direct, int maxNumComponents, int initSize) {
             super(AbstractByteBufAllocator.DEFAULT_MAX_CAPACITY);
    -        if (alloc == null) {
    -            throw new NullPointerException("alloc");
    +        requireNonNull(alloc, "alloc");
    +        if (maxNumComponents < 1) {
    +            throw new IllegalArgumentException(
    +                    "maxNumComponents: " + maxNumComponents + " (expected: >= 1)");
             }
             this.alloc = alloc;
             this.direct = direct;
             this.maxNumComponents = maxNumComponents;
    -        components = newList(maxNumComponents);
    +        components = newCompArray(initSize, maxNumComponents);
    +    }
    +
    +    public CompositeByteBuf(ByteBufAllocator alloc, boolean direct, int maxNumComponents) {
    +        this(alloc, direct, maxNumComponents, 0);
         }
     
         public CompositeByteBuf(ByteBufAllocator alloc, boolean direct, int maxNumComponents, ByteBuf... buffers) {
    -        this(alloc, direct, maxNumComponents, buffers, 0, buffers.length);
    +        this(alloc, direct, maxNumComponents, buffers, 0);
         }
     
    -    CompositeByteBuf(
    -            ByteBufAllocator alloc, boolean direct, int maxNumComponents, ByteBuf[] buffers, int offset, int len) {
    -        super(AbstractByteBufAllocator.DEFAULT_MAX_CAPACITY);
    -        if (alloc == null) {
    -            throw new NullPointerException("alloc");
    -        }
    -        if (maxNumComponents < 2) {
    -            throw new IllegalArgumentException(
    -                    "maxNumComponents: " + maxNumComponents + " (expected: >= 2)");
    -        }
    +    CompositeByteBuf(ByteBufAllocator alloc, boolean direct, int maxNumComponents,
    +            ByteBuf[] buffers, int offset) {
    +        this(alloc, direct, maxNumComponents, buffers.length - offset);
     
    -        this.alloc = alloc;
    -        this.direct = direct;
    -        this.maxNumComponents = maxNumComponents;
    -        components = newList(maxNumComponents);
    -
    -        addComponents0(false, 0, buffers, offset, len);
    +        addComponents0(false, 0, buffers, offset);
             consolidateIfNeeded();
    -        setIndex(0, capacity());
    +        setIndex0(0, capacity());
         }
     
         public CompositeByteBuf(
                 ByteBufAllocator alloc, boolean direct, int maxNumComponents, Iterable buffers) {
    -        super(AbstractByteBufAllocator.DEFAULT_MAX_CAPACITY);
    -        if (alloc == null) {
    -            throw new NullPointerException("alloc");
    +        this(alloc, direct, maxNumComponents,
    +                buffers instanceof Collection ? ((Collection) buffers).size() : 0);
    +
    +        addComponents(false, 0, buffers);
    +        setIndex(0, capacity());
    +    }
    +
    +    // support passing arrays of other types instead of having to copy to a ByteBuf[] first
    +    interface ByteWrapper {
    +        ByteBuf wrap(T bytes);
    +        boolean isEmpty(T bytes);
    +    }
    +
    +    static final ByteWrapper BYTE_ARRAY_WRAPPER = new ByteWrapper() {
    +        @Override
    +        public ByteBuf wrap(byte[] bytes) {
    +            return Unpooled.wrappedBuffer(bytes);
             }
    -        if (maxNumComponents < 2) {
    -            throw new IllegalArgumentException(
    -                    "maxNumComponents: " + maxNumComponents + " (expected: >= 2)");
    +        @Override
    +        public boolean isEmpty(byte[] bytes) {
    +            return bytes.length == 0;
             }
    +    };
     
    -        this.alloc = alloc;
    -        this.direct = direct;
    -        this.maxNumComponents = maxNumComponents;
    -        components = newList(maxNumComponents);
    +    static final ByteWrapper BYTE_BUFFER_WRAPPER = new ByteWrapper() {
    +        @Override
    +        public ByteBuf wrap(ByteBuffer bytes) {
    +            return Unpooled.wrappedBuffer(bytes);
    +        }
    +        @Override
    +        public boolean isEmpty(ByteBuffer bytes) {
    +            return !bytes.hasRemaining();
    +        }
    +    };
    +
    +     CompositeByteBuf(ByteBufAllocator alloc, boolean direct, int maxNumComponents,
    +            ByteWrapper wrapper, T[] buffers, int offset) {
    +        this(alloc, direct, maxNumComponents, buffers.length - offset);
     
    -        addComponents0(false, 0, buffers);
    +        addComponents0(false, 0, wrapper, buffers, offset);
             consolidateIfNeeded();
             setIndex(0, capacity());
         }
     
    -    private static ComponentList newList(int maxNumComponents) {
    -        return new ComponentList(Math.min(AbstractByteBufAllocator.DEFAULT_MAX_COMPONENTS, maxNumComponents));
    +    private static Component[] newCompArray(int initComponents, int maxNumComponents) {
    +        int capacityGuess = Math.min(AbstractByteBufAllocator.DEFAULT_MAX_COMPONENTS, maxNumComponents);
    +        return new Component[Math.max(initComponents, capacityGuess)];
         }
     
         // Special constructor used by WrappedCompositeByteBuf
    @@ -129,8 +156,8 @@ private static ComponentList newList(int maxNumComponents) {
          * Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}.
          * If you need to have it increased use {@link #addComponent(boolean, ByteBuf)}.
          * 

    - * {@link ByteBuf#release()} ownership of {@code buffer} is transfered to this {@link CompositeByteBuf}. - * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transfered to this + * {@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link CompositeByteBuf}. + * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transferred to this * {@link CompositeByteBuf}. */ public CompositeByteBuf addComponent(ByteBuf buffer) { @@ -143,10 +170,10 @@ public CompositeByteBuf addComponent(ByteBuf buffer) { * Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}. * If you need to have it increased use {@link #addComponents(boolean, ByteBuf[])}. *

    - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this * {@link CompositeByteBuf}. * @param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all {@link ByteBuf#release()} - * ownership of all {@link ByteBuf} objects is transfered to this {@link CompositeByteBuf}. + * ownership of all {@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}. */ public CompositeByteBuf addComponents(ByteBuf... buffers) { return addComponents(false, buffers); @@ -158,10 +185,10 @@ public CompositeByteBuf addComponents(ByteBuf... buffers) { * Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}. * If you need to have it increased use {@link #addComponents(boolean, Iterable)}. *

    - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this * {@link CompositeByteBuf}. * @param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all {@link ByteBuf#release()} - * ownership of all {@link ByteBuf} objects is transfered to this {@link CompositeByteBuf}. + * ownership of all {@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}. */ public CompositeByteBuf addComponents(Iterable buffers) { return addComponents(false, buffers); @@ -173,9 +200,9 @@ public CompositeByteBuf addComponents(Iterable buffers) { * Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}. * If you need to have it increased use {@link #addComponent(boolean, int, ByteBuf)}. *

    - * {@link ByteBuf#release()} ownership of {@code buffer} is transfered to this {@link CompositeByteBuf}. + * {@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link CompositeByteBuf}. * @param cIndex the index on which the {@link ByteBuf} will be added. - * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transfered to this + * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transferred to this * {@link CompositeByteBuf}. */ public CompositeByteBuf addComponent(int cIndex, ByteBuf buffer) { @@ -186,28 +213,26 @@ public CompositeByteBuf addComponent(int cIndex, ByteBuf buffer) { * Add the given {@link ByteBuf} and increase the {@code writerIndex} if {@code increaseWriterIndex} is * {@code true}. * - * {@link ByteBuf#release()} ownership of {@code buffer} is transfered to this {@link CompositeByteBuf}. - * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transfered to this + * {@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link CompositeByteBuf}. + * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transferred to this * {@link CompositeByteBuf}. */ public CompositeByteBuf addComponent(boolean increaseWriterIndex, ByteBuf buffer) { - checkNotNull(buffer, "buffer"); - addComponent0(increaseWriterIndex, components.size(), buffer); - consolidateIfNeeded(); - return this; + return addComponent(increaseWriterIndex, componentCount, buffer); } /** * Add the given {@link ByteBuf}s and increase the {@code writerIndex} if {@code increaseWriterIndex} is * {@code true}. * - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this * {@link CompositeByteBuf}. * @param buffers the {@link ByteBuf}s to add. 
{@link ByteBuf#release()} ownership of all {@link ByteBuf#release()} - * ownership of all {@link ByteBuf} objects is transfered to this {@link CompositeByteBuf}. + * ownership of all {@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}. */ public CompositeByteBuf addComponents(boolean increaseWriterIndex, ByteBuf... buffers) { - addComponents0(increaseWriterIndex, components.size(), buffers, 0, buffers.length); + requireNonNull(buffers, "buffers"); + addComponents0(increaseWriterIndex, componentCount, buffers, 0); consolidateIfNeeded(); return this; } @@ -216,33 +241,38 @@ public CompositeByteBuf addComponents(boolean increaseWriterIndex, ByteBuf... bu * Add the given {@link ByteBuf}s and increase the {@code writerIndex} if {@code increaseWriterIndex} is * {@code true}. * - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this * {@link CompositeByteBuf}. * @param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all {@link ByteBuf#release()} - * ownership of all {@link ByteBuf} objects is transfered to this {@link CompositeByteBuf}. + * ownership of all {@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}. */ public CompositeByteBuf addComponents(boolean increaseWriterIndex, Iterable buffers) { - addComponents0(increaseWriterIndex, components.size(), buffers); - consolidateIfNeeded(); - return this; + return addComponents(increaseWriterIndex, componentCount, buffers); } /** * Add the given {@link ByteBuf} on the specific index and increase the {@code writerIndex} * if {@code increaseWriterIndex} is {@code true}. * - * {@link ByteBuf#release()} ownership of {@code buffer} is transfered to this {@link CompositeByteBuf}. + * {@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link CompositeByteBuf}. 
* @param cIndex the index on which the {@link ByteBuf} will be added. - * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transfered to this + * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transferred to this * {@link CompositeByteBuf}. */ public CompositeByteBuf addComponent(boolean increaseWriterIndex, int cIndex, ByteBuf buffer) { - checkNotNull(buffer, "buffer"); + requireNonNull(buffer, "buffer"); addComponent0(increaseWriterIndex, cIndex, buffer); consolidateIfNeeded(); return this; } + private static void checkForOverflow(int capacity, int readableBytes) { + if (capacity + readableBytes < 0) { + throw new IllegalArgumentException("Can't increase by " + readableBytes + " as capacity(" + capacity + ")" + + " would overflow " + Integer.MAX_VALUE); + } + } + /** * Precondition is that {@code buffer != null}. */ @@ -252,29 +282,23 @@ private int addComponent0(boolean increaseWriterIndex, int cIndex, ByteBuf buffe try { checkComponentIndex(cIndex); - int readableBytes = buffer.readableBytes(); - // No need to consolidate - just add a component to the list. - @SuppressWarnings("deprecation") - Component c = new Component(buffer.order(ByteOrder.BIG_ENDIAN).slice()); - if (cIndex == components.size()) { - wasAdded = components.add(c); - if (cIndex == 0) { - c.endOffset = readableBytes; - } else { - Component prev = components.get(cIndex - 1); - c.offset = prev.endOffset; - c.endOffset = c.offset + readableBytes; - } - } else { - components.add(cIndex, c); - wasAdded = true; - if (readableBytes != 0) { - updateComponentOffsets(cIndex); - } + Component c = newComponent(ensureAccessible(buffer), 0); + int readableBytes = c.length(); + + // Check if we would overflow. 
+ // See https://github.com/netty/netty/issues/10194 + checkForOverflow(capacity(), readableBytes); + + addComp(cIndex, c); + wasAdded = true; + if (readableBytes > 0 && cIndex < componentCount - 1) { + updateComponentOffsets(cIndex); + } else if (cIndex > 0) { + c.reposition(components[cIndex - 1].endOffset); } if (increaseWriterIndex) { - writerIndex(writerIndex() + buffer.readableBytes()); + writerIndex += readableBytes; } return cIndex; } finally { @@ -284,59 +308,135 @@ private int addComponent0(boolean increaseWriterIndex, int cIndex, ByteBuf buffe } } + private static ByteBuf ensureAccessible(final ByteBuf buf) { + if (checkAccessible && !buf.isAccessible()) { + throw new IllegalReferenceCountException(0); + } + return buf; + } + + @SuppressWarnings("deprecation") + private Component newComponent(final ByteBuf buf, final int offset) { + final int srcIndex = buf.readerIndex(); + final int len = buf.readableBytes(); + + // unpeel any intermediate outer layers (UnreleasableByteBuf, LeakAwareByteBufs, SwappedByteBuf) + ByteBuf unwrapped = buf; + int unwrappedIndex = srcIndex; + while (unwrapped instanceof WrappedByteBuf || unwrapped instanceof SwappedByteBuf) { + unwrapped = unwrapped.unwrap(); + } + + // unwrap if already sliced + if (unwrapped instanceof AbstractUnpooledSlicedByteBuf) { + unwrappedIndex += ((AbstractUnpooledSlicedByteBuf) unwrapped).idx(0); + unwrapped = unwrapped.unwrap(); + } else if (unwrapped instanceof PooledSlicedByteBuf) { + unwrappedIndex += ((PooledSlicedByteBuf) unwrapped).adjustment; + unwrapped = unwrapped.unwrap(); + } else if (unwrapped instanceof DuplicatedByteBuf || unwrapped instanceof PooledDuplicatedByteBuf) { + unwrapped = unwrapped.unwrap(); + } + + // We don't need to slice later to expose the internal component if the readable range + // is already the entire buffer + final ByteBuf slice = buf.capacity() == len ? 
buf : null; + + return new Component(buf.order(ByteOrder.BIG_ENDIAN), srcIndex, + unwrapped.order(ByteOrder.BIG_ENDIAN), unwrappedIndex, offset, len, slice); + } + /** * Add the given {@link ByteBuf}s on the specific index *

    * Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}. * If you need to have it increased you need to handle it by your own. *

    - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this * {@link CompositeByteBuf}. * @param cIndex the index on which the {@link ByteBuf} will be added. {@link ByteBuf#release()} ownership of all - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects is transferred to this * {@link CompositeByteBuf}. * @param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all {@link ByteBuf#release()} - * ownership of all {@link ByteBuf} objects is transfered to this {@link CompositeByteBuf}. + * ownership of all {@link ByteBuf} objects is transferred to this {@link CompositeByteBuf}. */ public CompositeByteBuf addComponents(int cIndex, ByteBuf... buffers) { - addComponents0(false, cIndex, buffers, 0, buffers.length); + requireNonNull(buffers, "buffers"); + addComponents0(false, cIndex, buffers, 0); consolidateIfNeeded(); return this; } - private int addComponents0(boolean increaseWriterIndex, int cIndex, ByteBuf[] buffers, int offset, int len) { - checkNotNull(buffers, "buffers"); - int i = offset; + private CompositeByteBuf addComponents0(boolean increaseWriterIndex, + final int cIndex, ByteBuf[] buffers, int arrOffset) { + final int len = buffers.length, count = len - arrOffset; + + int readableBytes = 0; + int capacity = capacity(); + for (int i = arrOffset; i < buffers.length; i++) { + ByteBuf b = buffers[i]; + if (b == null) { + break; + } + readableBytes += b.readableBytes(); + + // Check if we would overflow. 
+ // See https://github.com/netty/netty/issues/10194 + checkForOverflow(capacity, readableBytes); + } + // only set ci after we've shifted so that finally block logic is always correct + int ci = Integer.MAX_VALUE; try { checkComponentIndex(cIndex); - - // No need for consolidation - while (i < len) { - // Increment i now to prepare for the next iteration and prevent a duplicate release (addComponent0 - // will release if an exception occurs, and we also release in the finally block here). - ByteBuf b = buffers[i++]; + shiftComps(cIndex, count); // will increase componentCount + int nextOffset = cIndex > 0 ? components[cIndex - 1].endOffset : 0; + for (ci = cIndex; arrOffset < len; arrOffset++, ci++) { + ByteBuf b = buffers[arrOffset]; if (b == null) { break; } - cIndex = addComponent0(increaseWriterIndex, cIndex, b) + 1; - int size = components.size(); - if (cIndex > size) { - cIndex = size; - } + Component c = newComponent(ensureAccessible(b), nextOffset); + components[ci] = c; + nextOffset = c.endOffset; } - return cIndex; + return this; } finally { - for (; i < len; ++i) { - ByteBuf b = buffers[i]; - if (b != null) { - try { - b.release(); - } catch (Throwable ignored) { - // ignore + // ci is now the index following the last successfully added component + if (ci < componentCount) { + if (ci < cIndex + count) { + // we bailed early + removeCompRange(ci, cIndex + count); + for (; arrOffset < len; ++arrOffset) { + ReferenceCountUtil.safeRelease(buffers[arrOffset]); } } + updateComponentOffsets(ci); // only need to do this here for components after the added ones + } + if (increaseWriterIndex && ci > cIndex && ci <= componentCount) { + writerIndex += components[ci - 1].endOffset - components[cIndex].offset; + } + } + } + + private int addComponents0(boolean increaseWriterIndex, int cIndex, + ByteWrapper wrapper, T[] buffers, int offset) { + checkComponentIndex(cIndex); + + // No need for consolidation + for (int i = offset, len = buffers.length; i < len; i++) { + 
T b = buffers[i]; + if (b == null) { + break; + } + if (!wrapper.isEmpty(b)) { + cIndex = addComponent0(increaseWriterIndex, cIndex, wrapper.wrap(b)) + 1; + int size = componentCount; + if (cIndex > size) { + cIndex = size; + } } } + return cIndex; } /** @@ -345,50 +445,117 @@ private int addComponents0(boolean increaseWriterIndex, int cIndex, ByteBuf[] bu * Be aware that this method does not increase the {@code writerIndex} of the {@link CompositeByteBuf}. * If you need to have it increased you need to handle it by your own. *

    - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects in {@code buffers} is transferred to this * {@link CompositeByteBuf}. * @param cIndex the index on which the {@link ByteBuf} will be added. * @param buffers the {@link ByteBuf}s to add. {@link ByteBuf#release()} ownership of all - * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects is transfered to this + * {@link ByteBuf#release()} ownership of all {@link ByteBuf} objects is transferred to this * {@link CompositeByteBuf}. */ public CompositeByteBuf addComponents(int cIndex, Iterable buffers) { - addComponents0(false, cIndex, buffers); - consolidateIfNeeded(); - return this; + return addComponents(false, cIndex, buffers); } - private int addComponents0(boolean increaseIndex, int cIndex, Iterable buffers) { + /** + * Add the given {@link ByteBuf} and increase the {@code writerIndex} if {@code increaseWriterIndex} is + * {@code true}. If the provided buffer is a {@link CompositeByteBuf} itself, a "shallow copy" of its + * readable components will be performed. Thus the actual number of new components added may vary + * and in particular will be zero if the provided buffer is not readable. + *

    + * {@link ByteBuf#release()} ownership of {@code buffer} is transferred to this {@link CompositeByteBuf}. + * @param buffer the {@link ByteBuf} to add. {@link ByteBuf#release()} ownership is transferred to this + * {@link CompositeByteBuf}. + */ + public CompositeByteBuf addFlattenedComponents(boolean increaseWriterIndex, ByteBuf buffer) { + requireNonNull(buffer, "buffer"); + final int ridx = buffer.readerIndex(); + final int widx = buffer.writerIndex(); + if (ridx == widx) { + buffer.release(); + return this; + } + if (!(buffer instanceof CompositeByteBuf)) { + addComponent0(increaseWriterIndex, componentCount, buffer); + consolidateIfNeeded(); + return this; + } + final CompositeByteBuf from; + if (buffer instanceof WrappedCompositeByteBuf) { + from = (CompositeByteBuf) buffer.unwrap(); + } else { + from = (CompositeByteBuf) buffer; + } + from.checkIndex(ridx, widx - ridx); + final Component[] fromComponents = from.components; + final int compCountBefore = componentCount; + final int writerIndexBefore = writerIndex; + try { + for (int cidx = from.toComponentIndex0(ridx), newOffset = capacity();; cidx++) { + final Component component = fromComponents[cidx]; + final int compOffset = component.offset; + final int fromIdx = Math.max(ridx, compOffset); + final int toIdx = Math.min(widx, component.endOffset); + final int len = toIdx - fromIdx; + if (len > 0) { // skip empty components + addComp(componentCount, new Component( + component.srcBuf.retain(), component.srcIdx(fromIdx), + component.buf, component.idx(fromIdx), newOffset, len, null)); + } + if (widx == toIdx) { + break; + } + newOffset += len; + } + if (increaseWriterIndex) { + writerIndex = writerIndexBefore + (widx - ridx); + } + consolidateIfNeeded(); + buffer.release(); + buffer = null; + return this; + } finally { + if (buffer != null) { + // if we did not succeed, attempt to rollback any components that were added + if (increaseWriterIndex) { + writerIndex = writerIndexBefore; + } + for (int cidx = 
componentCount - 1; cidx >= compCountBefore; cidx--) { + components[cidx].free(); + removeComp(cidx); + } + } + } + } + + // TODO optimize further, similar to ByteBuf[] version + // (difference here is that we don't know *always* know precise size increase in advance, + // but we do in the most common case that the Iterable is a Collection) + private CompositeByteBuf addComponents(boolean increaseIndex, int cIndex, Iterable buffers) { if (buffers instanceof ByteBuf) { // If buffers also implements ByteBuf (e.g. CompositeByteBuf), it has to go to addComponent(ByteBuf). - return addComponent0(increaseIndex, cIndex, (ByteBuf) buffers); + return addComponent(increaseIndex, cIndex, (ByteBuf) buffers); } - checkNotNull(buffers, "buffers"); + requireNonNull(buffers, "buffers"); + Iterator it = buffers.iterator(); + try { + checkComponentIndex(cIndex); - if (!(buffers instanceof Collection)) { - List list = new ArrayList(); - try { - for (ByteBuf b: buffers) { - list.add(b); - } - buffers = list; - } finally { - if (buffers != list) { - for (ByteBuf b: buffers) { - if (b != null) { - try { - b.release(); - } catch (Throwable ignored) { - // ignore - } - } - } + // No need for consolidation + while (it.hasNext()) { + ByteBuf b = it.next(); + if (b == null) { + break; } + cIndex = addComponent0(increaseIndex, cIndex, b) + 1; + cIndex = Math.min(cIndex, componentCount); + } + } finally { + while (it.hasNext()) { + ReferenceCountUtil.safeRelease(it.next()); } } - - Collection col = (Collection) buffers; - return addComponents0(increaseIndex, cIndex, col.toArray(new ByteBuf[col.size()]), 0 , col.size()); + consolidateIfNeeded(); + return this; } /** @@ -398,63 +565,42 @@ private int addComponents0(boolean increaseIndex, int cIndex, Iterable private void consolidateIfNeeded() { // Consolidate if the number of components will exceed the allowed maximum by the current // operation. 
- final int numComponents = components.size(); - if (numComponents > maxNumComponents) { - final int capacity = components.get(numComponents - 1).endOffset; - - ByteBuf consolidated = allocBuffer(capacity); - - // We're not using foreach to avoid creating an iterator. - for (int i = 0; i < numComponents; i ++) { - Component c = components.get(i); - ByteBuf b = c.buf; - consolidated.writeBytes(b); - c.freeIfNecessary(); - } - Component c = new Component(consolidated); - c.endOffset = c.length; - components.clear(); - components.add(c); + int size = componentCount; + if (size > maxNumComponents) { + consolidate0(0, size); } } private void checkComponentIndex(int cIndex) { ensureAccessible(); - if (cIndex < 0 || cIndex > components.size()) { + if (cIndex < 0 || cIndex > componentCount) { throw new IndexOutOfBoundsException(String.format( "cIndex: %d (expected: >= 0 && <= numComponents(%d))", - cIndex, components.size())); + cIndex, componentCount)); } } private void checkComponentIndex(int cIndex, int numComponents) { ensureAccessible(); - if (cIndex < 0 || cIndex + numComponents > components.size()) { + if (cIndex < 0 || cIndex + numComponents > componentCount) { throw new IndexOutOfBoundsException(String.format( "cIndex: %d, numComponents: %d " + "(expected: cIndex >= 0 && cIndex + numComponents <= totalNumComponents(%d))", - cIndex, numComponents, components.size())); + cIndex, numComponents, componentCount)); } } private void updateComponentOffsets(int cIndex) { - int size = components.size(); + int size = componentCount; if (size <= cIndex) { return; } - Component c = components.get(cIndex); - if (cIndex == 0) { - c.offset = 0; - c.endOffset = c.length; - cIndex ++; - } - - for (int i = cIndex; i < size; i ++) { - Component prev = components.get(i - 1); - Component cur = components.get(i); - cur.offset = prev.endOffset; - cur.endOffset = cur.offset + cur.length; + int nextIndex = cIndex > 0 ? 
components[cIndex - 1].endOffset : 0; + for (; cIndex < size; cIndex++) { + Component c = components[cIndex]; + c.reposition(nextIndex); + nextIndex = c.endOffset; } } @@ -465,9 +611,13 @@ private void updateComponentOffsets(int cIndex) { */ public CompositeByteBuf removeComponent(int cIndex) { checkComponentIndex(cIndex); - Component comp = components.remove(cIndex); - comp.freeIfNecessary(); - if (comp.length > 0) { + Component comp = components[cIndex]; + if (lastAccessed == comp) { + lastAccessed = null; + } + comp.free(); + removeComp(cIndex); + if (comp.length() > 0) { // Only need to call updateComponentOffsets if the length was > 0 updateComponentOffsets(cIndex); } @@ -489,13 +639,16 @@ public CompositeByteBuf removeComponents(int cIndex, int numComponents) { int endIndex = cIndex + numComponents; boolean needsUpdate = false; for (int i = cIndex; i < endIndex; ++i) { - Component c = components.get(i); - if (c.length > 0) { + Component c = components[i]; + if (c.length() > 0) { needsUpdate = true; } - c.freeIfNecessary(); + if (lastAccessed == c) { + lastAccessed = null; + } + c.free(); } - components.removeRange(cIndex, endIndex); + removeCompRange(cIndex, endIndex); if (needsUpdate) { // Only need to call updateComponentOffsets if the length was > 0 @@ -507,10 +660,59 @@ public CompositeByteBuf removeComponents(int cIndex, int numComponents) { @Override public Iterator iterator() { ensureAccessible(); - if (components.isEmpty()) { - return EMPTY_ITERATOR; + return componentCount == 0 ? 
EMPTY_ITERATOR : new CompositeByteBufIterator(); + } + + @Override + protected int forEachByteAsc0(int start, int end, ByteProcessor processor) { + if (end <= start) { + return -1; + } + for (int i = toComponentIndex0(start), length = end - start; length > 0; i++) { + Component c = components[i]; + if (c.offset == c.endOffset) { + continue; // empty + } + ByteBuf s = c.buf; + int localStart = c.idx(start); + int localLength = Math.min(length, c.endOffset - start); + // avoid additional checks in AbstractByteBuf case + int result = s instanceof AbstractByteBuf + ? ((AbstractByteBuf) s).forEachByteAsc0(localStart, localStart + localLength, processor) + : s.forEachByte(localStart, localLength, processor); + if (result != -1) { + return result - c.adjustment; + } + start += localLength; + length -= localLength; } - return new CompositeByteBufIterator(); + return -1; + } + + @Override + protected int forEachByteDesc0(int rStart, int rEnd, ByteProcessor processor) { + if (rEnd > rStart) { // rStart *and* rEnd are inclusive + return -1; + } + for (int i = toComponentIndex0(rStart), length = 1 + rStart - rEnd; length > 0; i--) { + Component c = components[i]; + if (c.offset == c.endOffset) { + continue; // empty + } + ByteBuf s = c.buf; + int localRStart = c.idx(length + rEnd); + int localLength = Math.min(length, localRStart), localIndex = localRStart - localLength; + // avoid additional checks in AbstractByteBuf case + int result = s instanceof AbstractByteBuf + ? 
((AbstractByteBuf) s).forEachByteDesc0(localRStart - 1, localIndex, processor) + : s.forEachByteDesc(localIndex, localLength, processor); + + if (result != -1) { + return result - c.adjustment; + } + length -= localLength; + } + return -1; } /** @@ -522,50 +724,40 @@ public List decompose(int offset, int length) { return Collections.emptyList(); } - int componentId = toComponentIndex(offset); - List slice = new ArrayList(components.size()); - - // The first component - Component firstC = components.get(componentId); - ByteBuf first = firstC.buf.duplicate(); - first.readerIndex(offset - firstC.offset); - - ByteBuf buf = first; + int componentId = toComponentIndex0(offset); int bytesToSlice = length; - do { - int readableBytes = buf.readableBytes(); - if (bytesToSlice <= readableBytes) { - // Last component - buf.writerIndex(buf.readerIndex() + bytesToSlice); - slice.add(buf); - break; - } else { - // Not the last component - slice.add(buf); - bytesToSlice -= readableBytes; - componentId ++; + // The first component + Component firstC = components[componentId]; - // Fetch the next component. - buf = components.get(componentId).buf.duplicate(); - } - } while (bytesToSlice > 0); + ByteBuf slice = firstC.buf.slice(firstC.idx(offset), Math.min(firstC.endOffset - offset, bytesToSlice)); + bytesToSlice -= slice.readableBytes(); - // Slice all components because only readable bytes are interesting. - for (int i = 0; i < slice.size(); i ++) { - slice.set(i, slice.get(i).slice()); + if (bytesToSlice == 0) { + return Collections.singletonList(slice); } - return slice; + List sliceList = new ArrayList<>(componentCount - componentId); + sliceList.add(slice); + + // Add all the slices until there is nothing more left and then return the List. 
+ do { + Component component = components[++componentId]; + slice = component.buf.slice(component.idx(component.offset), Math.min(component.length(), bytesToSlice)); + bytesToSlice -= slice.readableBytes(); + sliceList.add(slice); + } while (bytesToSlice > 0); + + return sliceList; } @Override public boolean isDirect() { - int size = components.size(); + int size = componentCount; if (size == 0) { return false; } for (int i = 0; i < size; i++) { - if (!components.get(i).buf.isDirect()) { + if (!components[i].buf.isDirect()) { return false; } } @@ -574,11 +766,11 @@ public boolean isDirect() { @Override public boolean hasArray() { - switch (components.size()) { + switch (componentCount) { case 0: return true; case 1: - return components.get(0).buf.hasArray(); + return components[0].buf.hasArray(); default: return false; } @@ -586,11 +778,11 @@ public boolean hasArray() { @Override public byte[] array() { - switch (components.size()) { + switch (componentCount) { case 0: return EmptyArrays.EMPTY_BYTES; case 1: - return components.get(0).buf.array(); + return components[0].buf.array(); default: throw new UnsupportedOperationException(); } @@ -598,11 +790,12 @@ public byte[] array() { @Override public int arrayOffset() { - switch (components.size()) { + switch (componentCount) { case 0: return 0; case 1: - return components.get(0).buf.arrayOffset(); + Component c = components[0]; + return c.idx(c.buf.arrayOffset()); default: throw new UnsupportedOperationException(); } @@ -610,11 +803,11 @@ public int arrayOffset() { @Override public boolean hasMemoryAddress() { - switch (components.size()) { + switch (componentCount) { case 0: return Unpooled.EMPTY_BUFFER.hasMemoryAddress(); case 1: - return components.get(0).buf.hasMemoryAddress(); + return components[0].buf.hasMemoryAddress(); default: return false; } @@ -622,11 +815,12 @@ public boolean hasMemoryAddress() { @Override public long memoryAddress() { - switch (components.size()) { + switch (componentCount) { case 0: 
return Unpooled.EMPTY_BUFFER.memoryAddress(); case 1: - return components.get(0).buf.memoryAddress(); + Component c = components[0]; + return c.buf.memoryAddress() + c.adjustment; default: throw new UnsupportedOperationException(); } @@ -634,56 +828,50 @@ public long memoryAddress() { @Override public int capacity() { - final int numComponents = components.size(); - if (numComponents == 0) { - return 0; - } - return components.get(numComponents - 1).endOffset; + int size = componentCount; + return size > 0 ? components[size - 1].endOffset : 0; } @Override public CompositeByteBuf capacity(int newCapacity) { checkNewCapacity(newCapacity); - int oldCapacity = capacity(); + final int size = componentCount, oldCapacity = capacity(); if (newCapacity > oldCapacity) { final int paddingLength = newCapacity - oldCapacity; - ByteBuf padding; - int nComponents = components.size(); - if (nComponents < maxNumComponents) { - padding = allocBuffer(paddingLength); - padding.setIndex(0, paddingLength); - addComponent0(false, components.size(), padding); - } else { - padding = allocBuffer(paddingLength); - padding.setIndex(0, paddingLength); + ByteBuf padding = allocBuffer(paddingLength).setIndex(0, paddingLength); + addComponent0(false, size, padding); + if (componentCount >= maxNumComponents) { // FIXME: No need to create a padding buffer and consolidate. // Just create a big single buffer and put the current content there. 
- addComponent0(false, components.size(), padding); consolidateIfNeeded(); } } else if (newCapacity < oldCapacity) { - int bytesToTrim = oldCapacity - newCapacity; - for (ListIterator i = components.listIterator(components.size()); i.hasPrevious();) { - Component c = i.previous(); - if (bytesToTrim >= c.length) { - bytesToTrim -= c.length; - i.remove(); - continue; + lastAccessed = null; + int i = size - 1; + for (int bytesToTrim = oldCapacity - newCapacity; i >= 0; i--) { + Component c = components[i]; + final int cLength = c.length(); + if (bytesToTrim < cLength) { + // Trim the last component + c.endOffset -= bytesToTrim; + ByteBuf slice = c.slice; + if (slice != null) { + // We must replace the cached slice with a derived one to ensure that + // it can later be released properly in the case of PooledSlicedByteBuf. + c.slice = slice.slice(0, c.length()); + } + break; } - - // Replace the last component with the trimmed slice. - Component newC = new Component(c.buf.slice(0, c.length - bytesToTrim)); - newC.offset = c.offset; - newC.endOffset = newC.offset + newC.length; - i.set(newC); - break; + c.free(); + bytesToTrim -= cLength; } + removeCompRange(i + 1, size); if (readerIndex() > newCapacity) { - setIndex(newCapacity, newCapacity); - } else if (writerIndex() > newCapacity) { - writerIndex(newCapacity); + setIndex0(newCapacity, newCapacity); + } else if (writerIndex > newCapacity) { + writerIndex = newCapacity; } } return this; @@ -703,7 +891,7 @@ public ByteOrder order() { * Return the current number of {@link ByteBuf}'s that are composed in this instance */ public int numComponents() { - return components.size(); + return componentCount; } /** @@ -718,10 +906,24 @@ public int maxNumComponents() { */ public int toComponentIndex(int offset) { checkIndex(offset); + return toComponentIndex0(offset); + } - for (int low = 0, high = components.size(); low <= high;) { + private int toComponentIndex0(int offset) { + int size = componentCount; + if (offset == 0) { // 
fast-path zero offset + for (int i = 0; i < size; i++) { + if (components[i].endOffset > 0) { + return i; + } + } + } + if (size <= 2) { // fast-path for 1 and 2 component count + return size == 1 || offset < components[0].endOffset ? 0 : 1; + } + for (int low = 0, high = size; low <= high;) { int mid = low + high >>> 1; - Component c = components.get(mid); + Component c = components[mid]; if (offset >= c.endOffset) { low = mid + 1; } else if (offset < c.offset) { @@ -736,25 +938,26 @@ public int toComponentIndex(int offset) { public int toByteIndex(int cIndex) { checkComponentIndex(cIndex); - return components.get(cIndex).offset; + return components[cIndex].offset; } @Override public byte getByte(int index) { - return _getByte(index); + Component c = findComponent(index); + return c.buf.getByte(c.idx(index)); } @Override protected byte _getByte(int index) { - Component c = findComponent(index); - return c.buf.getByte(index - c.offset); + Component c = findComponent0(index); + return c.buf.getByte(c.idx(index)); } @Override protected short _getShort(int index) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 2 <= c.endOffset) { - return c.buf.getShort(index - c.offset); + return c.buf.getShort(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return (short) ((_getByte(index) & 0xff) << 8 | _getByte(index + 1) & 0xff); } else { @@ -764,9 +967,9 @@ protected short _getShort(int index) { @Override protected short _getShortLE(int index) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 2 <= c.endOffset) { - return c.buf.getShortLE(index - c.offset); + return c.buf.getShortLE(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return (short) (_getByte(index) & 0xff | (_getByte(index + 1) & 0xff) << 8); } else { @@ -776,9 +979,9 @@ protected short _getShortLE(int index) { @Override protected int _getUnsignedMedium(int index) { - Component c = findComponent(index); + 
Component c = findComponent0(index); if (index + 3 <= c.endOffset) { - return c.buf.getUnsignedMedium(index - c.offset); + return c.buf.getUnsignedMedium(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return (_getShort(index) & 0xffff) << 8 | _getByte(index + 2) & 0xff; } else { @@ -788,9 +991,9 @@ protected int _getUnsignedMedium(int index) { @Override protected int _getUnsignedMediumLE(int index) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 3 <= c.endOffset) { - return c.buf.getUnsignedMediumLE(index - c.offset); + return c.buf.getUnsignedMediumLE(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return _getShortLE(index) & 0xffff | (_getByte(index + 2) & 0xff) << 16; } else { @@ -800,9 +1003,9 @@ protected int _getUnsignedMediumLE(int index) { @Override protected int _getInt(int index) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 4 <= c.endOffset) { - return c.buf.getInt(index - c.offset); + return c.buf.getInt(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return (_getShort(index) & 0xffff) << 16 | _getShort(index + 2) & 0xffff; } else { @@ -812,9 +1015,9 @@ protected int _getInt(int index) { @Override protected int _getIntLE(int index) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 4 <= c.endOffset) { - return c.buf.getIntLE(index - c.offset); + return c.buf.getIntLE(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return _getShortLE(index) & 0xffff | (_getShortLE(index + 2) & 0xffff) << 16; } else { @@ -824,9 +1027,9 @@ protected int _getIntLE(int index) { @Override protected long _getLong(int index) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 8 <= c.endOffset) { - return c.buf.getLong(index - c.offset); + return c.buf.getLong(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return (_getInt(index) & 0xffffffffL) << 32 
| _getInt(index + 4) & 0xffffffffL; } else { @@ -836,9 +1039,9 @@ protected long _getLong(int index) { @Override protected long _getLongLE(int index) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 8 <= c.endOffset) { - return c.buf.getLongLE(index - c.offset); + return c.buf.getLongLE(c.idx(index)); } else if (order() == ByteOrder.BIG_ENDIAN) { return _getIntLE(index) & 0xffffffffL | (_getIntLE(index + 4) & 0xffffffffL) << 32; } else { @@ -853,13 +1056,11 @@ public CompositeByteBuf getBytes(int index, byte[] dst, int dstIndex, int length return this; } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); while (length > 0) { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); - s.getBytes(index - adjustment, dst, dstIndex, localLength); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); + c.buf.getBytes(c.idx(index), dst, dstIndex, localLength); index += localLength; dstIndex += localLength; length -= localLength; @@ -878,15 +1079,13 @@ public CompositeByteBuf getBytes(int index, ByteBuffer dst) { return this; } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); try { while (length > 0) { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); dst.limit(dst.position() + localLength); - s.getBytes(index - adjustment, dst); + c.buf.getBytes(c.idx(index), dst); index += localLength; length -= localLength; i ++; @@ -904,13 +1103,11 @@ public CompositeByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int lengt return this; } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); while (length > 0) { - Component c = components.get(i); 
- ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); - s.getBytes(index - adjustment, dst, dstIndex, localLength); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); + c.buf.getBytes(c.idx(index), dst, dstIndex, localLength); index += localLength; dstIndex += localLength; length -= localLength; @@ -960,13 +1157,11 @@ public CompositeByteBuf getBytes(int index, OutputStream out, int length) throws return this; } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); while (length > 0) { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); - s.getBytes(index - adjustment, out, localLength); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); + c.buf.getBytes(c.idx(index), out, localLength); index += localLength; length -= localLength; i ++; @@ -977,25 +1172,28 @@ public CompositeByteBuf getBytes(int index, OutputStream out, int length) throws @Override public CompositeByteBuf setByte(int index, int value) { Component c = findComponent(index); - c.buf.setByte(index - c.offset, value); + c.buf.setByte(c.idx(index), value); return this; } @Override protected void _setByte(int index, int value) { - setByte(index, value); + Component c = findComponent0(index); + c.buf.setByte(c.idx(index), value); } @Override public CompositeByteBuf setShort(int index, int value) { - return (CompositeByteBuf) super.setShort(index, value); + checkIndex(index, 2); + _setShort(index, value); + return this; } @Override protected void _setShort(int index, int value) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 2 <= c.endOffset) { - c.buf.setShort(index - c.offset, value); + c.buf.setShort(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setByte(index, (byte) 
(value >>> 8)); _setByte(index + 1, (byte) value); @@ -1007,9 +1205,9 @@ protected void _setShort(int index, int value) { @Override protected void _setShortLE(int index, int value) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 2 <= c.endOffset) { - c.buf.setShortLE(index - c.offset, value); + c.buf.setShortLE(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setByte(index, (byte) value); _setByte(index + 1, (byte) (value >>> 8)); @@ -1021,14 +1219,16 @@ protected void _setShortLE(int index, int value) { @Override public CompositeByteBuf setMedium(int index, int value) { - return (CompositeByteBuf) super.setMedium(index, value); + checkIndex(index, 3); + _setMedium(index, value); + return this; } @Override protected void _setMedium(int index, int value) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 3 <= c.endOffset) { - c.buf.setMedium(index - c.offset, value); + c.buf.setMedium(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setShort(index, (short) (value >> 8)); _setByte(index + 2, (byte) value); @@ -1040,9 +1240,9 @@ protected void _setMedium(int index, int value) { @Override protected void _setMediumLE(int index, int value) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 3 <= c.endOffset) { - c.buf.setMediumLE(index - c.offset, value); + c.buf.setMediumLE(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setShortLE(index, (short) value); _setByte(index + 2, (byte) (value >>> 16)); @@ -1054,14 +1254,16 @@ protected void _setMediumLE(int index, int value) { @Override public CompositeByteBuf setInt(int index, int value) { - return (CompositeByteBuf) super.setInt(index, value); + checkIndex(index, 4); + _setInt(index, value); + return this; } @Override protected void _setInt(int index, int value) { - Component c = findComponent(index); + Component c = findComponent0(index); if 
(index + 4 <= c.endOffset) { - c.buf.setInt(index - c.offset, value); + c.buf.setInt(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setShort(index, (short) (value >>> 16)); _setShort(index + 2, (short) value); @@ -1073,9 +1275,9 @@ protected void _setInt(int index, int value) { @Override protected void _setIntLE(int index, int value) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 4 <= c.endOffset) { - c.buf.setIntLE(index - c.offset, value); + c.buf.setIntLE(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setShortLE(index, (short) value); _setShortLE(index + 2, (short) (value >>> 16)); @@ -1087,14 +1289,16 @@ protected void _setIntLE(int index, int value) { @Override public CompositeByteBuf setLong(int index, long value) { - return (CompositeByteBuf) super.setLong(index, value); + checkIndex(index, 8); + _setLong(index, value); + return this; } @Override protected void _setLong(int index, long value) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 8 <= c.endOffset) { - c.buf.setLong(index - c.offset, value); + c.buf.setLong(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setInt(index, (int) (value >>> 32)); _setInt(index + 4, (int) value); @@ -1106,9 +1310,9 @@ protected void _setLong(int index, long value) { @Override protected void _setLongLE(int index, long value) { - Component c = findComponent(index); + Component c = findComponent0(index); if (index + 8 <= c.endOffset) { - c.buf.setLongLE(index - c.offset, value); + c.buf.setLongLE(c.idx(index), value); } else if (order() == ByteOrder.BIG_ENDIAN) { _setIntLE(index, (int) value); _setIntLE(index + 4, (int) (value >>> 32)); @@ -1125,13 +1329,11 @@ public CompositeByteBuf setBytes(int index, byte[] src, int srcIndex, int length return this; } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); while (length > 0) { - Component c = 
components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); - s.setBytes(index - adjustment, src, srcIndex, localLength); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); + c.buf.setBytes(c.idx(index), src, srcIndex, localLength); index += localLength; srcIndex += localLength; length -= localLength; @@ -1150,15 +1352,13 @@ public CompositeByteBuf setBytes(int index, ByteBuffer src) { return this; } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); try { while (length > 0) { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); src.limit(src.position() + localLength); - s.setBytes(index - adjustment, src); + c.buf.setBytes(c.idx(index), src); index += localLength; length -= localLength; i ++; @@ -1176,13 +1376,11 @@ public CompositeByteBuf setBytes(int index, ByteBuf src, int srcIndex, int lengt return this; } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); while (length > 0) { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); - s.setBytes(index - adjustment, src, srcIndex, localLength); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); + c.buf.setBytes(c.idx(index), src, srcIndex, localLength); index += localLength; srcIndex += localLength; length -= localLength; @@ -1198,20 +1396,17 @@ public int setBytes(int index, InputStream in, int length) throws IOException { return in.read(EmptyArrays.EMPTY_BYTES); } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); int readBytes = 0; - do { - Component c = components.get(i); - ByteBuf s = c.buf; - 
int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); if (localLength == 0) { // Skip empty buffer i++; continue; } - int localReadBytes = s.setBytes(index - adjustment, in, localLength); + int localReadBytes = c.buf.setBytes(c.idx(index), in, localLength); if (localReadBytes < 0) { if (readBytes == 0) { return -1; @@ -1220,15 +1415,11 @@ public int setBytes(int index, InputStream in, int length) throws IOException { } } + index += localReadBytes; + length -= localReadBytes; + readBytes += localReadBytes; if (localReadBytes == localLength) { - index += localLength; - length -= localLength; - readBytes += localLength; i ++; - } else { - index += localReadBytes; - length -= localReadBytes; - readBytes += localReadBytes; } } while (length > 0); @@ -1242,19 +1433,17 @@ public int setBytes(int index, ScatteringByteChannel in, int length) throws IOEx return in.read(EMPTY_NIO_BUFFER); } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); int readBytes = 0; do { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); if (localLength == 0) { // Skip empty buffer i++; continue; } - int localReadBytes = s.setBytes(index - adjustment, in, localLength); + int localReadBytes = c.buf.setBytes(c.idx(index), in, localLength); if (localReadBytes == 0) { break; @@ -1268,15 +1457,11 @@ public int setBytes(int index, ScatteringByteChannel in, int length) throws IOEx } } + index += localReadBytes; + length -= localReadBytes; + readBytes += localReadBytes; if (localReadBytes == localLength) { - index += localLength; - length -= localLength; - readBytes += localLength; i ++; - } else { - index += localReadBytes; - length -= localReadBytes; - 
readBytes += localReadBytes; } } while (length > 0); @@ -1290,19 +1475,17 @@ public int setBytes(int index, FileChannel in, long position, int length) throws return in.read(EMPTY_NIO_BUFFER, position); } - int i = toComponentIndex(index); + int i = toComponentIndex0(index); int readBytes = 0; do { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); if (localLength == 0) { // Skip empty buffer i++; continue; } - int localReadBytes = s.setBytes(index - adjustment, in, position + readBytes, localLength); + int localReadBytes = c.buf.setBytes(c.idx(index), in, position + readBytes, localLength); if (localReadBytes == 0) { break; @@ -1316,15 +1499,11 @@ public int setBytes(int index, FileChannel in, long position, int length) throws } } + index += localReadBytes; + length -= localReadBytes; + readBytes += localReadBytes; if (localReadBytes == localLength) { - index += localLength; - length -= localLength; - readBytes += localLength; i ++; - } else { - index += localReadBytes; - length -= localReadBytes; - readBytes += localReadBytes; } } while (length > 0); @@ -1336,7 +1515,7 @@ public ByteBuf copy(int index, int length) { checkIndex(index, length); ByteBuf dst = allocBuffer(length); if (length != 0) { - copyTo(index, length, toComponentIndex(index), dst); + copyTo(index, length, toComponentIndex0(index), dst); } return dst; } @@ -1346,11 +1525,9 @@ private void copyTo(int index, int length, int componentId, ByteBuf dst) { int i = componentId; while (length > 0) { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); - s.getBytes(index - adjustment, dst, dstIndex, localLength); + Component c = components[i]; + int localLength = Math.min(length, c.endOffset - index); + 
c.buf.getBytes(c.idx(index), dst, dstIndex, localLength); index += localLength; dstIndex += localLength; length -= localLength; @@ -1367,7 +1544,8 @@ private void copyTo(int index, int length, int componentId, ByteBuf dst) { * @return buf the {@link ByteBuf} on the specified index */ public ByteBuf component(int cIndex) { - return internalComponent(cIndex).duplicate(); + checkComponentIndex(cIndex); + return components[cIndex].duplicate(); } /** @@ -1377,7 +1555,7 @@ public ByteBuf component(int cIndex) { * @return the {@link ByteBuf} on the specified index */ public ByteBuf componentAtOffset(int offset) { - return internalComponentAtOffset(offset).duplicate(); + return findComponent(offset).duplicate(); } /** @@ -1388,7 +1566,7 @@ public ByteBuf componentAtOffset(int offset) { */ public ByteBuf internalComponent(int cIndex) { checkComponentIndex(cIndex); - return components.get(cIndex).buf; + return components[cIndex].slice(); } /** @@ -1398,21 +1576,44 @@ public ByteBuf internalComponent(int cIndex) { * @param offset the offset for which the {@link ByteBuf} should be returned */ public ByteBuf internalComponentAtOffset(int offset) { - return findComponent(offset).buf; + return findComponent(offset).slice(); } + // weak cache - check it first when looking for component + private Component lastAccessed; + private Component findComponent(int offset) { + Component la = lastAccessed; + if (la != null && offset >= la.offset && offset < la.endOffset) { + ensureAccessible(); + return la; + } checkIndex(offset); + return findIt(offset); + } - for (int low = 0, high = components.size(); low <= high;) { + private Component findComponent0(int offset) { + Component la = lastAccessed; + if (la != null && offset >= la.offset && offset < la.endOffset) { + return la; + } + return findIt(offset); + } + + private Component findIt(int offset) { + for (int low = 0, high = componentCount; low <= high;) { int mid = low + high >>> 1; - Component c = components.get(mid); + Component c = 
components[mid]; + if (c == null) { + throw new IllegalStateException("No component found for offset. " + + "Composite buffer layout might be outdated, e.g. from a discardReadBytes call."); + } if (offset >= c.endOffset) { low = mid + 1; } else if (offset < c.offset) { high = mid - 1; } else { - assert c.length != 0; + lastAccessed = c; return c; } } @@ -1422,17 +1623,16 @@ private Component findComponent(int offset) { @Override public int nioBufferCount() { - switch (components.size()) { + int size = componentCount; + switch (size) { case 0: return 1; case 1: - return components.get(0).buf.nioBufferCount(); + return components[0].buf.nioBufferCount(); default: int count = 0; - int componentsCount = components.size(); - for (int i = 0; i < componentsCount; i++) { - Component c = components.get(i); - count += c.buf.nioBufferCount(); + for (int i = 0; i < size; i++) { + count += components[i].buf.nioBufferCount(); } return count; } @@ -1440,11 +1640,11 @@ public int nioBufferCount() { @Override public ByteBuffer internalNioBuffer(int index, int length) { - switch (components.size()) { + switch (componentCount) { case 0: return EMPTY_NIO_BUFFER; case 1: - return components.get(0).buf.internalNioBuffer(index, length); + return components[0].internalNioBuffer(index, length); default: throw new UnsupportedOperationException(); } @@ -1454,19 +1654,27 @@ public ByteBuffer internalNioBuffer(int index, int length) { public ByteBuffer nioBuffer(int index, int length) { checkIndex(index, length); - switch (components.size()) { + switch (componentCount) { case 0: return EMPTY_NIO_BUFFER; case 1: - ByteBuf buf = components.get(0).buf; + Component c = components[0]; + ByteBuf buf = c.buf; if (buf.nioBufferCount() == 1) { - return components.get(0).buf.nioBuffer(index, length); + return buf.nioBuffer(c.idx(index), length); } + break; + default: + break; } - ByteBuffer merged = ByteBuffer.allocate(length).order(order()); ByteBuffer[] buffers = nioBuffers(index, length); + if 
(buffers.length == 1) { + return buffers[0]; + } + + ByteBuffer merged = ByteBuffer.allocate(length).order(order()); for (ByteBuffer buf: buffers) { merged.put(buf); } @@ -1482,29 +1690,32 @@ public ByteBuffer[] nioBuffers(int index, int length) { return new ByteBuffer[] { EMPTY_NIO_BUFFER }; } - List buffers = new ArrayList(components.size()); - int i = toComponentIndex(index); - while (length > 0) { - Component c = components.get(i); - ByteBuf s = c.buf; - int adjustment = c.offset; - int localLength = Math.min(length, s.capacity() - (index - adjustment)); - switch (s.nioBufferCount()) { + RecyclableArrayList buffers = RecyclableArrayList.newInstance(componentCount); + try { + int i = toComponentIndex0(index); + while (length > 0) { + Component c = components[i]; + ByteBuf s = c.buf; + int localLength = Math.min(length, c.endOffset - index); + switch (s.nioBufferCount()) { case 0: throw new UnsupportedOperationException(); case 1: - buffers.add(s.nioBuffer(index - adjustment, localLength)); + buffers.add(s.nioBuffer(c.idx(index), localLength)); break; default: - Collections.addAll(buffers, s.nioBuffers(index - adjustment, localLength)); + Collections.addAll(buffers, s.nioBuffers(c.idx(index), localLength)); + } + + index += localLength; + length -= localLength; + i ++; } - index += localLength; - length -= localLength; - i ++; + return buffers.toArray(new ByteBuffer[0]); + } finally { + buffers.recycle(); } - - return buffers.toArray(new ByteBuffer[buffers.size()]); } /** @@ -1512,25 +1723,7 @@ public ByteBuffer[] nioBuffers(int index, int length) { */ public CompositeByteBuf consolidate() { ensureAccessible(); - final int numComponents = numComponents(); - if (numComponents <= 1) { - return this; - } - - final Component last = components.get(numComponents - 1); - final int capacity = last.endOffset; - final ByteBuf consolidated = allocBuffer(capacity); - - for (int i = 0; i < numComponents; i ++) { - Component c = components.get(i); - ByteBuf b = c.buf; - 
consolidated.writeBytes(b); - c.freeIfNecessary(); - } - - components.clear(); - components.add(new Component(consolidated)); - updateComponentOffsets(0); + consolidate0(0, componentCount); return this; } @@ -1542,26 +1735,29 @@ public CompositeByteBuf consolidate() { */ public CompositeByteBuf consolidate(int cIndex, int numComponents) { checkComponentIndex(cIndex, numComponents); + consolidate0(cIndex, numComponents); + return this; + } + + private void consolidate0(int cIndex, int numComponents) { if (numComponents <= 1) { - return this; + return; } final int endCIndex = cIndex + numComponents; - final Component last = components.get(endCIndex - 1); - final int capacity = last.endOffset - components.get(cIndex).offset; + final int startOffset = cIndex != 0 ? components[cIndex].offset : 0; + final int capacity = components[endCIndex - 1].endOffset - startOffset; final ByteBuf consolidated = allocBuffer(capacity); for (int i = cIndex; i < endCIndex; i ++) { - Component c = components.get(i); - ByteBuf b = c.buf; - consolidated.writeBytes(b); - c.freeIfNecessary(); + components[i].transferTo(consolidated); + } + lastAccessed = null; + removeCompRange(cIndex + 1, endCIndex); + components[cIndex] = newComponent(consolidated, 0); + if (cIndex != 0 || numComponents != componentCount) { + updateComponentOffsets(cIndex); } - - components.removeRange(cIndex + 1, endCIndex); - components.set(cIndex, new Component(consolidated)); - updateComponentOffsets(cIndex); - return this; } /** @@ -1577,29 +1773,38 @@ public CompositeByteBuf discardReadComponents() { // Discard everything if (readerIndex = writerIndex = capacity). 
int writerIndex = writerIndex(); if (readerIndex == writerIndex && writerIndex == capacity()) { - int size = components.size(); - for (int i = 0; i < size; i++) { - components.get(i).freeIfNecessary(); + for (int i = 0, size = componentCount; i < size; i++) { + components[i].free(); } - components.clear(); + lastAccessed = null; + clearComps(); setIndex(0, 0); - adjustMarkers(readerIndex); return this; } // Remove read components. - int firstComponentId = toComponentIndex(readerIndex); - for (int i = 0; i < firstComponentId; i ++) { - components.get(i).freeIfNecessary(); + int firstComponentId = 0; + Component c = null; + for (int size = componentCount; firstComponentId < size; firstComponentId++) { + c = components[firstComponentId]; + if (c.endOffset > readerIndex) { + break; + } + c.free(); } - components.removeRange(0, firstComponentId); + if (firstComponentId == 0) { + return this; // Nothing to discard + } + Component la = lastAccessed; + if (la != null && la.endOffset <= readerIndex) { + lastAccessed = null; + } + removeCompRange(0, firstComponentId); // Update indexes and markers. - Component first = components.get(0); - int offset = first.offset; + int offset = c.offset; updateComponentOffsets(0); setIndex(readerIndex - offset, writerIndex - offset); - adjustMarkers(offset); return this; } @@ -1614,39 +1819,47 @@ public CompositeByteBuf discardReadBytes() { // Discard everything if (readerIndex = writerIndex = capacity). int writerIndex = writerIndex(); if (readerIndex == writerIndex && writerIndex == capacity()) { - int size = components.size(); - for (int i = 0; i < size; i++) { - components.get(i).freeIfNecessary(); + for (int i = 0, size = componentCount; i < size; i++) { + components[i].free(); } - components.clear(); + lastAccessed = null; + clearComps(); setIndex(0, 0); - adjustMarkers(readerIndex); return this; } - // Remove read components. 
- int firstComponentId = toComponentIndex(readerIndex); - for (int i = 0; i < firstComponentId; i ++) { - components.get(i).freeIfNecessary(); + int firstComponentId = 0; + Component c = null; + for (int size = componentCount; firstComponentId < size; firstComponentId++) { + c = components[firstComponentId]; + if (c.endOffset > readerIndex) { + break; + } + c.free(); } - // Remove or replace the first readable component with a new slice. - Component c = components.get(firstComponentId); - int adjustment = readerIndex - c.offset; - if (adjustment == c.length) { - // new slice would be empty, so remove instead - firstComponentId++; - } else { - Component newC = new Component(c.buf.slice(adjustment, c.length - adjustment)); - components.set(firstComponentId, newC); + // Replace the first readable component with a new slice. + int trimmedBytes = readerIndex - c.offset; + c.offset = 0; + c.endOffset -= readerIndex; + c.srcAdjustment += readerIndex; + c.adjustment += readerIndex; + ByteBuf slice = c.slice; + if (slice != null) { + // We must replace the cached slice with a derived one to ensure that + // it can later be released properly in the case of PooledSlicedByteBuf. + c.slice = slice.slice(trimmedBytes, c.length()); + } + Component la = lastAccessed; + if (la != null && la.endOffset <= readerIndex) { + lastAccessed = null; } - components.removeRange(0, firstComponentId); + removeCompRange(0, firstComponentId); // Update indexes and markers. 
updateComponentOffsets(0); setIndex(0, writerIndex - readerIndex); - adjustMarkers(readerIndex); return this; } @@ -1658,253 +1871,328 @@ private ByteBuf allocBuffer(int capacity) { public String toString() { String result = super.toString(); result = result.substring(0, result.length() - 1); - return result + ", components=" + components.size() + ')'; + return result + ", components=" + componentCount + ')'; } private static final class Component { - final ByteBuf buf; - final int length; - int offset; - int endOffset; + final ByteBuf srcBuf; // the originally added buffer + final ByteBuf buf; // srcBuf unwrapped zero or more times + + int srcAdjustment; // index of the start of this CompositeByteBuf relative to srcBuf + int adjustment; // index of the start of this CompositeByteBuf relative to buf + + int offset; // offset of this component within this CompositeByteBuf + int endOffset; // end offset of this component within this CompositeByteBuf + + private ByteBuf slice; // cached slice, may be null - Component(ByteBuf buf) { + Component(ByteBuf srcBuf, int srcOffset, ByteBuf buf, int bufOffset, + int offset, int len, ByteBuf slice) { + this.srcBuf = srcBuf; + this.srcAdjustment = srcOffset - offset; this.buf = buf; - length = buf.readableBytes(); + this.adjustment = bufOffset - offset; + this.offset = offset; + this.endOffset = offset + len; + this.slice = slice; } - void freeIfNecessary() { - buf.release(); // We should not get a NPE here. If so, it must be a bug. 
+ int srcIdx(int index) { + return index + srcAdjustment; } - } - @Override - public CompositeByteBuf readerIndex(int readerIndex) { - return (CompositeByteBuf) super.readerIndex(readerIndex); - } + int idx(int index) { + return index + adjustment; + } - @Override - public CompositeByteBuf writerIndex(int writerIndex) { - return (CompositeByteBuf) super.writerIndex(writerIndex); - } + int length() { + return endOffset - offset; + } - @Override - public CompositeByteBuf setIndex(int readerIndex, int writerIndex) { - return (CompositeByteBuf) super.setIndex(readerIndex, writerIndex); - } + void reposition(int newOffset) { + int move = newOffset - offset; + endOffset += move; + srcAdjustment -= move; + adjustment -= move; + offset = newOffset; + } - @Override - public CompositeByteBuf clear() { - return (CompositeByteBuf) super.clear(); + // copy then release + void transferTo(ByteBuf dst) { + dst.writeBytes(buf, idx(offset), length()); + free(); + } + + ByteBuf slice() { + ByteBuf s = slice; + if (s == null) { + slice = s = srcBuf.slice(srcIdx(offset), length()); + } + return s; + } + + ByteBuf duplicate() { + return srcBuf.duplicate(); + } + + ByteBuffer internalNioBuffer(int index, int length) { + // Some buffers override this so we must use srcBuf + return srcBuf.internalNioBuffer(srcIdx(index), length); + } + + void free() { + slice = null; + // Release the original buffer since it may have a different + // refcount to the unwrapped buf (e.g. 
if PooledSlicedByteBuf) + srcBuf.release(); + } } @Override - public CompositeByteBuf markReaderIndex() { - return (CompositeByteBuf) super.markReaderIndex(); + public CompositeByteBuf readerIndex(int readerIndex) { + super.readerIndex(readerIndex); + return this; } @Override - public CompositeByteBuf resetReaderIndex() { - return (CompositeByteBuf) super.resetReaderIndex(); + public CompositeByteBuf writerIndex(int writerIndex) { + super.writerIndex(writerIndex); + return this; } @Override - public CompositeByteBuf markWriterIndex() { - return (CompositeByteBuf) super.markWriterIndex(); + public CompositeByteBuf setIndex(int readerIndex, int writerIndex) { + super.setIndex(readerIndex, writerIndex); + return this; } @Override - public CompositeByteBuf resetWriterIndex() { - return (CompositeByteBuf) super.resetWriterIndex(); + public CompositeByteBuf clear() { + super.clear(); + return this; } @Override public CompositeByteBuf ensureWritable(int minWritableBytes) { - return (CompositeByteBuf) super.ensureWritable(minWritableBytes); + super.ensureWritable(minWritableBytes); + return this; } @Override public CompositeByteBuf getBytes(int index, ByteBuf dst) { - return (CompositeByteBuf) super.getBytes(index, dst); + return getBytes(index, dst, dst.writableBytes()); } @Override public CompositeByteBuf getBytes(int index, ByteBuf dst, int length) { - return (CompositeByteBuf) super.getBytes(index, dst, length); + getBytes(index, dst, dst.writerIndex(), length); + dst.writerIndex(dst.writerIndex() + length); + return this; } @Override public CompositeByteBuf getBytes(int index, byte[] dst) { - return (CompositeByteBuf) super.getBytes(index, dst); + return getBytes(index, dst, 0, dst.length); } @Override public CompositeByteBuf setBoolean(int index, boolean value) { - return (CompositeByteBuf) super.setBoolean(index, value); + return setByte(index, value? 
1 : 0); } @Override public CompositeByteBuf setChar(int index, int value) { - return (CompositeByteBuf) super.setChar(index, value); + return setShort(index, value); } @Override public CompositeByteBuf setFloat(int index, float value) { - return (CompositeByteBuf) super.setFloat(index, value); + return setInt(index, Float.floatToRawIntBits(value)); } @Override public CompositeByteBuf setDouble(int index, double value) { - return (CompositeByteBuf) super.setDouble(index, value); + return setLong(index, Double.doubleToRawLongBits(value)); } @Override public CompositeByteBuf setBytes(int index, ByteBuf src) { - return (CompositeByteBuf) super.setBytes(index, src); + super.setBytes(index, src, src.readableBytes()); + return this; } @Override public CompositeByteBuf setBytes(int index, ByteBuf src, int length) { - return (CompositeByteBuf) super.setBytes(index, src, length); + super.setBytes(index, src, length); + return this; } @Override public CompositeByteBuf setBytes(int index, byte[] src) { - return (CompositeByteBuf) super.setBytes(index, src); + return setBytes(index, src, 0, src.length); } @Override public CompositeByteBuf setZero(int index, int length) { - return (CompositeByteBuf) super.setZero(index, length); + super.setZero(index, length); + return this; } @Override public CompositeByteBuf readBytes(ByteBuf dst) { - return (CompositeByteBuf) super.readBytes(dst); + super.readBytes(dst, dst.writableBytes()); + return this; } @Override public CompositeByteBuf readBytes(ByteBuf dst, int length) { - return (CompositeByteBuf) super.readBytes(dst, length); + super.readBytes(dst, length); + return this; } @Override public CompositeByteBuf readBytes(ByteBuf dst, int dstIndex, int length) { - return (CompositeByteBuf) super.readBytes(dst, dstIndex, length); + super.readBytes(dst, dstIndex, length); + return this; } @Override public CompositeByteBuf readBytes(byte[] dst) { - return (CompositeByteBuf) super.readBytes(dst); + super.readBytes(dst, 0, dst.length); + 
return this; } @Override public CompositeByteBuf readBytes(byte[] dst, int dstIndex, int length) { - return (CompositeByteBuf) super.readBytes(dst, dstIndex, length); + super.readBytes(dst, dstIndex, length); + return this; } @Override public CompositeByteBuf readBytes(ByteBuffer dst) { - return (CompositeByteBuf) super.readBytes(dst); + super.readBytes(dst); + return this; } @Override public CompositeByteBuf readBytes(OutputStream out, int length) throws IOException { - return (CompositeByteBuf) super.readBytes(out, length); + super.readBytes(out, length); + return this; } @Override public CompositeByteBuf skipBytes(int length) { - return (CompositeByteBuf) super.skipBytes(length); + super.skipBytes(length); + return this; } @Override public CompositeByteBuf writeBoolean(boolean value) { - return (CompositeByteBuf) super.writeBoolean(value); + writeByte(value ? 1 : 0); + return this; } @Override public CompositeByteBuf writeByte(int value) { - return (CompositeByteBuf) super.writeByte(value); + ensureWritable0(1); + _setByte(writerIndex++, value); + return this; } @Override public CompositeByteBuf writeShort(int value) { - return (CompositeByteBuf) super.writeShort(value); + super.writeShort(value); + return this; } @Override public CompositeByteBuf writeMedium(int value) { - return (CompositeByteBuf) super.writeMedium(value); + super.writeMedium(value); + return this; } @Override public CompositeByteBuf writeInt(int value) { - return (CompositeByteBuf) super.writeInt(value); + super.writeInt(value); + return this; } @Override public CompositeByteBuf writeLong(long value) { - return (CompositeByteBuf) super.writeLong(value); + super.writeLong(value); + return this; } @Override public CompositeByteBuf writeChar(int value) { - return (CompositeByteBuf) super.writeChar(value); + super.writeShort(value); + return this; } @Override public CompositeByteBuf writeFloat(float value) { - return (CompositeByteBuf) super.writeFloat(value); + 
super.writeInt(Float.floatToRawIntBits(value)); + return this; } @Override public CompositeByteBuf writeDouble(double value) { - return (CompositeByteBuf) super.writeDouble(value); + super.writeLong(Double.doubleToRawLongBits(value)); + return this; } @Override public CompositeByteBuf writeBytes(ByteBuf src) { - return (CompositeByteBuf) super.writeBytes(src); + super.writeBytes(src, src.readableBytes()); + return this; } @Override public CompositeByteBuf writeBytes(ByteBuf src, int length) { - return (CompositeByteBuf) super.writeBytes(src, length); + super.writeBytes(src, length); + return this; } @Override public CompositeByteBuf writeBytes(ByteBuf src, int srcIndex, int length) { - return (CompositeByteBuf) super.writeBytes(src, srcIndex, length); + super.writeBytes(src, srcIndex, length); + return this; } @Override public CompositeByteBuf writeBytes(byte[] src) { - return (CompositeByteBuf) super.writeBytes(src); + super.writeBytes(src, 0, src.length); + return this; } @Override public CompositeByteBuf writeBytes(byte[] src, int srcIndex, int length) { - return (CompositeByteBuf) super.writeBytes(src, srcIndex, length); + super.writeBytes(src, srcIndex, length); + return this; } @Override public CompositeByteBuf writeBytes(ByteBuffer src) { - return (CompositeByteBuf) super.writeBytes(src); + super.writeBytes(src); + return this; } @Override public CompositeByteBuf writeZero(int length) { - return (CompositeByteBuf) super.writeZero(length); + super.writeZero(length); + return this; } @Override public CompositeByteBuf retain(int increment) { - return (CompositeByteBuf) super.retain(increment); + super.retain(increment); + return this; } @Override public CompositeByteBuf retain() { - return (CompositeByteBuf) super.retain(); + super.retain(); + return this; } @Override @@ -1934,21 +2222,25 @@ protected void deallocate() { } freed = true; - int size = components.size(); // We're not using foreach to avoid creating an iterator. 
// see https://github.com/netty/netty/issues/2642 - for (int i = 0; i < size; i++) { - components.get(i).freeIfNecessary(); + for (int i = 0, size = componentCount; i < size; i++) { + components[i].free(); } } + @Override + boolean isAccessible() { + return !freed; + } + @Override public ByteBuf unwrap() { return null; } private final class CompositeByteBufIterator implements Iterator { - private final int size = components.size(); + private final int size = numComponents(); private int index; @Override @@ -1958,14 +2250,14 @@ public boolean hasNext() { @Override public ByteBuf next() { - if (size != components.size()) { + if (size != numComponents()) { throw new ConcurrentModificationException(); } if (!hasNext()) { throw new NoSuchElementException(); } try { - return components.get(index++).buf; + return components[index++].slice(); } catch (IndexOutOfBoundsException e) { throw new ConcurrentModificationException(); } @@ -1977,16 +2269,59 @@ public void remove() { } } - private static final class ComponentList extends ArrayList { + // Component array manipulation - range checking omitted + + private void clearComps() { + removeCompRange(0, componentCount); + } + + private void removeComp(int i) { + removeCompRange(i, i + 1); + } - ComponentList(int initialCapacity) { - super(initialCapacity); + private void removeCompRange(int from, int to) { + if (from >= to) { + return; + } + final int size = componentCount; + assert from >= 0 && to <= size; + if (to < size) { + System.arraycopy(components, to, components, from, size - to); + } + int newSize = size - to + from; + for (int i = newSize; i < size; i++) { + components[i] = null; } + componentCount = newSize; + } - // Expose this methods so we not need to create a new subList just to remove a range of elements. 
- @Override - public void removeRange(int fromIndex, int toIndex) { - super.removeRange(fromIndex, toIndex); + private void addComp(int i, Component c) { + shiftComps(i, 1); + components[i] = c; + } + + private void shiftComps(int i, int count) { + final int size = componentCount, newSize = size + count; + assert i >= 0 && i <= size && count > 0; + if (newSize > components.length) { + // grow the array + int newArrSize = Math.max(size + (size >> 1), newSize); + Component[] newArr; + if (i == size) { + newArr = Arrays.copyOf(components, newArrSize, Component[].class); + } else { + newArr = new Component[newArrSize]; + if (i > 0) { + System.arraycopy(components, 0, newArr, 0, i); + } + if (i < size) { + System.arraycopy(components, i, newArr, i + count, size - i); + } + } + components = newArr; + } else if (i < size) { + System.arraycopy(components, i, components, i + count, size - i); } + componentCount = newSize; } } diff --git a/buffer/src/main/java/io/netty/buffer/DefaultByteBufHolder.java b/buffer/src/main/java/io/netty/buffer/DefaultByteBufHolder.java index 8198eaec942..da3fcf92838 100644 --- a/buffer/src/main/java/io/netty/buffer/DefaultByteBufHolder.java +++ b/buffer/src/main/java/io/netty/buffer/DefaultByteBufHolder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,8 @@ */ package io.netty.buffer; -import io.netty.util.IllegalReferenceCountException; +import static java.util.Objects.requireNonNull; + import io.netty.util.internal.StringUtil; /** @@ -27,18 +28,13 @@ public class DefaultByteBufHolder implements ByteBufHolder { private final ByteBuf data; public DefaultByteBufHolder(ByteBuf data) { - if (data == null) { - throw new NullPointerException("data"); - } + requireNonNull(data, "data"); this.data = data; } @Override public ByteBuf content() { - if (data.refCnt() <= 0) { - throw new IllegalReferenceCountException(data.refCnt()); - } - return data; + return ByteBufUtil.ensureAccessible(data); } /** @@ -135,13 +131,24 @@ public String toString() { return StringUtil.simpleClassName(this) + '(' + contentToString() + ')'; } + /** + * This implementation of the {@code equals} operation is restricted to + * work only with instances of the same class. The reason for that is that + * Netty library already has a number of classes that extend {@link DefaultByteBufHolder} and + * override {@code equals} method with an additional comparison logic and we + * need the symmetric property of the {@code equals} operation to be preserved. + * + * @param o the reference object with which to compare. + * @return {@code true} if this object is the same as the obj + * argument; {@code false} otherwise. 
+ */ @Override public boolean equals(Object o) { if (this == o) { return true; } - if (o instanceof ByteBufHolder) { - return data.equals(((ByteBufHolder) o).content()); + if (o != null && getClass() == o.getClass()) { + return data.equals(((DefaultByteBufHolder) o).data); } return false; } diff --git a/buffer/src/main/java/io/netty/buffer/DuplicatedByteBuf.java b/buffer/src/main/java/io/netty/buffer/DuplicatedByteBuf.java index f48c6ceb98c..50f84611507 100644 --- a/buffer/src/main/java/io/netty/buffer/DuplicatedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/DuplicatedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -54,8 +54,6 @@ public DuplicatedByteBuf(ByteBuf buffer) { } setIndex(readerIndex, writerIndex); - markReaderIndex(); - markWriterIndex(); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/EmptyByteBuf.java b/buffer/src/main/java/io/netty/buffer/EmptyByteBuf.java index ebe3b45391e..93fdc58a9d3 100644 --- a/buffer/src/main/java/io/netty/buffer/EmptyByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/EmptyByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,6 +16,9 @@ package io.netty.buffer; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; + import io.netty.util.ByteProcessor; import io.netty.util.internal.EmptyArrays; import io.netty.util.internal.PlatformDependent; @@ -36,6 +39,7 @@ */ public final class EmptyByteBuf extends ByteBuf { + static final int EMPTY_BYTE_BUF_HASH_CODE = 1; private static final ByteBuffer EMPTY_BYTE_BUFFER = ByteBuffer.allocateDirect(0); private static final long EMPTY_BYTE_BUFFER_ADDRESS; @@ -61,9 +65,7 @@ public EmptyByteBuf(ByteBufAllocator alloc) { } private EmptyByteBuf(ByteBufAllocator alloc, ByteOrder order) { - if (alloc == null) { - throw new NullPointerException("alloc"); - } + requireNonNull(alloc, "alloc"); this.alloc = alloc; this.order = order; @@ -117,9 +119,7 @@ public int maxCapacity() { @Override public ByteBuf order(ByteOrder endianness) { - if (endianness == null) { - throw new NullPointerException("endianness"); - } + requireNonNull(endianness, "endianness"); if (endianness == order()) { return this; } @@ -190,26 +190,6 @@ public ByteBuf clear() { return this; } - @Override - public ByteBuf markReaderIndex() { - return this; - } - - @Override - public ByteBuf resetReaderIndex() { - return this; - } - - @Override - public ByteBuf markWriterIndex() { - return this; - } - - @Override - public ByteBuf resetWriterIndex() { - return this; - } - @Override public ByteBuf discardReadBytes() { return this; @@ -222,9 +202,7 @@ public ByteBuf discardSomeReadBytes() { @Override public ByteBuf ensureWritable(int minWritableBytes) { - if (minWritableBytes < 0) { - throw new IllegalArgumentException("minWritableBytes: " + 
minWritableBytes + " (expected: >= 0)"); - } + checkPositiveOrZero(minWritableBytes, "minWritableBytes"); if (minWritableBytes != 0) { throw new IndexOutOfBoundsException(); } @@ -233,9 +211,7 @@ public ByteBuf ensureWritable(int minWritableBytes) { @Override public int ensureWritable(int minWritableBytes, boolean force) { - if (minWritableBytes < 0) { - throw new IllegalArgumentException("minWritableBytes: " + minWritableBytes + " (expected: >= 0)"); - } + checkPositiveOrZero(minWritableBytes, "minWritableBytes"); if (minWritableBytes == 0) { return 0; @@ -685,7 +661,7 @@ public int readBytes(FileChannel out, long position, int length) { @Override public CharSequence readCharSequence(int length, Charset charset) { checkLength(length); - return null; + return StringUtil.EMPTY_STRING; } @Override @@ -963,6 +939,11 @@ public long memoryAddress() { } } + @Override + public boolean isContiguous() { + return true; + } + @Override public String toString(Charset charset) { return ""; @@ -976,7 +957,7 @@ public String toString(int index, int length, Charset charset) { @Override public int hashCode() { - return 0; + return EMPTY_BYTE_BUF_HASH_CODE; } @Override @@ -1047,9 +1028,7 @@ private ByteBuf checkIndex(int index) { } private ByteBuf checkIndex(int index, int length) { - if (length < 0) { - throw new IllegalArgumentException("length: " + length); - } + checkPositiveOrZero(length, "length"); if (index != 0 || length != 0) { throw new IndexOutOfBoundsException(); } @@ -1057,9 +1036,7 @@ private ByteBuf checkIndex(int index, int length) { } private ByteBuf checkLength(int length) { - if (length < 0) { - throw new IllegalArgumentException("length: " + length + " (expected: >= 0)"); - } + checkPositiveOrZero(length, "length"); if (length != 0) { throw new IndexOutOfBoundsException(); } diff --git a/buffer/src/main/java/io/netty/buffer/FixedCompositeByteBuf.java b/buffer/src/main/java/io/netty/buffer/FixedCompositeByteBuf.java index bb933ce7b10..a7b39907e2a 100644 --- 
a/buffer/src/main/java/io/netty/buffer/FixedCompositeByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/FixedCompositeByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -39,7 +39,7 @@ final class FixedCompositeByteBuf extends AbstractReferenceCountedByteBuf { private final int capacity; private final ByteBufAllocator allocator; private final ByteOrder order; - private final Object[] buffers; + private final ByteBuf[] buffers; private final boolean direct; FixedCompositeByteBuf(ByteBufAllocator allocator, ByteBuf... buffers) { @@ -49,11 +49,10 @@ final class FixedCompositeByteBuf extends AbstractReferenceCountedByteBuf { order = ByteOrder.BIG_ENDIAN; nioBufferCount = 1; capacity = 0; - direct = false; + direct = Unpooled.EMPTY_BUFFER.isDirect(); } else { ByteBuf b = buffers[0]; - this.buffers = new Object[buffers.length]; - this.buffers[0] = b; + this.buffers = buffers; boolean direct = true; int nioBufferCount = b.nioBufferCount(); int capacity = b.readableBytes(); @@ -68,7 +67,6 @@ final class FixedCompositeByteBuf extends AbstractReferenceCountedByteBuf { if (!b.isDirect()) { direct = false; } - this.buffers[i] = b; } this.nioBufferCount = nioBufferCount; this.capacity = capacity; @@ -232,20 +230,14 @@ private Component findComponent(int index) { int readable = 0; for (int i = 0 ; i < buffers.length; i++) { Component comp = null; - ByteBuf b; - Object obj = buffers[i]; - boolean isBuffer; - if (obj instanceof ByteBuf) { - b = (ByteBuf) obj; - isBuffer = true; - } else { - comp = (Component) obj; + ByteBuf b = buffers[i]; + if (b instanceof Component) { + comp = (Component) b; b = comp.buf; - isBuffer 
= false; } readable += b.readableBytes(); if (index < readable) { - if (isBuffer) { + if (comp == null) { // Create a new component and store it in the array so it not create a new object // on the next access. comp = new Component(i, readable - b.readableBytes(), b); @@ -261,11 +253,8 @@ private Component findComponent(int index) { * Return the {@link ByteBuf} stored at the given index of the array. */ private ByteBuf buffer(int i) { - Object obj = buffers[i]; - if (obj instanceof ByteBuf) { - return (ByteBuf) obj; - } - return ((Component) obj).buf; + ByteBuf b = buffers[i]; + return b instanceof Component ? ((Component) b).buf : b; } @Override @@ -604,7 +593,7 @@ public ByteBuffer[] nioBuffers(int index, int length) { s = buffer(++i); } - return array.toArray(new ByteBuffer[array.size()]); + return array.toArray(new ByteBuffer[0]); } finally { array.recycle(); } @@ -612,27 +601,62 @@ public ByteBuffer[] nioBuffers(int index, int length) { @Override public boolean hasArray() { - return false; + switch (buffers.length) { + case 0: + return true; + case 1: + return buffer(0).hasArray(); + default: + return false; + } } @Override public byte[] array() { - throw new UnsupportedOperationException(); + switch (buffers.length) { + case 0: + return EmptyArrays.EMPTY_BYTES; + case 1: + return buffer(0).array(); + default: + throw new UnsupportedOperationException(); + } } @Override public int arrayOffset() { - throw new UnsupportedOperationException(); + switch (buffers.length) { + case 0: + return 0; + case 1: + return buffer(0).arrayOffset(); + default: + throw new UnsupportedOperationException(); + } } @Override public boolean hasMemoryAddress() { - return false; + switch (buffers.length) { + case 0: + return Unpooled.EMPTY_BUFFER.hasMemoryAddress(); + case 1: + return buffer(0).hasMemoryAddress(); + default: + return false; + } } @Override public long memoryAddress() { - throw new UnsupportedOperationException(); + switch (buffers.length) { + case 0: + return 
Unpooled.EMPTY_BUFFER.memoryAddress(); + case 1: + return buffer(0).memoryAddress(); + default: + throw new UnsupportedOperationException(); + } } @Override @@ -649,17 +673,16 @@ public String toString() { return result + ", components=" + buffers.length + ')'; } - private static final class Component { + private static final class Component extends WrappedByteBuf { private final int index; private final int offset; - private final ByteBuf buf; private final int endOffset; Component(int index, int offset, ByteBuf buf) { + super(buf); this.index = index; this.offset = offset; endOffset = offset + buf.readableBytes(); - this.buf = buf; } } } diff --git a/buffer/src/main/java/io/netty/buffer/HeapByteBufUtil.java b/buffer/src/main/java/io/netty/buffer/HeapByteBufUtil.java index abb93cda74e..9f7972a2737 100644 --- a/buffer/src/main/java/io/netty/buffer/HeapByteBufUtil.java +++ b/buffer/src/main/java/io/netty/buffer/HeapByteBufUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/PoolArena.java b/buffer/src/main/java/io/netty/buffer/PoolArena.java index 465315b6744..7deb7d89624 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolArena.java +++ b/buffer/src/main/java/io/netty/buffer/PoolArena.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,7 +16,6 @@ package io.netty.buffer; -import io.netty.util.internal.LongCounter; import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; @@ -25,31 +24,23 @@ import java.util.Collections; import java.util.List; import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.LongAdder; +import static io.netty.buffer.PoolChunk.isSubpage; import static java.lang.Math.max; -abstract class PoolArena implements PoolArenaMetric { +abstract class PoolArena extends SizeClasses implements PoolArenaMetric { static final boolean HAS_UNSAFE = PlatformDependent.hasUnsafe(); enum SizeClass { - Tiny, Small, Normal } - static final int numTinySubpagePools = 512 >>> 4; - final PooledByteBufAllocator parent; - private final int maxOrder; - final int pageSize; - final int pageShifts; - final int chunkSize; - final int subpageOverflowMask; final int numSmallSubpagePools; final int directMemoryCacheAlignment; - final int directMemoryCacheAlignmentMask; - private final PoolSubpage[] tinySubpagePools; private final PoolSubpage[] smallSubpagePools; private final PoolChunkList q050; @@ -63,18 +54,17 @@ enum SizeClass { // Metrics for allocations and deallocations private long allocationsNormal; - // We need to use the LongCounter here as this is not guarded via synchronized block. 
- private final LongCounter allocationsTiny = PlatformDependent.newLongCounter(); - private final LongCounter allocationsSmall = PlatformDependent.newLongCounter(); - private final LongCounter allocationsHuge = PlatformDependent.newLongCounter(); - private final LongCounter activeBytesHuge = PlatformDependent.newLongCounter(); - private long deallocationsTiny; + // We need to use the LongAdder here as this is not guarded via synchronized block. + private final LongAdder allocationsSmall = new LongAdder(); + private final LongAdder allocationsHuge = new LongAdder(); + private final LongAdder activeBytesHuge = new LongAdder(); + private long deallocationsSmall; private long deallocationsNormal; - // We need to use the LongCounter here as this is not guarded via synchronized block. - private final LongCounter deallocationsHuge = PlatformDependent.newLongCounter(); + // We need to use the LongAdder here as this is not guarded via synchronized block. + private final LongAdder deallocationsHuge = new LongAdder(); // Number of thread caches backed by this arena. 
final AtomicInteger numThreadCaches = new AtomicInteger(); @@ -83,32 +73,23 @@ enum SizeClass { //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; protected PoolArena(PooledByteBufAllocator parent, int pageSize, - int maxOrder, int pageShifts, int chunkSize, int cacheAlignment) { + int pageShifts, int chunkSize, int cacheAlignment) { + super(pageSize, pageShifts, chunkSize, cacheAlignment); this.parent = parent; - this.pageSize = pageSize; - this.maxOrder = maxOrder; - this.pageShifts = pageShifts; - this.chunkSize = chunkSize; directMemoryCacheAlignment = cacheAlignment; - directMemoryCacheAlignmentMask = cacheAlignment - 1; - subpageOverflowMask = ~(pageSize - 1); - tinySubpagePools = newSubpagePoolArray(numTinySubpagePools); - for (int i = 0; i < tinySubpagePools.length; i ++) { - tinySubpagePools[i] = newSubpagePoolHead(pageSize); - } - numSmallSubpagePools = pageShifts - 9; + numSmallSubpagePools = nSubpages; smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools); for (int i = 0; i < smallSubpagePools.length; i ++) { - smallSubpagePools[i] = newSubpagePoolHead(pageSize); + smallSubpagePools[i] = newSubpagePoolHead(); } - q100 = new PoolChunkList(this, null, 100, Integer.MAX_VALUE, chunkSize); - q075 = new PoolChunkList(this, q100, 75, 100, chunkSize); - q050 = new PoolChunkList(this, q075, 50, 100, chunkSize); - q025 = new PoolChunkList(this, q050, 25, 75, chunkSize); - q000 = new PoolChunkList(this, q025, 1, 50, chunkSize); - qInit = new PoolChunkList(this, q000, Integer.MIN_VALUE, 25, chunkSize); + q100 = new PoolChunkList<>(this, null, 100, Integer.MAX_VALUE, chunkSize); + q075 = new PoolChunkList<>(this, q100, 75, 100, chunkSize); + q050 = new PoolChunkList<>(this, q075, 50, 100, chunkSize); + q025 = new PoolChunkList<>(this, q050, 25, 75, chunkSize); + q000 = new PoolChunkList<>(this, q025, 1, 50, chunkSize); + qInit = new PoolChunkList<>(this, q000, Integer.MIN_VALUE, 25, chunkSize); q100.prevList(q075); q075.prevList(q050); @@ -117,7 
+98,7 @@ protected PoolArena(PooledByteBufAllocator parent, int pageSize, q000.prevList(null); qInit.prevList(qInit); - List metrics = new ArrayList(6); + List metrics = new ArrayList<>(6); metrics.add(qInit); metrics.add(q000); metrics.add(q025); @@ -127,8 +108,8 @@ protected PoolArena(PooledByteBufAllocator parent, int pageSize, chunkListMetrics = Collections.unmodifiableList(metrics); } - private PoolSubpage newSubpagePoolHead(int pageSize) { - PoolSubpage head = new PoolSubpage(pageSize); + private PoolSubpage newSubpagePoolHead() { + PoolSubpage head = new PoolSubpage<>(); head.prev = head; head.next = head; return head; @@ -147,113 +128,86 @@ PooledByteBuf allocate(PoolThreadCache cache, int reqCapacity, int maxCapacit return buf; } - static int tinyIdx(int normCapacity) { - return normCapacity >>> 4; - } + private void allocate(PoolThreadCache cache, PooledByteBuf buf, final int reqCapacity) { + final int sizeIdx = size2SizeIdx(reqCapacity); - static int smallIdx(int normCapacity) { - int tableIdx = 0; - int i = normCapacity >>> 10; - while (i != 0) { - i >>>= 1; - tableIdx ++; + if (sizeIdx <= smallMaxSizeIdx) { + tcacheAllocateSmall(cache, buf, reqCapacity, sizeIdx); + } else if (sizeIdx < nSizes) { + tcacheAllocateNormal(cache, buf, reqCapacity, sizeIdx); + } else { + int normCapacity = directMemoryCacheAlignment > 0 + ? 
normalizeSize(reqCapacity) : reqCapacity; + // Huge allocations are never served via the cache so just call allocateHuge + allocateHuge(buf, normCapacity); } - return tableIdx; } - // capacity < pageSize - boolean isTinyOrSmall(int normCapacity) { - return (normCapacity & subpageOverflowMask) == 0; - } + private void tcacheAllocateSmall(PoolThreadCache cache, PooledByteBuf buf, final int reqCapacity, + final int sizeIdx) { - // normCapacity < 512 - static boolean isTiny(int normCapacity) { - return (normCapacity & 0xFFFFFE00) == 0; - } + if (cache.allocateSmall(this, buf, reqCapacity, sizeIdx)) { + // was able to allocate out of the cache so move on + return; + } - private void allocate(PoolThreadCache cache, PooledByteBuf buf, final int reqCapacity) { - final int normCapacity = normalizeCapacity(reqCapacity); - if (isTinyOrSmall(normCapacity)) { // capacity < pageSize - int tableIdx; - PoolSubpage[] table; - boolean tiny = isTiny(normCapacity); - if (tiny) { // < 512 - if (cache.allocateTiny(this, buf, reqCapacity, normCapacity)) { - // was able to allocate out of the cache so move on - return; - } - tableIdx = tinyIdx(normCapacity); - table = tinySubpagePools; - } else { - if (cache.allocateSmall(this, buf, reqCapacity, normCapacity)) { - // was able to allocate out of the cache so move on - return; - } - tableIdx = smallIdx(normCapacity); - table = smallSubpagePools; + /* + * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and + * {@link PoolChunk#free(long)} may modify the doubly linked list as well. 
+ */ + final PoolSubpage head = smallSubpagePools[sizeIdx]; + final boolean needsNormalAllocation; + synchronized (head) { + final PoolSubpage s = head.next; + needsNormalAllocation = s == head; + if (!needsNormalAllocation) { + assert s.doNotDestroy && s.elemSize == sizeIdx2size(sizeIdx); + long handle = s.allocate(); + assert handle >= 0; + s.chunk.initBufWithSubpage(buf, null, handle, reqCapacity, cache); } + } - final PoolSubpage head = table[tableIdx]; - - /** - * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and - * {@link PoolChunk#free(long)} may modify the doubly linked list as well. - */ - synchronized (head) { - final PoolSubpage s = head.next; - if (s != head) { - assert s.doNotDestroy && s.elemSize == normCapacity; - long handle = s.allocate(); - assert handle >= 0; - s.chunk.initBufWithSubpage(buf, handle, reqCapacity); - incTinySmallAllocation(tiny); - return; - } - } + if (needsNormalAllocation) { synchronized (this) { - allocateNormal(buf, reqCapacity, normCapacity); + allocateNormal(buf, reqCapacity, sizeIdx, cache); } + } + + incSmallAllocation(); + } - incTinySmallAllocation(tiny); + private void tcacheAllocateNormal(PoolThreadCache cache, PooledByteBuf buf, final int reqCapacity, + final int sizeIdx) { + if (cache.allocateNormal(this, buf, reqCapacity, sizeIdx)) { + // was able to allocate out of the cache so move on return; } - if (normCapacity <= chunkSize) { - if (cache.allocateNormal(this, buf, reqCapacity, normCapacity)) { - // was able to allocate out of the cache so move on - return; - } - synchronized (this) { - allocateNormal(buf, reqCapacity, normCapacity); - ++allocationsNormal; - } - } else { - // Huge allocations are never served via the cache so just call allocateHuge - allocateHuge(buf, reqCapacity); + synchronized (this) { + allocateNormal(buf, reqCapacity, sizeIdx, cache); + ++allocationsNormal; } } - // Method must be called inside synchronized(this) { ... 
} block - private void allocateNormal(PooledByteBuf buf, int reqCapacity, int normCapacity) { - if (q050.allocate(buf, reqCapacity, normCapacity) || q025.allocate(buf, reqCapacity, normCapacity) || - q000.allocate(buf, reqCapacity, normCapacity) || qInit.allocate(buf, reqCapacity, normCapacity) || - q075.allocate(buf, reqCapacity, normCapacity)) { + // Method must be called inside synchronized(this) { ... } block + private void allocateNormal(PooledByteBuf buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache) { + if (q050.allocate(buf, reqCapacity, sizeIdx, threadCache) || + q025.allocate(buf, reqCapacity, sizeIdx, threadCache) || + q000.allocate(buf, reqCapacity, sizeIdx, threadCache) || + qInit.allocate(buf, reqCapacity, sizeIdx, threadCache) || + q075.allocate(buf, reqCapacity, sizeIdx, threadCache)) { return; } // Add a new chunk. - PoolChunk c = newChunk(pageSize, maxOrder, pageShifts, chunkSize); - long handle = c.allocate(normCapacity); - assert handle > 0; - c.initBuf(buf, handle, reqCapacity); + PoolChunk c = newChunk(pageSize, nPSizes, pageShifts, chunkSize); + boolean success = c.allocate(buf, reqCapacity, sizeIdx, threadCache); + assert success; qInit.add(c); } - private void incTinySmallAllocation(boolean tiny) { - if (tiny) { - allocationsTiny.increment(); - } else { - allocationsSmall.increment(); - } + private void incSmallAllocation() { + allocationsSmall.increment(); } private void allocateHuge(PooledByteBuf buf, int reqCapacity) { @@ -263,47 +217,46 @@ private void allocateHuge(PooledByteBuf buf, int reqCapacity) { allocationsHuge.increment(); } - void free(PoolChunk chunk, long handle, int normCapacity, PoolThreadCache cache) { + void free(PoolChunk chunk, ByteBuffer nioBuffer, long handle, int normCapacity, PoolThreadCache cache) { if (chunk.unpooled) { int size = chunk.chunkSize(); destroyChunk(chunk); activeBytesHuge.add(-size); deallocationsHuge.increment(); } else { - SizeClass sizeClass = sizeClass(normCapacity); - if (cache != 
null && cache.add(this, chunk, handle, normCapacity, sizeClass)) { + SizeClass sizeClass = sizeClass(handle); + if (cache != null && cache.add(this, chunk, nioBuffer, handle, normCapacity, sizeClass)) { // cached so not free it. return; } - freeChunk(chunk, handle, sizeClass); + freeChunk(chunk, handle, normCapacity, sizeClass, nioBuffer, false); } } - private SizeClass sizeClass(int normCapacity) { - if (!isTinyOrSmall(normCapacity)) { - return SizeClass.Normal; - } - return isTiny(normCapacity) ? SizeClass.Tiny : SizeClass.Small; + private static SizeClass sizeClass(long handle) { + return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal; } - void freeChunk(PoolChunk chunk, long handle, SizeClass sizeClass) { + void freeChunk(PoolChunk chunk, long handle, int normCapacity, SizeClass sizeClass, ByteBuffer nioBuffer, + boolean finalizer) { final boolean destroyChunk; synchronized (this) { - switch (sizeClass) { - case Normal: - ++deallocationsNormal; - break; - case Small: - ++deallocationsSmall; - break; - case Tiny: - ++deallocationsTiny; - break; - default: - throw new Error(); + // We only call this if freeChunk is not called because of the PoolThreadCache finalizer as otherwise this + // may fail due lazy class-loading in for example tomcat. + if (!finalizer) { + switch (sizeClass) { + case Normal: + ++deallocationsNormal; + break; + case Small: + ++deallocationsSmall; + break; + default: + throw new Error(); + } } - destroyChunk = !chunk.parent.free(chunk, handle); + destroyChunk = !chunk.parent.free(chunk, handle, normCapacity, nioBuffer); } if (destroyChunk) { // destroyChunk not need to be called while holding the synchronized lock. 
@@ -311,75 +264,12 @@ void freeChunk(PoolChunk chunk, long handle, SizeClass sizeClass) { } } - PoolSubpage findSubpagePoolHead(int elemSize) { - int tableIdx; - PoolSubpage[] table; - if (isTiny(elemSize)) { // < 512 - tableIdx = elemSize >>> 4; - table = tinySubpagePools; - } else { - tableIdx = 0; - elemSize >>>= 10; - while (elemSize != 0) { - elemSize >>>= 1; - tableIdx ++; - } - table = smallSubpagePools; - } - - return table[tableIdx]; - } - - int normalizeCapacity(int reqCapacity) { - if (reqCapacity < 0) { - throw new IllegalArgumentException("capacity: " + reqCapacity + " (expected: 0+)"); - } - - if (reqCapacity >= chunkSize) { - return directMemoryCacheAlignment == 0 ? reqCapacity : alignCapacity(reqCapacity); - } - - if (!isTiny(reqCapacity)) { // >= 512 - // Doubled - - int normalizedCapacity = reqCapacity; - normalizedCapacity --; - normalizedCapacity |= normalizedCapacity >>> 1; - normalizedCapacity |= normalizedCapacity >>> 2; - normalizedCapacity |= normalizedCapacity >>> 4; - normalizedCapacity |= normalizedCapacity >>> 8; - normalizedCapacity |= normalizedCapacity >>> 16; - normalizedCapacity ++; - - if (normalizedCapacity < 0) { - normalizedCapacity >>>= 1; - } - assert directMemoryCacheAlignment == 0 || (normalizedCapacity & directMemoryCacheAlignmentMask) == 0; - - return normalizedCapacity; - } - - if (directMemoryCacheAlignment > 0) { - return alignCapacity(reqCapacity); - } - - // Quantum-spaced - if ((reqCapacity & 15) == 0) { - return reqCapacity; - } - - return (reqCapacity & ~15) + 16; - } - - int alignCapacity(int reqCapacity) { - int delta = reqCapacity & directMemoryCacheAlignmentMask; - return delta == 0 ? 
reqCapacity : reqCapacity + directMemoryCacheAlignment - delta; + PoolSubpage findSubpagePoolHead(int sizeIdx) { + return smallSubpagePools[sizeIdx]; } void reallocate(PooledByteBuf buf, int newCapacity, boolean freeOldMemory) { - if (newCapacity < 0 || newCapacity > buf.maxCapacity()) { - throw new IllegalArgumentException("newCapacity: " + newCapacity); - } + assert newCapacity >= 0 && newCapacity <= buf.maxCapacity(); int oldCapacity = buf.length; if (oldCapacity == newCapacity) { @@ -387,35 +277,24 @@ void reallocate(PooledByteBuf buf, int newCapacity, boolean freeOldMemory) { } PoolChunk oldChunk = buf.chunk; + ByteBuffer oldNioBuffer = buf.tmpNioBuf; long oldHandle = buf.handle; T oldMemory = buf.memory; int oldOffset = buf.offset; int oldMaxLength = buf.maxLength; - int readerIndex = buf.readerIndex(); - int writerIndex = buf.writerIndex(); + // This does not touch buf's reader/writer indices allocate(parent.threadCache(), buf, newCapacity); + int bytesToCopy; if (newCapacity > oldCapacity) { - memoryCopy( - oldMemory, oldOffset, - buf.memory, buf.offset, oldCapacity); - } else if (newCapacity < oldCapacity) { - if (readerIndex < newCapacity) { - if (writerIndex > newCapacity) { - writerIndex = newCapacity; - } - memoryCopy( - oldMemory, oldOffset + readerIndex, - buf.memory, buf.offset + readerIndex, writerIndex - readerIndex); - } else { - readerIndex = writerIndex = newCapacity; - } + bytesToCopy = oldCapacity; + } else { + buf.trimIndicesToCapacity(newCapacity); + bytesToCopy = newCapacity; } - - buf.setIndex(readerIndex, writerIndex); - + memoryCopy(oldMemory, oldOffset, buf, bytesToCopy); if (freeOldMemory) { - free(oldChunk, oldHandle, oldMaxLength, buf.cache); + free(oldChunk, oldNioBuffer, oldHandle, oldMaxLength, buf.cache); } } @@ -426,7 +305,7 @@ public int numThreadCaches() { @Override public int numTinySubpages() { - return tinySubpagePools.length; + return 0; } @Override @@ -441,7 +320,7 @@ public int numChunkLists() { @Override public List 
tinySubpages() { - return subPageMetricList(tinySubpagePools); + return Collections.emptyList(); } @Override @@ -455,19 +334,16 @@ public List chunkLists() { } private static List subPageMetricList(PoolSubpage[] pages) { - List metrics = new ArrayList(); + List metrics = new ArrayList<>(); for (PoolSubpage head : pages) { if (head.next == head) { continue; } PoolSubpage s = head.next; - for (;;) { + do { metrics.add(s); s = s.next; - if (s == head) { - break; - } - } + } while (s != head); } return metrics; } @@ -478,17 +354,18 @@ public long numAllocations() { synchronized (this) { allocsNormal = allocationsNormal; } - return allocationsTiny.value() + allocationsSmall.value() + allocsNormal + allocationsHuge.value(); + + return allocationsSmall.longValue() + allocsNormal + allocationsHuge.longValue(); } @Override public long numTinyAllocations() { - return allocationsTiny.value(); + return 0; } @Override public long numSmallAllocations() { - return allocationsSmall.value(); + return allocationsSmall.longValue(); } @Override @@ -500,14 +377,14 @@ public synchronized long numNormalAllocations() { public long numDeallocations() { final long deallocs; synchronized (this) { - deallocs = deallocationsTiny + deallocationsSmall + deallocationsNormal; + deallocs = deallocationsSmall + deallocationsNormal; } - return deallocs + deallocationsHuge.value(); + return deallocs + deallocationsHuge.longValue(); } @Override - public synchronized long numTinyDeallocations() { - return deallocationsTiny; + public long numTinyDeallocations() { + return 0; } @Override @@ -522,27 +399,28 @@ public synchronized long numNormalDeallocations() { @Override public long numHugeAllocations() { - return allocationsHuge.value(); + return allocationsHuge.longValue(); } @Override public long numHugeDeallocations() { - return deallocationsHuge.value(); + return deallocationsHuge.longValue(); } @Override public long numActiveAllocations() { - long val = allocationsTiny.value() + 
allocationsSmall.value() + allocationsHuge.value() - - deallocationsHuge.value(); + + long val = allocationsSmall.longValue() + allocationsHuge.longValue() + - deallocationsHuge.longValue(); synchronized (this) { - val += allocationsNormal - (deallocationsTiny + deallocationsSmall + deallocationsNormal); + val += allocationsNormal - (deallocationsSmall + deallocationsNormal); } return max(val, 0); } @Override public long numActiveTinyAllocations() { - return max(numTinyAllocations() - numTinyDeallocations(), 0); + return 0; } @Override @@ -566,7 +444,7 @@ public long numActiveHugeAllocations() { @Override public long numActiveBytes() { - long val = activeBytesHuge.value(); + long val = activeBytesHuge.longValue(); synchronized (this) { for (int i = 0; i < chunkListMetrics.size(); i++) { for (PoolChunkMetric m: chunkListMetrics.get(i)) { @@ -577,10 +455,10 @@ public long numActiveBytes() { return max(0, val); } - protected abstract PoolChunk newChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize); + protected abstract PoolChunk newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize); protected abstract PoolChunk newUnpooledChunk(int capacity); protected abstract PooledByteBuf newByteBuf(int maxCapacity); - protected abstract void memoryCopy(T src, int srcOffset, T dst, int dstOffset, int length); + protected abstract void memoryCopy(T src, int srcOffset, PooledByteBuf dst, int length); protected abstract void destroyChunk(PoolChunk chunk); @Override @@ -610,10 +488,7 @@ public synchronized String toString() { .append(StringUtil.NEWLINE) .append(q100) .append(StringUtil.NEWLINE) - .append("tiny subpages:"); - appendPoolSubPages(buf, tinySubpagePools); - buf.append(StringUtil.NEWLINE) - .append("small subpages:"); + .append("small subpages:"); appendPoolSubPages(buf, smallSubpagePools); buf.append(StringUtil.NEWLINE); @@ -631,13 +506,10 @@ private static void appendPoolSubPages(StringBuilder buf, PoolSubpage[] subpa .append(i) .append(": "); 
PoolSubpage s = head.next; - for (;;) { + do { buf.append(s); s = s.next; - if (s == head) { - break; - } - } + } while (s != head); } } @@ -647,7 +519,6 @@ protected final void finalize() throws Throwable { super.finalize(); } finally { destroyPoolSubPages(smallSubpagePools); - destroyPoolSubPages(tinySubpagePools); destroyPoolChunkLists(qInit, q000, q025, q050, q075, q100); } } @@ -666,10 +537,10 @@ private void destroyPoolChunkLists(PoolChunkList... chunkLists) { static final class HeapArena extends PoolArena { - HeapArena(PooledByteBufAllocator parent, int pageSize, int maxOrder, - int pageShifts, int chunkSize, int directMemoryCacheAlignment) { - super(parent, pageSize, maxOrder, pageShifts, chunkSize, - directMemoryCacheAlignment); + HeapArena(PooledByteBufAllocator parent, int pageSize, int pageShifts, + int chunkSize, int directMemoryCacheAlignment) { + super(parent, pageSize, pageShifts, chunkSize, + directMemoryCacheAlignment); } private static byte[] newByteArray(int size) { @@ -682,13 +553,14 @@ boolean isDirect() { } @Override - protected PoolChunk newChunk(int pageSize, int maxOrder, int pageShifts, int chunkSize) { - return new PoolChunk(this, newByteArray(chunkSize), pageSize, maxOrder, pageShifts, chunkSize, 0); + protected PoolChunk newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) { + return new PoolChunk<>( + this, null, newByteArray(chunkSize), pageSize, pageShifts, chunkSize, maxPageIdx); } @Override protected PoolChunk newUnpooledChunk(int capacity) { - return new PoolChunk(this, newByteArray(capacity), capacity, 0); + return new PoolChunk<>(this, null, newByteArray(capacity), capacity); } @Override @@ -703,21 +575,21 @@ protected PooledByteBuf newByteBuf(int maxCapacity) { } @Override - protected void memoryCopy(byte[] src, int srcOffset, byte[] dst, int dstOffset, int length) { + protected void memoryCopy(byte[] src, int srcOffset, PooledByteBuf dst, int length) { if (length == 0) { return; } - System.arraycopy(src, 
srcOffset, dst, dstOffset, length); + System.arraycopy(src, srcOffset, dst.memory, dst.offset, length); } } static final class DirectArena extends PoolArena { - DirectArena(PooledByteBufAllocator parent, int pageSize, int maxOrder, - int pageShifts, int chunkSize, int directMemoryCacheAlignment) { - super(parent, pageSize, maxOrder, pageShifts, chunkSize, - directMemoryCacheAlignment); + DirectArena(PooledByteBufAllocator parent, int pageSize, int pageShifts, + int chunkSize, int directMemoryCacheAlignment) { + super(parent, pageSize, pageShifts, chunkSize, + directMemoryCacheAlignment); } @Override @@ -725,38 +597,31 @@ boolean isDirect() { return true; } - private int offsetCacheLine(ByteBuffer memory) { - // We can only calculate the offset if Unsafe is present as otherwise directBufferAddress(...) will - // throw an NPE. - return HAS_UNSAFE ? - (int) (PlatformDependent.directBufferAddress(memory) & directMemoryCacheAlignmentMask) : 0; - } - @Override - protected PoolChunk newChunk(int pageSize, int maxOrder, - int pageShifts, int chunkSize) { + protected PoolChunk newChunk(int pageSize, int maxPageIdx, + int pageShifts, int chunkSize) { if (directMemoryCacheAlignment == 0) { - return new PoolChunk(this, - allocateDirect(chunkSize), pageSize, maxOrder, - pageShifts, chunkSize, 0); + ByteBuffer memory = allocateDirect(chunkSize); + return new PoolChunk<>(this, memory, memory, pageSize, pageShifts, + chunkSize, maxPageIdx); } - final ByteBuffer memory = allocateDirect(chunkSize - + directMemoryCacheAlignment); - return new PoolChunk(this, memory, pageSize, - maxOrder, pageShifts, chunkSize, - offsetCacheLine(memory)); + + final ByteBuffer base = allocateDirect(chunkSize + directMemoryCacheAlignment); + final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment); + return new PoolChunk<>(this, base, memory, pageSize, + pageShifts, chunkSize, maxPageIdx); } @Override protected PoolChunk newUnpooledChunk(int capacity) { if 
(directMemoryCacheAlignment == 0) { - return new PoolChunk(this, - allocateDirect(capacity), capacity, 0); + ByteBuffer memory = allocateDirect(capacity); + return new PoolChunk<>(this, memory, memory, capacity); } - final ByteBuffer memory = allocateDirect(capacity - + directMemoryCacheAlignment); - return new PoolChunk(this, memory, capacity, - offsetCacheLine(memory)); + + final ByteBuffer base = allocateDirect(capacity + directMemoryCacheAlignment); + final ByteBuffer memory = PlatformDependent.alignDirectBuffer(base, directMemoryCacheAlignment); + return new PoolChunk<>(this, base, memory, capacity); } private static ByteBuffer allocateDirect(int capacity) { @@ -767,9 +632,9 @@ private static ByteBuffer allocateDirect(int capacity) { @Override protected void destroyChunk(PoolChunk chunk) { if (PlatformDependent.useDirectBufferNoCleaner()) { - PlatformDependent.freeDirectNoCleaner(chunk.memory); + PlatformDependent.freeDirectNoCleaner((ByteBuffer) chunk.base); } else { - PlatformDependent.freeDirectBuffer(chunk.memory); + PlatformDependent.freeDirectBuffer((ByteBuffer) chunk.base); } } @@ -783,7 +648,7 @@ protected PooledByteBuf newByteBuf(int maxCapacity) { } @Override - protected void memoryCopy(ByteBuffer src, int srcOffset, ByteBuffer dst, int dstOffset, int length) { + protected void memoryCopy(ByteBuffer src, int srcOffset, PooledByteBuf dstBuf, int length) { if (length == 0) { return; } @@ -791,13 +656,13 @@ protected void memoryCopy(ByteBuffer src, int srcOffset, ByteBuffer dst, int dst if (HAS_UNSAFE) { PlatformDependent.copyMemory( PlatformDependent.directBufferAddress(src) + srcOffset, - PlatformDependent.directBufferAddress(dst) + dstOffset, length); + PlatformDependent.directBufferAddress(dstBuf.memory) + dstBuf.offset, length); } else { // We must duplicate the NIO buffers because they may be accessed by other Netty buffers. 
src = src.duplicate(); - dst = dst.duplicate(); + ByteBuffer dst = dstBuf.internalNioBuffer(); src.position(srcOffset).limit(srcOffset + length); - dst.position(dstOffset); + dst.position(dstBuf.offset); dst.put(src); } } diff --git a/buffer/src/main/java/io/netty/buffer/PoolArenaMetric.java b/buffer/src/main/java/io/netty/buffer/PoolArenaMetric.java index d3281f3bee4..b11a3c4f1a8 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolArenaMetric.java +++ b/buffer/src/main/java/io/netty/buffer/PoolArenaMetric.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ /** * Expose metrics for an arena. */ -public interface PoolArenaMetric { +public interface PoolArenaMetric extends SizeClassesMetric { /** * Returns the number of thread caches backed by this arena. @@ -30,7 +30,10 @@ public interface PoolArenaMetric { /** * Returns the number of tiny sub-pages for the arena. + * + * @deprecated Tiny sub-pages have been merged into small sub-pages. */ + @Deprecated int numTinySubpages(); /** @@ -45,7 +48,10 @@ public interface PoolArenaMetric { /** * Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for tiny sub-pages. + * + * @deprecated Tiny sub-pages have been merged into small sub-pages. */ + @Deprecated List tinySubpages(); /** @@ -65,7 +71,10 @@ public interface PoolArenaMetric { /** * Return the number of tiny allocations done via the arena. + * + * @deprecated Tiny allocations have been merged into small allocations. 
*/ + @Deprecated long numTinyAllocations(); /** @@ -90,7 +99,10 @@ public interface PoolArenaMetric { /** * Return the number of tiny deallocations done via the arena. + * + * @deprecated Tiny deallocations have been merged into small deallocations. */ + @Deprecated long numTinyDeallocations(); /** @@ -115,7 +127,10 @@ public interface PoolArenaMetric { /** * Return the number of currently active tiny allocations. + * + * @deprecated Tiny allocations have been merged into small allocations. */ + @Deprecated long numActiveTinyAllocations(); /** diff --git a/buffer/src/main/java/io/netty/buffer/PoolChunk.java b/buffer/src/main/java/io/netty/buffer/PoolChunk.java index b3ca160223a..9fb4bde7f93 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolChunk.java +++ b/buffer/src/main/java/io/netty/buffer/PoolChunk.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -13,16 +13,24 @@ * License for the specific language governing permissions and limitations * under the License. 
*/ - package io.netty.buffer; +import io.netty.util.internal.LongLongHashMap; +import io.netty.util.internal.LongPriorityQueue; + +import java.nio.ByteBuffer; +import java.util.ArrayDeque; +import java.util.Deque; +import java.util.PriorityQueue; + /** * Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk * * Notation: The following terms are important to understand the code * > page - a page is the smallest unit of memory chunk that can be allocated - * > chunk - a chunk is a collection of pages - * > in this code chunkSize = 2^{maxOrder} * pageSize + * > run - a run is a collection of pages + * > chunk - a chunk is a collection of runs + * > in this code chunkSize = maxPages * pageSize * * To begin we allocate a byte array of size = chunkSize * Whenever a ByteBuf of given size needs to be created we search for the first position @@ -30,100 +38,143 @@ * return a (long) handle that encodes this offset information, (this memory segment is then * marked as reserved so it is always used by exactly one ByteBuf and no more) * - * For simplicity all sizes are normalized according to PoolArena#normalizeCapacity method - * This ensures that when we request for memory segments of size >= pageSize the normalizedCapacity - * equals the next nearest power of 2 + * For simplicity all sizes are normalized according to {@link PoolArena#size2SizeIdx(int)} method. + * This ensures that when we request for memory segments of size > pageSize the normalizedCapacity + * equals the next nearest size in {@link SizeClasses}. + * + * + * A chunk has the following layout: + * + * /-----------------\ + * | run | + * | | + * | | + * |-----------------| + * | run | + * | | + * |-----------------| + * | unalloctated | + * | (freed) | + * | | + * |-----------------| + * | subpage | + * |-----------------| + * | unallocated | + * | (freed) | + * | ... | + * | ... | + * | ... 
| + * | | + * | | + * | | + * \-----------------/ + * + * + * handle: + * ------- + * a handle is a long number, the bit layout of a run looks like: * - * To search for the first offset in chunk that has at least requested size available we construct a - * complete balanced binary tree and store it in an array (just like heaps) - memoryMap + * oooooooo ooooooos ssssssss ssssssue bbbbbbbb bbbbbbbb bbbbbbbb bbbbbbbb * - * The tree looks like this (the size of each node being mentioned in the parenthesis) + * o: runOffset (page offset in the chunk), 15bit + * s: size (number of pages) of this run, 15bit + * u: isUsed?, 1bit + * e: isSubpage?, 1bit + * b: bitmapIdx of subpage, zero if it's not subpage, 32bit * - * depth=0 1 node (chunkSize) - * depth=1 2 nodes (chunkSize/2) - * .. - * .. - * depth=d 2^d nodes (chunkSize/2^d) - * .. - * depth=maxOrder 2^maxOrder nodes (chunkSize/2^{maxOrder} = pageSize) + * runsAvailMap: + * ------ + * a map which manages all runs (used and not in used). + * For each run, the first runOffset and last runOffset are stored in runsAvailMap. + * key: runOffset + * value: handle * - * depth=maxOrder is the last level and the leafs consist of pages + * runsAvail: + * ---------- + * an array of {@link PriorityQueue}. + * Each queue manages same size of runs. + * Runs are sorted by offset, so that we always allocate runs with smaller offset. 
* - * With this tree available searching in chunkArray translates like this: - * To allocate a memory segment of size chunkSize/2^k we search for the first node (from left) at height k - * which is unused * * Algorithm: * ---------- - * Encode the tree in memoryMap with the notation - * memoryMap[id] = x => in the subtree rooted at id, the first node that is free to be allocated - * is at depth x (counted from depth=0) i.e., at depths [depth_of_id, x), there is no node that is free * - * As we allocate & free nodes, we update values stored in memoryMap so that the property is maintained + * As we allocate runs, we update values stored in runsAvailMap and runsAvail so that the property is maintained. * * Initialization - - * In the beginning we construct the memoryMap array by storing the depth of a node at each node - * i.e., memoryMap[id] = depth_of_id - * - * Observations: - * ------------- - * 1) memoryMap[id] = depth_of_id => it is free / unallocated - * 2) memoryMap[id] > depth_of_id => at least one of its child nodes is allocated, so we cannot allocate it, but - * some of its children can still be allocated based on their availability - * 3) memoryMap[id] = maxOrder + 1 => the node is fully allocated & thus none of its children can be allocated, it - * is thus marked as unusable + * In the beginning we store the initial run which is the whole chunk. 
+ * The initial run: + * runOffset = 0 + * size = chunkSize + * isUsed = no + * isSubpage = no + * bitmapIdx = 0 * - * Algorithm: [allocateNode(d) => we want to find the first node (from left) at height h that can be allocated] - * ---------- - * 1) start at root (i.e., depth = 0 or id = 1) - * 2) if memoryMap[1] > d => cannot be allocated from this chunk - * 3) if left node value <= h; we can allocate from left subtree so move to left and repeat until found - * 4) else try in right subtree * * Algorithm: [allocateRun(size)] * ---------- - * 1) Compute d = log_2(chunkSize/size) - * 2) Return allocateNode(d) + * 1) find the first avail run using in runsAvails according to size + * 2) if pages of run is larger than request pages then split it, and save the tailing run + * for later using * * Algorithm: [allocateSubpage(size)] * ---------- - * 1) use allocateNode(maxOrder) to find an empty (i.e., unused) leaf (i.e., page) - * 2) use this handle to construct the PoolSubpage object or if it already exists just call init(normCapacity) - * note that this PoolSubpage object is added to subpagesPool in the PoolArena when we init() it + * 1) find a not full subpage according to size. 
+ * if it already exists just return, otherwise allocate a new PoolSubpage and call init() + * note that this subpage object is added to subpagesPool in the PoolArena when we init() it + * 2) call subpage.allocate() * - * Note: - * ----- - * In the implementation for improving cache coherence, - * we store 2 pieces of information (i.e, 2 byte vals) as a short value in memoryMap + * Algorithm: [free(handle, length, nioBuffer)] + * ---------- + * 1) if it is a subpage, return the slab back into this subpage + * 2) if the subpage is not used or it is a run, then start free this run + * 3) merge continuous avail runs + * 4) save the merged run * - * memoryMap[id]= (depth_of_id, x) - * where as per convention defined above - * the second value (i.e, x) indicates that the first node which is free to be allocated is at depth x (from root) */ final class PoolChunk implements PoolChunkMetric { + private static final int SIZE_BIT_LENGTH = 15; + private static final int INUSED_BIT_LENGTH = 1; + private static final int SUBPAGE_BIT_LENGTH = 1; + private static final int BITMAP_IDX_BIT_LENGTH = 32; - private static final int INTEGER_SIZE_MINUS_ONE = Integer.SIZE - 1; + static final int IS_SUBPAGE_SHIFT = BITMAP_IDX_BIT_LENGTH; + static final int IS_USED_SHIFT = SUBPAGE_BIT_LENGTH + IS_SUBPAGE_SHIFT; + static final int SIZE_SHIFT = INUSED_BIT_LENGTH + IS_USED_SHIFT; + static final int RUN_OFFSET_SHIFT = SIZE_BIT_LENGTH + SIZE_SHIFT; final PoolArena arena; + final Object base; final T memory; final boolean unpooled; - final int offset; - private final byte[] memoryMap; - private final byte[] depthMap; + /** + * store the first page and last page of each avail run + */ + private final LongLongHashMap runsAvailMap; + + /** + * manage all avail runs + */ + private final LongPriorityQueue[] runsAvail; + + /** + * manage all subpages in this chunk + */ private final PoolSubpage[] subpages; - /** Used to determine if the requested capacity is equal to or greater than pageSize. 
*/ - private final int subpageOverflowMask; + private final int pageSize; private final int pageShifts; - private final int maxOrder; private final int chunkSize; - private final int log2ChunkSize; - private final int maxSubpageAllocs; - /** Used to mark memory as unusable */ - private final byte unusable; - private int freeBytes; + // Use as cache for ByteBuffer created from the memory. These are just duplicates and so are only a container + // around the memory itself. These are often needed for operations within the Pooled*ByteBuf and so + // may produce extra GC, which can be greatly reduced by caching the duplicates. + // + // This may be null if the PoolChunk is unpooled as pooling the ByteBuffer instances does not make any sense here. + private final Deque cachedNioBuffers; + + int freeBytes; PoolChunkList parent; PoolChunk prev; @@ -132,62 +183,95 @@ final class PoolChunk implements PoolChunkMetric { // TODO: Test if adding padding helps under contention //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; - PoolChunk(PoolArena arena, T memory, int pageSize, int maxOrder, int pageShifts, int chunkSize, int offset) { + @SuppressWarnings("unchecked") + PoolChunk(PoolArena arena, Object base, T memory, int pageSize, int pageShifts, int chunkSize, int maxPageIdx) { unpooled = false; this.arena = arena; + this.base = base; this.memory = memory; this.pageSize = pageSize; this.pageShifts = pageShifts; - this.maxOrder = maxOrder; this.chunkSize = chunkSize; - this.offset = offset; - unusable = (byte) (maxOrder + 1); - log2ChunkSize = log2(chunkSize); - subpageOverflowMask = ~(pageSize - 1); freeBytes = chunkSize; - assert maxOrder < 30 : "maxOrder should be < 30, but is: " + maxOrder; - maxSubpageAllocs = 1 << maxOrder; - - // Generate the memory map. 
- memoryMap = new byte[maxSubpageAllocs << 1]; - depthMap = new byte[memoryMap.length]; - int memoryMapIndex = 1; - for (int d = 0; d <= maxOrder; ++ d) { // move down the tree one level at a time - int depth = 1 << d; - for (int p = 0; p < depth; ++ p) { - // in each level traverse left to right and set value to the depth of subtree - memoryMap[memoryMapIndex] = (byte) d; - depthMap[memoryMapIndex] = (byte) d; - memoryMapIndex ++; - } - } + runsAvail = newRunsAvailqueueArray(maxPageIdx); + runsAvailMap = new LongLongHashMap(-1); + subpages = new PoolSubpage[chunkSize >> pageShifts]; + + //insert initial run, offset = 0, pages = chunkSize / pageSize + int pages = chunkSize >> pageShifts; + long initHandle = (long) pages << SIZE_SHIFT; + insertAvailRun(0, pages, initHandle); - subpages = newSubpageArray(maxSubpageAllocs); + cachedNioBuffers = new ArrayDeque<>(8); } /** Creates a special chunk that is not pooled. */ - PoolChunk(PoolArena arena, T memory, int size, int offset) { + PoolChunk(PoolArena arena, Object base, T memory, int size) { unpooled = true; this.arena = arena; + this.base = base; this.memory = memory; - this.offset = offset; - memoryMap = null; - depthMap = null; - subpages = null; - subpageOverflowMask = 0; pageSize = 0; pageShifts = 0; - maxOrder = 0; - unusable = (byte) (maxOrder + 1); + runsAvailMap = null; + runsAvail = null; + subpages = null; chunkSize = size; - log2ChunkSize = log2(chunkSize); - maxSubpageAllocs = 0; + cachedNioBuffers = null; } - @SuppressWarnings("unchecked") - private PoolSubpage[] newSubpageArray(int size) { - return new PoolSubpage[size]; + private static LongPriorityQueue[] newRunsAvailqueueArray(int size) { + LongPriorityQueue[] queueArray = new LongPriorityQueue[size]; + for (int i = 0; i < queueArray.length; i++) { + queueArray[i] = new LongPriorityQueue(); + } + return queueArray; + } + + private void insertAvailRun(int runOffset, int pages, long handle) { + int pageIdxFloor = arena.pages2pageIdxFloor(pages); + 
LongPriorityQueue queue = runsAvail[pageIdxFloor]; + queue.offer(handle); + + //insert first page of run + insertAvailRun0(runOffset, handle); + if (pages > 1) { + //insert last page of run + insertAvailRun0(lastPage(runOffset, pages), handle); + } + } + + private void insertAvailRun0(int runOffset, long handle) { + long pre = runsAvailMap.put(runOffset, handle); + assert pre == -1; + } + + private void removeAvailRun(long handle) { + int pageIdxFloor = arena.pages2pageIdxFloor(runPages(handle)); + LongPriorityQueue queue = runsAvail[pageIdxFloor]; + removeAvailRun(queue, handle); + } + + private void removeAvailRun(LongPriorityQueue queue, long handle) { + queue.remove(handle); + + int runOffset = runOffset(handle); + int pages = runPages(handle); + //remove first page of run + runsAvailMap.remove(runOffset); + if (pages > 1) { + //remove last page of run + runsAvailMap.remove(lastPage(runOffset, pages)); + } + } + + private static int lastPage(int runOffset, int pages) { + return runOffset + pages - 1; + } + + private long getAvailRunByOffset(int runOffset) { + return runsAvailMap.get(runOffset); } @Override @@ -211,240 +295,285 @@ private int usage(int freeBytes) { return 100 - freePercentage; } - long allocate(int normCapacity) { - if ((normCapacity & subpageOverflowMask) != 0) { // >= pageSize - return allocateRun(normCapacity); + boolean allocate(PooledByteBuf buf, int reqCapacity, int sizeIdx, PoolThreadCache cache) { + final long handle; + if (sizeIdx <= arena.smallMaxSizeIdx) { + // small + handle = allocateSubpage(sizeIdx); + if (handle < 0) { + return false; + } + assert isSubpage(handle); } else { - return allocateSubpage(normCapacity); + // normal + // runSize must be multiple of pageSize + int runSize = arena.sizeIdx2size(sizeIdx); + handle = allocateRun(runSize); + if (handle < 0) { + return false; + } } + + ByteBuffer nioBuffer = cachedNioBuffers != null? 
cachedNioBuffers.pollLast() : null; + initBuf(buf, nioBuffer, handle, reqCapacity, cache); + return true; } - /** - * Update method used by allocate - * This is triggered only when a successor is allocated and all its predecessors - * need to update their state - * The minimal depth at which subtree rooted at id has some free space - * - * @param id id - */ - private void updateParentsAlloc(int id) { - while (id > 1) { - int parentId = id >>> 1; - byte val1 = value(id); - byte val2 = value(id ^ 1); - byte val = val1 < val2 ? val1 : val2; - setValue(parentId, val); - id = parentId; + private long allocateRun(int runSize) { + int pages = runSize >> pageShifts; + int pageIdx = arena.pages2pageIdx(pages); + + synchronized (runsAvail) { + //find first queue which has at least one big enough run + int queueIdx = runFirstBestFit(pageIdx); + if (queueIdx == -1) { + return -1; + } + + //get run with min offset in this queue + LongPriorityQueue queue = runsAvail[queueIdx]; + long handle = queue.poll(); + + assert handle != LongPriorityQueue.NO_VALUE && !isUsed(handle) : "invalid handle: " + handle; + + removeAvailRun(queue, handle); + + if (handle != -1) { + handle = splitLargeRun(handle, pages); + } + + freeBytes -= runSize(pageShifts, handle); + return handle; } } - /** - * Update method used by free - * This needs to handle the special case when both children are completely free - * in which case parent be directly allocated on request of size = child-size * 2 - * - * @param id id - */ - private void updateParentsFree(int id) { - int logChild = depth(id) + 1; - while (id > 1) { - int parentId = id >>> 1; - byte val1 = value(id); - byte val2 = value(id ^ 1); - logChild -= 1; // in first iteration equals log, subsequently reduce 1 from logChild as we traverse up - - if (val1 == logChild && val2 == logChild) { - setValue(parentId, (byte) (logChild - 1)); - } else { - byte val = val1 < val2 ? 
val1 : val2; - setValue(parentId, val); - } + private int calculateRunSize(int sizeIdx) { + int maxElements = 1 << pageShifts - SizeClasses.LOG2_QUANTUM; + int runSize = 0; + int nElements; + + final int elemSize = arena.sizeIdx2size(sizeIdx); - id = parentId; + //find lowest common multiple of pageSize and elemSize + do { + runSize += pageSize; + nElements = runSize / elemSize; + } while (nElements < maxElements && runSize != nElements * elemSize); + + while (nElements > maxElements) { + runSize -= pageSize; + nElements = runSize / elemSize; } + + assert nElements > 0; + assert runSize <= chunkSize; + assert runSize >= elemSize; + + return runSize; } - /** - * Algorithm to allocate an index in memoryMap when we query for a free node - * at depth d - * - * @param d depth - * @return index in memoryMap - */ - private int allocateNode(int d) { - int id = 1; - int initial = - (1 << d); // has last d bits = 0 and rest all = 1 - byte val = value(id); - if (val > d) { // unusable - return -1; + private int runFirstBestFit(int pageIdx) { + if (freeBytes == chunkSize) { + return arena.nPSizes - 1; } - while (val < d || (id & initial) == 0) { // id & initial == 1 << d for all ids at depth d, for < d it is 0 - id <<= 1; - val = value(id); - if (val > d) { - id ^= 1; - val = value(id); + for (int i = pageIdx; i < arena.nPSizes; i++) { + LongPriorityQueue queue = runsAvail[i]; + if (queue != null && !queue.isEmpty()) { + return i; } } - byte value = value(id); - assert value == d && (id & initial) == 1 << d : String.format("val = %d, id & initial = %d, d = %d", - value, id & initial, d); - setValue(id, unusable); // mark as unusable - updateParentsAlloc(id); - return id; + return -1; } - /** - * Allocate a run of pages (>=1) - * - * @param normCapacity normalized capacity - * @return index in memoryMap - */ - private long allocateRun(int normCapacity) { - int d = maxOrder - (log2(normCapacity) - pageShifts); - int id = allocateNode(d); - if (id < 0) { - return id; + private 
long splitLargeRun(long handle, int needPages) { + assert needPages > 0; + + int totalPages = runPages(handle); + assert needPages <= totalPages; + + int remPages = totalPages - needPages; + + if (remPages > 0) { + int runOffset = runOffset(handle); + + // keep track of trailing unused pages for later use + int availOffset = runOffset + needPages; + long availRun = toRunHandle(availOffset, remPages, 0); + insertAvailRun(availOffset, remPages, availRun); + + // not avail + return toRunHandle(runOffset, needPages, 1); } - freeBytes -= runLength(id); - return id; + + //mark it as used + handle |= 1L << IS_USED_SHIFT; + return handle; } /** - * Create/ initialize a new PoolSubpage of normCapacity - * Any PoolSubpage created/ initialized here is added to subpage pool in the PoolArena that owns this PoolChunk + * Create / initialize a new PoolSubpage of normCapacity. Any PoolSubpage created / initialized here is added to + * subpage pool in the PoolArena that owns this PoolChunk + * + * @param sizeIdx sizeIdx of normalized size * - * @param normCapacity normalized capacity * @return index in memoryMap */ - private long allocateSubpage(int normCapacity) { + private long allocateSubpage(int sizeIdx) { // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it. // This is need as we may add it back and so alter the linked-list structure. 
- PoolSubpage head = arena.findSubpagePoolHead(normCapacity); + PoolSubpage head = arena.findSubpagePoolHead(sizeIdx); synchronized (head) { - int d = maxOrder; // subpages are only be allocated from pages i.e., leaves - int id = allocateNode(d); - if (id < 0) { - return id; + //allocate a new run + int runSize = calculateRunSize(sizeIdx); + //runSize must be multiples of pageSize + long runHandle = allocateRun(runSize); + if (runHandle < 0) { + return -1; } - final PoolSubpage[] subpages = this.subpages; - final int pageSize = this.pageSize; + int runOffset = runOffset(runHandle); + assert subpages[runOffset] == null; + int elemSize = arena.sizeIdx2size(sizeIdx); - freeBytes -= pageSize; + PoolSubpage subpage = new PoolSubpage(head, this, pageShifts, runOffset, + runSize(pageShifts, runHandle), elemSize); - int subpageIdx = subpageIdx(id); - PoolSubpage subpage = subpages[subpageIdx]; - if (subpage == null) { - subpage = new PoolSubpage(head, this, id, runOffset(id), pageSize, normCapacity); - subpages[subpageIdx] = subpage; - } else { - subpage.init(head, normCapacity); - } + subpages[runOffset] = subpage; return subpage.allocate(); } } /** - * Free a subpage or a run of pages - * When a subpage is freed from PoolSubpage, it might be added back to subpage pool of the owning PoolArena - * If the subpage pool in PoolArena has at least one other PoolSubpage of given elemSize, we can - * completely free the owning Page so it is available for subsequent allocations + * Free a subpage or a run of pages When a subpage is freed from PoolSubpage, it might be added back to subpage pool + * of the owning PoolArena. 
If the subpage pool in PoolArena has at least one other PoolSubpage of given elemSize, + * we can completely free the owning Page so it is available for subsequent allocations * * @param handle handle to free */ - void free(long handle) { - int memoryMapIdx = memoryMapIdx(handle); - int bitmapIdx = bitmapIdx(handle); + void free(long handle, int normCapacity, ByteBuffer nioBuffer) { + if (isSubpage(handle)) { + int sizeIdx = arena.size2SizeIdx(normCapacity); + PoolSubpage head = arena.findSubpagePoolHead(sizeIdx); - if (bitmapIdx != 0) { // free a subpage - PoolSubpage subpage = subpages[subpageIdx(memoryMapIdx)]; + int sIdx = runOffset(handle); + PoolSubpage subpage = subpages[sIdx]; assert subpage != null && subpage.doNotDestroy; // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it. // This is need as we may add it back and so alter the linked-list structure. - PoolSubpage head = arena.findSubpagePoolHead(subpage.elemSize); synchronized (head) { - if (subpage.free(head, bitmapIdx & 0x3FFFFFFF)) { + if (subpage.free(head, bitmapIdx(handle))) { + //the subpage is still used, do not free it return; } + assert !subpage.doNotDestroy; + // Null out slot in the array as it was freed and we should not use it anymore. 
+ subpages[sIdx] = null; } } - freeBytes += runLength(memoryMapIdx); - setValue(memoryMapIdx, depth(memoryMapIdx)); - updateParentsFree(memoryMapIdx); - } - void initBuf(PooledByteBuf buf, long handle, int reqCapacity) { - int memoryMapIdx = memoryMapIdx(handle); - int bitmapIdx = bitmapIdx(handle); - if (bitmapIdx == 0) { - byte val = value(memoryMapIdx); - assert val == unusable : String.valueOf(val); - buf.init(this, handle, runOffset(memoryMapIdx) + offset, reqCapacity, runLength(memoryMapIdx), - arena.parent.threadCache()); - } else { - initBufWithSubpage(buf, handle, bitmapIdx, reqCapacity); + //start free run + int pages = runPages(handle); + + synchronized (runsAvail) { + // collapse continuous runs, successfully collapsed runs + // will be removed from runsAvail and runsAvailMap + long finalRun = collapseRuns(handle); + + //set run as not used + finalRun &= ~(1L << IS_USED_SHIFT); + //if it is a subpage, set it to run + finalRun &= ~(1L << IS_SUBPAGE_SHIFT); + + insertAvailRun(runOffset(finalRun), runPages(finalRun), finalRun); + freeBytes += pages << pageShifts; + } + + if (nioBuffer != null && cachedNioBuffers != null && + cachedNioBuffers.size() < PooledByteBufAllocator.DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK) { + cachedNioBuffers.offer(nioBuffer); } } - void initBufWithSubpage(PooledByteBuf buf, long handle, int reqCapacity) { - initBufWithSubpage(buf, handle, bitmapIdx(handle), reqCapacity); + private long collapseRuns(long handle) { + return collapseNext(collapsePast(handle)); } - private void initBufWithSubpage(PooledByteBuf buf, long handle, int bitmapIdx, int reqCapacity) { - assert bitmapIdx != 0; + private long collapsePast(long handle) { + for (;;) { + int runOffset = runOffset(handle); + int runPages = runPages(handle); - int memoryMapIdx = memoryMapIdx(handle); + long pastRun = getAvailRunByOffset(runOffset - 1); + if (pastRun == -1) { + return handle; + } - PoolSubpage subpage = subpages[subpageIdx(memoryMapIdx)]; - assert 
subpage.doNotDestroy; - assert reqCapacity <= subpage.elemSize; + int pastOffset = runOffset(pastRun); + int pastPages = runPages(pastRun); - buf.init( - this, handle, - runOffset(memoryMapIdx) + (bitmapIdx & 0x3FFFFFFF) * subpage.elemSize + offset, - reqCapacity, subpage.elemSize, arena.parent.threadCache()); + //is continuous + if (pastRun != handle && pastOffset + pastPages == runOffset) { + //remove past run + removeAvailRun(pastRun); + handle = toRunHandle(pastOffset, pastPages + runPages, 0); + } else { + return handle; + } + } } - private byte value(int id) { - return memoryMap[id]; - } + private long collapseNext(long handle) { + for (;;) { + int runOffset = runOffset(handle); + int runPages = runPages(handle); - private void setValue(int id, byte val) { - memoryMap[id] = val; - } + long nextRun = getAvailRunByOffset(runOffset + runPages); + if (nextRun == -1) { + return handle; + } - private byte depth(int id) { - return depthMap[id]; - } + int nextOffset = runOffset(nextRun); + int nextPages = runPages(nextRun); - private static int log2(int val) { - // compute the (0-based, with lsb = 0) position of highest set bit i.e, log2 - return INTEGER_SIZE_MINUS_ONE - Integer.numberOfLeadingZeros(val); + //is continuous + if (nextRun != handle && runOffset + runPages == nextOffset) { + //remove next run + removeAvailRun(nextRun); + handle = toRunHandle(runOffset, runPages + nextPages, 0); + } else { + return handle; + } + } } - private int runLength(int id) { - // represents the size in #bytes supported by node 'id' in the tree - return 1 << log2ChunkSize - depth(id); + private static long toRunHandle(int runOffset, int runPages, int inUsed) { + return (long) runOffset << RUN_OFFSET_SHIFT + | (long) runPages << SIZE_SHIFT + | (long) inUsed << IS_USED_SHIFT; } - private int runOffset(int id) { - // represents the 0-based offset in #bytes from start of the byte-array chunk - int shift = id ^ 1 << depth(id); - return shift * runLength(id); + void 
initBuf(PooledByteBuf buf, ByteBuffer nioBuffer, long handle, int reqCapacity, + PoolThreadCache threadCache) { + if (isRun(handle)) { + buf.init(this, nioBuffer, handle, runOffset(handle) << pageShifts, + reqCapacity, runSize(pageShifts, handle), arena.parent.threadCache()); + } else { + initBufWithSubpage(buf, nioBuffer, handle, reqCapacity, threadCache); + } } - private int subpageIdx(int memoryMapIdx) { - return memoryMapIdx ^ maxSubpageAllocs; // remove highest set bit, to get offset - } + void initBufWithSubpage(PooledByteBuf buf, ByteBuffer nioBuffer, long handle, int reqCapacity, + PoolThreadCache threadCache) { + int runOffset = runOffset(handle); + int bitmapIdx = bitmapIdx(handle); - private static int memoryMapIdx(long handle) { - return (int) handle; - } + PoolSubpage s = subpages[runOffset]; + assert s.doNotDestroy; + assert reqCapacity <= s.elemSize; - private static int bitmapIdx(long handle) { - return (int) (handle >>> Integer.SIZE); + int offset = (runOffset << pageShifts) + bitmapIdx * s.elemSize; + buf.init(this, nioBuffer, handle, offset, reqCapacity, s.elemSize, threadCache); } @Override @@ -482,4 +611,32 @@ public String toString() { void destroy() { arena.destroyChunk(this); } + + static int runOffset(long handle) { + return (int) (handle >> RUN_OFFSET_SHIFT); + } + + static int runSize(int pageShifts, long handle) { + return runPages(handle) << pageShifts; + } + + static int runPages(long handle) { + return (int) (handle >> SIZE_SHIFT & 0x7fff); + } + + static boolean isUsed(long handle) { + return (handle >> IS_USED_SHIFT & 1) == 1L; + } + + static boolean isRun(long handle) { + return !isSubpage(handle); + } + + static boolean isSubpage(long handle) { + return (handle >> IS_SUBPAGE_SHIFT & 1) == 1L; + } + + static int bitmapIdx(long handle) { + return (int) handle; + } } diff --git a/buffer/src/main/java/io/netty/buffer/PoolChunkList.java b/buffer/src/main/java/io/netty/buffer/PoolChunkList.java index f92834d85c4..d0c38f6c1f7 100644 --- 
a/buffer/src/main/java/io/netty/buffer/PoolChunkList.java +++ b/buffer/src/main/java/io/netty/buffer/PoolChunkList.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -25,6 +25,8 @@ import static java.lang.Math.*; +import java.nio.ByteBuffer; + final class PoolChunkList implements PoolChunkListMetric { private static final Iterator EMPTY_METRICS = Collections.emptyList().iterator(); private final PoolArena arena; @@ -33,6 +35,8 @@ final class PoolChunkList implements PoolChunkListMetric { private final int maxUsage; private final int maxCapacity; private PoolChunk head; + private final int freeMinThreshold; + private final int freeMaxThreshold; // This is only update once when create the linked like list of PoolChunkList in PoolArena constructor. 
private PoolChunkList prevList; @@ -47,6 +51,24 @@ final class PoolChunkList implements PoolChunkListMetric { this.minUsage = minUsage; this.maxUsage = maxUsage; maxCapacity = calculateMaxCapacity(minUsage, chunkSize); + + // the thresholds are aligned with PoolChunk.usage() logic: + // 1) basic logic: usage() = 100 - freeBytes * 100L / chunkSize + // so, for example: (usage() >= maxUsage) condition can be transformed in the following way: + // 100 - freeBytes * 100L / chunkSize >= maxUsage + // freeBytes <= chunkSize * (100 - maxUsage) / 100 + // let freeMinThreshold = chunkSize * (100 - maxUsage) / 100, then freeBytes <= freeMinThreshold + // + // 2) usage() returns an int value and has a floor rounding during a calculation, + // to be aligned absolute thresholds should be shifted for "the rounding step": + // freeBytes * 100 / chunkSize < 1 + // the condition can be converted to: freeBytes < 1 * chunkSize / 100 + // this is why we have + 0.99999999 shifts. A example why just +1 shift cannot be used: + // freeBytes = 16777216 == freeMaxThreshold: 16777216, usage = 0 < minUsage: 1, chunkSize: 16777216 + // At the same time we want to have zero thresholds in case of (maxUsage == 100) and (minUsage == 100). + // + freeMinThreshold = (maxUsage == 100) ? 0 : (int) (chunkSize * (100.0 - maxUsage + 0.99999999) / 100L); + freeMaxThreshold = (minUsage == 100) ? 
0 : (int) (chunkSize * (100.0 - minUsage + 0.99999999) / 100L); } /** @@ -74,34 +96,29 @@ void prevList(PoolChunkList prevList) { this.prevList = prevList; } - boolean allocate(PooledByteBuf buf, int reqCapacity, int normCapacity) { - if (head == null || normCapacity > maxCapacity) { + boolean allocate(PooledByteBuf buf, int reqCapacity, int sizeIdx, PoolThreadCache threadCache) { + int normCapacity = arena.sizeIdx2size(sizeIdx); + if (normCapacity > maxCapacity) { // Either this PoolChunkList is empty or the requested capacity is larger then the capacity which can // be handled by the PoolChunks that are contained in this PoolChunkList. return false; } - for (PoolChunk cur = head;;) { - long handle = cur.allocate(normCapacity); - if (handle < 0) { - cur = cur.next; - if (cur == null) { - return false; - } - } else { - cur.initBuf(buf, handle, reqCapacity); - if (cur.usage() >= maxUsage) { + for (PoolChunk cur = head; cur != null; cur = cur.next) { + if (cur.allocate(buf, reqCapacity, sizeIdx, threadCache)) { + if (cur.freeBytes <= freeMinThreshold) { remove(cur); nextList.add(cur); } return true; } } + return false; } - boolean free(PoolChunk chunk, long handle) { - chunk.free(handle); - if (chunk.usage() < minUsage) { + boolean free(PoolChunk chunk, long handle, int normCapacity, ByteBuffer nioBuffer) { + chunk.free(handle, normCapacity, nioBuffer); + if (chunk.freeBytes > freeMaxThreshold) { remove(chunk); // Move the PoolChunk down the PoolChunkList linked-list. return move0(chunk); @@ -112,7 +129,7 @@ boolean free(PoolChunk chunk, long handle) { private boolean move(PoolChunk chunk) { assert chunk.usage() < maxUsage; - if (chunk.usage() < minUsage) { + if (chunk.freeBytes > freeMaxThreshold) { // Move the PoolChunk down the PoolChunkList linked-list. 
return move0(chunk); } @@ -137,7 +154,7 @@ private boolean move0(PoolChunk chunk) { } void add(PoolChunk chunk) { - if (chunk.usage() >= maxUsage) { + if (chunk.freeBytes <= freeMinThreshold) { nextList.add(chunk); return; } @@ -196,7 +213,7 @@ public Iterator iterator() { if (head == null) { return EMPTY_METRICS; } - List metrics = new ArrayList(); + List metrics = new ArrayList<>(); for (PoolChunk cur = head;;) { metrics.add(cur); cur = cur.next; diff --git a/buffer/src/main/java/io/netty/buffer/PoolChunkListMetric.java b/buffer/src/main/java/io/netty/buffer/PoolChunkListMetric.java index ee8f0e03979..ec45561e82d 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolChunkListMetric.java +++ b/buffer/src/main/java/io/netty/buffer/PoolChunkListMetric.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/PoolChunkMetric.java b/buffer/src/main/java/io/netty/buffer/PoolChunkMetric.java index b08ad06f0e6..a006785d224 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolChunkMetric.java +++ b/buffer/src/main/java/io/netty/buffer/PoolChunkMetric.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/PoolSubpage.java b/buffer/src/main/java/io/netty/buffer/PoolSubpage.java index f897eeeb486..62e905c4d02 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolSubpage.java +++ b/buffer/src/main/java/io/netty/buffer/PoolSubpage.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,12 +16,18 @@ package io.netty.buffer; +import static io.netty.buffer.PoolChunk.RUN_OFFSET_SHIFT; +import static io.netty.buffer.PoolChunk.SIZE_SHIFT; +import static io.netty.buffer.PoolChunk.IS_USED_SHIFT; +import static io.netty.buffer.PoolChunk.IS_SUBPAGE_SHIFT; +import static io.netty.buffer.SizeClasses.LOG2_QUANTUM; + final class PoolSubpage implements PoolSubpageMetric { final PoolChunk chunk; - private final int memoryMapIdx; + private final int pageShifts; private final int runOffset; - private final int pageSize; + private final int runSize; private final long[] bitmap; PoolSubpage prev; @@ -38,29 +44,26 @@ final class PoolSubpage implements PoolSubpageMetric { //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; /** Special constructor that creates a linked list head */ - PoolSubpage(int pageSize) { + PoolSubpage() { chunk = null; - memoryMapIdx = -1; + pageShifts = -1; runOffset = -1; elemSize = -1; - this.pageSize = pageSize; + runSize = -1; bitmap = null; } - PoolSubpage(PoolSubpage head, 
PoolChunk chunk, int memoryMapIdx, int runOffset, int pageSize, int elemSize) { + PoolSubpage(PoolSubpage head, PoolChunk chunk, int pageShifts, int runOffset, int runSize, int elemSize) { this.chunk = chunk; - this.memoryMapIdx = memoryMapIdx; + this.pageShifts = pageShifts; this.runOffset = runOffset; - this.pageSize = pageSize; - bitmap = new long[pageSize >>> 10]; // pageSize / 16 / 64 - init(head, elemSize); - } + this.runSize = runSize; + this.elemSize = elemSize; + bitmap = new long[runSize >>> 6 + LOG2_QUANTUM]; // runSize / 64 / QUANTUM - void init(PoolSubpage head, int elemSize) { doNotDestroy = true; - this.elemSize = elemSize; if (elemSize != 0) { - maxNumElems = numAvail = pageSize / elemSize; + maxNumElems = numAvail = runSize / elemSize; nextAvail = 0; bitmapLength = maxNumElems >>> 6; if ((maxNumElems & 63) != 0) { @@ -78,10 +81,6 @@ void init(PoolSubpage head, int elemSize) { * Returns the bitmap index of the subpage allocation. */ long allocate() { - if (elemSize == 0) { - return toHandle(0); - } - if (numAvail == 0 || !doNotDestroy) { return -1; } @@ -116,7 +115,13 @@ boolean free(PoolSubpage head, int bitmapIdx) { if (numAvail ++ == 0) { addToPool(head); - return true; + /* When maxNumElems == 1, the maximum numAvail is also 1. + * Each of these PoolSubpages will go in here when they do free operation. + * If they return true directly from here, then the rest of the code will be unreachable + * and they will not actually be recycled. So return true only on maxNumElems > 1. 
*/ + if (maxNumElems > 1) { + return true; + } } if (numAvail != maxNumElems) { @@ -195,7 +200,12 @@ private int findNextAvail0(int i, long bits) { } private long toHandle(int bitmapIdx) { - return 0x4000000000000000L | (long) bitmapIdx << 32 | memoryMapIdx; + int pages = runSize >> pageShifts; + return (long) runOffset << RUN_OFFSET_SHIFT + | (long) pages << SIZE_SHIFT + | 1L << IS_USED_SHIFT + | 1L << IS_SUBPAGE_SHIFT + | bitmapIdx; } @Override @@ -204,29 +214,42 @@ public String toString() { final int maxNumElems; final int numAvail; final int elemSize; - synchronized (chunk.arena) { - if (!this.doNotDestroy) { - doNotDestroy = false; - // Not used for creating the String. - maxNumElems = numAvail = elemSize = -1; - } else { - doNotDestroy = true; - maxNumElems = this.maxNumElems; - numAvail = this.numAvail; - elemSize = this.elemSize; + if (chunk == null) { + // This is the head so there is no need to synchronize at all as these never change. + doNotDestroy = true; + maxNumElems = 0; + numAvail = 0; + elemSize = -1; + } else { + synchronized (chunk.arena) { + if (!this.doNotDestroy) { + doNotDestroy = false; + // Not used for creating the String. + maxNumElems = numAvail = elemSize = -1; + } else { + doNotDestroy = true; + maxNumElems = this.maxNumElems; + numAvail = this.numAvail; + elemSize = this.elemSize; + } } } if (!doNotDestroy) { - return "(" + memoryMapIdx + ": not in use)"; + return "(" + runOffset + ": not in use)"; } - return "(" + memoryMapIdx + ": " + (maxNumElems - numAvail) + '/' + maxNumElems + - ", offset: " + runOffset + ", length: " + pageSize + ", elemSize: " + elemSize + ')'; + return "(" + runOffset + ": " + (maxNumElems - numAvail) + '/' + maxNumElems + + ", offset: " + runOffset + ", length: " + runSize + ", elemSize: " + elemSize + ')'; } @Override public int maxNumElements() { + if (chunk == null) { + // It's the head. 
+ return 0; + } + synchronized (chunk.arena) { return maxNumElems; } @@ -234,6 +257,11 @@ public int maxNumElements() { @Override public int numAvailable() { + if (chunk == null) { + // It's the head. + return 0; + } + synchronized (chunk.arena) { return numAvail; } @@ -241,6 +269,11 @@ public int numAvailable() { @Override public int elementSize() { + if (chunk == null) { + // It's the head. + return -1; + } + synchronized (chunk.arena) { return elemSize; } @@ -248,7 +281,7 @@ public int elementSize() { @Override public int pageSize() { - return pageSize; + return 1 << pageShifts; } void destroy() { diff --git a/buffer/src/main/java/io/netty/buffer/PoolSubpageMetric.java b/buffer/src/main/java/io/netty/buffer/PoolSubpageMetric.java index d6747674622..c0102737d64 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolSubpageMetric.java +++ b/buffer/src/main/java/io/netty/buffer/PoolSubpageMetric.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -36,7 +36,7 @@ public interface PoolSubpageMetric { int elementSize(); /** - * Return the size (in bytes) of this page. + * Return the page size (in bytes) of this page. */ int pageSize(); } diff --git a/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java b/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java index 3503748c0d9..9e7fde8a0dc 100644 --- a/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java +++ b/buffer/src/main/java/io/netty/buffer/PoolThreadCache.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,20 +17,25 @@ package io.netty.buffer; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; + import io.netty.buffer.PoolArena.SizeClass; -import io.netty.util.Recycler; -import io.netty.util.Recycler.Handle; import io.netty.util.internal.MathUtil; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; import java.util.Queue; +import java.util.concurrent.atomic.AtomicBoolean; /** * Acts a Thread cache for allocations. This implementation is moduled after - * jemalloc and the descripted + * jemalloc and the descripted * technics of * * Scalable memory allocation using jemalloc. @@ -38,22 +43,19 @@ final class PoolThreadCache { private static final InternalLogger logger = InternalLoggerFactory.getInstance(PoolThreadCache.class); + private static final int INTEGER_SIZE_MINUS_ONE = Integer.SIZE - 1; final PoolArena heapArena; final PoolArena directArena; // Hold the caches for the different size classes, which are tiny, small and normal. 
- private final MemoryRegionCache[] tinySubPageHeapCaches; private final MemoryRegionCache[] smallSubPageHeapCaches; - private final MemoryRegionCache[] tinySubPageDirectCaches; private final MemoryRegionCache[] smallSubPageDirectCaches; private final MemoryRegionCache[] normalHeapCaches; private final MemoryRegionCache[] normalDirectCaches; - // Used for bitshifting when calculate the index of normal caches later - private final int numShiftsNormalDirect; - private final int numShiftsNormalHeap; private final int freeSweepAllocationThreshold; + private final AtomicBoolean freed = new AtomicBoolean(); private int allocations; @@ -61,56 +63,43 @@ final class PoolThreadCache { //private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; PoolThreadCache(PoolArena heapArena, PoolArena directArena, - int tinyCacheSize, int smallCacheSize, int normalCacheSize, - int maxCachedBufferCapacity, int freeSweepAllocationThreshold) { - if (maxCachedBufferCapacity < 0) { - throw new IllegalArgumentException("maxCachedBufferCapacity: " - + maxCachedBufferCapacity + " (expected: >= 0)"); - } + int smallCacheSize, int normalCacheSize, int maxCachedBufferCapacity, + int freeSweepAllocationThreshold) { + checkPositiveOrZero(maxCachedBufferCapacity, "maxCachedBufferCapacity"); this.freeSweepAllocationThreshold = freeSweepAllocationThreshold; this.heapArena = heapArena; this.directArena = directArena; if (directArena != null) { - tinySubPageDirectCaches = createSubPageCaches( - tinyCacheSize, PoolArena.numTinySubpagePools, SizeClass.Tiny); smallSubPageDirectCaches = createSubPageCaches( - smallCacheSize, directArena.numSmallSubpagePools, SizeClass.Small); + smallCacheSize, directArena.numSmallSubpagePools); - numShiftsNormalDirect = log2(directArena.pageSize); normalDirectCaches = createNormalCaches( normalCacheSize, maxCachedBufferCapacity, directArena); directArena.numThreadCaches.getAndIncrement(); } else { // No directArea is configured so just null out all caches - 
tinySubPageDirectCaches = null; smallSubPageDirectCaches = null; normalDirectCaches = null; - numShiftsNormalDirect = -1; } if (heapArena != null) { // Create the caches for the heap allocations - tinySubPageHeapCaches = createSubPageCaches( - tinyCacheSize, PoolArena.numTinySubpagePools, SizeClass.Tiny); smallSubPageHeapCaches = createSubPageCaches( - smallCacheSize, heapArena.numSmallSubpagePools, SizeClass.Small); + smallCacheSize, heapArena.numSmallSubpagePools); - numShiftsNormalHeap = log2(heapArena.pageSize); normalHeapCaches = createNormalCaches( normalCacheSize, maxCachedBufferCapacity, heapArena); heapArena.numThreadCaches.getAndIncrement(); } else { // No heapArea is configured so just null out all caches - tinySubPageHeapCaches = null; smallSubPageHeapCaches = null; normalHeapCaches = null; - numShiftsNormalHeap = -1; } // Only check if there are caches in use. - if ((tinySubPageDirectCaches != null || smallSubPageDirectCaches != null || normalDirectCaches != null - || tinySubPageHeapCaches != null || smallSubPageHeapCaches != null || normalHeapCaches != null) + if ((smallSubPageDirectCaches != null || normalDirectCaches != null + || smallSubPageHeapCaches != null || normalHeapCaches != null) && freeSweepAllocationThreshold < 1) { throw new IllegalArgumentException("freeSweepAllocationThreshold: " + freeSweepAllocationThreshold + " (expected: > 0)"); @@ -118,13 +107,13 @@ final class PoolThreadCache { } private static MemoryRegionCache[] createSubPageCaches( - int cacheSize, int numCaches, SizeClass sizeClass) { + int cacheSize, int numCaches) { if (cacheSize > 0 && numCaches > 0) { @SuppressWarnings("unchecked") MemoryRegionCache[] cache = new MemoryRegionCache[numCaches]; for (int i = 0; i < cache.length; i++) { // TODO: maybe use cacheSize / cache.length - cache[i] = new SubPageMemoryRegionCache(cacheSize, sizeClass); + cache[i] = new SubPageMemoryRegionCache<>(cacheSize); } return cache; } else { @@ -132,51 +121,41 @@ private static 
MemoryRegionCache[] createSubPageCaches( } } + @SuppressWarnings("unchecked") private static MemoryRegionCache[] createNormalCaches( int cacheSize, int maxCachedBufferCapacity, PoolArena area) { if (cacheSize > 0 && maxCachedBufferCapacity > 0) { int max = Math.min(area.chunkSize, maxCachedBufferCapacity); - int arraySize = Math.max(1, log2(max / area.pageSize) + 1); - @SuppressWarnings("unchecked") - MemoryRegionCache[] cache = new MemoryRegionCache[arraySize]; - for (int i = 0; i < cache.length; i++) { - cache[i] = new NormalMemoryRegionCache(cacheSize); + // Create as many normal caches as we support based on how many sizeIdx we have and what the upper + // bound is that we want to cache in general. + List> cache = new ArrayList<>() ; + for (int idx = area.numSmallSubpagePools; idx < area.nSizes && area.sizeIdx2size(idx) <= max ; idx++) { + cache.add(new NormalMemoryRegionCache(cacheSize)); } - return cache; + return cache.toArray(new MemoryRegionCache[0]); } else { return null; } } - private static int log2(int val) { - int res = 0; - while (val > 1) { - val >>= 1; - res++; - } - return res; - } - - /** - * Try to allocate a tiny buffer out of the cache. Returns {@code true} if successful {@code false} otherwise - */ - boolean allocateTiny(PoolArena area, PooledByteBuf buf, int reqCapacity, int normCapacity) { - return allocate(cacheForTiny(area, normCapacity), buf, reqCapacity); + // val > 0 + static int log2(int val) { + return INTEGER_SIZE_MINUS_ONE - Integer.numberOfLeadingZeros(val); } /** * Try to allocate a small buffer out of the cache. 
Returns {@code true} if successful {@code false} otherwise */ - boolean allocateSmall(PoolArena area, PooledByteBuf buf, int reqCapacity, int normCapacity) { - return allocate(cacheForSmall(area, normCapacity), buf, reqCapacity); + boolean allocateSmall(PoolArena area, PooledByteBuf buf, int reqCapacity, int sizeIdx) { + return allocate(cacheForSmall(area, sizeIdx), buf, reqCapacity); } /** - * Try to allocate a small buffer out of the cache. Returns {@code true} if successful {@code false} otherwise + * Try to allocate a normal buffer out of the cache. Returns {@code true} if successful {@code false} otherwise */ - boolean allocateNormal(PoolArena area, PooledByteBuf buf, int reqCapacity, int normCapacity) { - return allocate(cacheForNormal(area, normCapacity), buf, reqCapacity); + boolean allocateNormal(PoolArena area, PooledByteBuf buf, int reqCapacity, int sizeIdx) { + return allocate(cacheForNormal(area, sizeIdx), buf, reqCapacity); } @SuppressWarnings({ "unchecked", "rawtypes" }) @@ -185,7 +164,7 @@ private boolean allocate(MemoryRegionCache cache, PooledByteBuf buf, int reqC // no cache found so just return false here return false; } - boolean allocated = cache.allocate(buf, reqCapacity); + boolean allocated = cache.allocate(buf, reqCapacity, this); if (++ allocations >= freeSweepAllocationThreshold) { allocations = 0; trim(); @@ -198,75 +177,86 @@ private boolean allocate(MemoryRegionCache cache, PooledByteBuf buf, int reqC * Returns {@code true} if it fit into the cache {@code false} otherwise. 
*/ @SuppressWarnings({ "unchecked", "rawtypes" }) - boolean add(PoolArena area, PoolChunk chunk, long handle, int normCapacity, SizeClass sizeClass) { - MemoryRegionCache cache = cache(area, normCapacity, sizeClass); + boolean add(PoolArena area, PoolChunk chunk, ByteBuffer nioBuffer, + long handle, int normCapacity, SizeClass sizeClass) { + int sizeIdx = area.size2SizeIdx(normCapacity); + MemoryRegionCache cache = cache(area, sizeIdx, sizeClass); if (cache == null) { return false; } - return cache.add(chunk, handle); + return cache.add(chunk, nioBuffer, handle, normCapacity); } - private MemoryRegionCache cache(PoolArena area, int normCapacity, SizeClass sizeClass) { + private MemoryRegionCache cache(PoolArena area, int sizeIdx, SizeClass sizeClass) { switch (sizeClass) { case Normal: - return cacheForNormal(area, normCapacity); + return cacheForNormal(area, sizeIdx); case Small: - return cacheForSmall(area, normCapacity); - case Tiny: - return cacheForTiny(area, normCapacity); + return cacheForSmall(area, sizeIdx); default: throw new Error(); } } + /// TODO: In the future when we move to Java9+ we should use java.lang.ref.Cleaner. + @Override + protected void finalize() throws Throwable { + try { + super.finalize(); + } finally { + free(true); + } + } + /** * Should be called if the Thread that uses this cache is about to exist to release resources out of the cache */ - void free() { - int numFreed = free(tinySubPageDirectCaches) + - free(smallSubPageDirectCaches) + - free(normalDirectCaches) + - free(tinySubPageHeapCaches) + - free(smallSubPageHeapCaches) + - free(normalHeapCaches); - - if (numFreed > 0 && logger.isDebugEnabled()) { - logger.debug("Freed {} thread-local buffer(s) from thread: {}", numFreed, Thread.currentThread().getName()); - } + void free(boolean finalizer) { + // As free() may be called either by the finalizer or by FastThreadLocal.onRemoval(...) we need to ensure + // we only call this one time. 
+ if (freed.compareAndSet(false, true)) { + int numFreed = free(smallSubPageDirectCaches, finalizer) + + free(normalDirectCaches, finalizer) + + free(smallSubPageHeapCaches, finalizer) + + free(normalHeapCaches, finalizer); + + if (numFreed > 0 && logger.isDebugEnabled()) { + logger.debug("Freed {} thread-local buffer(s) from thread: {}", numFreed, + Thread.currentThread().getName()); + } - if (directArena != null) { - directArena.numThreadCaches.getAndDecrement(); - } + if (directArena != null) { + directArena.numThreadCaches.getAndDecrement(); + } - if (heapArena != null) { - heapArena.numThreadCaches.getAndDecrement(); + if (heapArena != null) { + heapArena.numThreadCaches.getAndDecrement(); + } } } - private static int free(MemoryRegionCache[] caches) { + private static int free(MemoryRegionCache[] caches, boolean finalizer) { if (caches == null) { return 0; } int numFreed = 0; for (MemoryRegionCache c: caches) { - numFreed += free(c); + numFreed += free(c, finalizer); } return numFreed; } - private static int free(MemoryRegionCache cache) { + private static int free(MemoryRegionCache cache, boolean finalizer) { if (cache == null) { return 0; } - return cache.free(); + return cache.free(finalizer); } void trim() { - trim(tinySubPageDirectCaches); trim(smallSubPageDirectCaches); trim(normalDirectCaches); - trim(tinySubPageHeapCaches); trim(smallSubPageHeapCaches); trim(normalHeapCaches); } @@ -287,50 +277,42 @@ private static void trim(MemoryRegionCache cache) { cache.trim(); } - private MemoryRegionCache cacheForTiny(PoolArena area, int normCapacity) { - int idx = PoolArena.tinyIdx(normCapacity); + private MemoryRegionCache cacheForSmall(PoolArena area, int sizeIdx) { if (area.isDirect()) { - return cache(tinySubPageDirectCaches, idx); + return cache(smallSubPageDirectCaches, sizeIdx); } - return cache(tinySubPageHeapCaches, idx); + return cache(smallSubPageHeapCaches, sizeIdx); } - private MemoryRegionCache cacheForSmall(PoolArena area, int normCapacity) { - 
int idx = PoolArena.smallIdx(normCapacity); + private MemoryRegionCache cacheForNormal(PoolArena area, int sizeIdx) { + // We need to substract area.numSmallSubpagePools as sizeIdx is the overall index for all sizes. + int idx = sizeIdx - area.numSmallSubpagePools; if (area.isDirect()) { - return cache(smallSubPageDirectCaches, idx); - } - return cache(smallSubPageHeapCaches, idx); - } - - private MemoryRegionCache cacheForNormal(PoolArena area, int normCapacity) { - if (area.isDirect()) { - int idx = log2(normCapacity >> numShiftsNormalDirect); return cache(normalDirectCaches, idx); } - int idx = log2(normCapacity >> numShiftsNormalHeap); return cache(normalHeapCaches, idx); } - private static MemoryRegionCache cache(MemoryRegionCache[] cache, int idx) { - if (cache == null || idx > cache.length - 1) { + private static MemoryRegionCache cache(MemoryRegionCache[] cache, int sizeIdx) { + if (cache == null || sizeIdx > cache.length - 1) { return null; } - return cache[idx]; + return cache[sizeIdx]; } /** * Cache used for buffers which are backed by TINY or SMALL size. 
*/ private static final class SubPageMemoryRegionCache extends MemoryRegionCache { - SubPageMemoryRegionCache(int size, SizeClass sizeClass) { - super(size, sizeClass); + SubPageMemoryRegionCache(int size) { + super(size, SizeClass.Small); } @Override protected void initBuf( - PoolChunk chunk, long handle, PooledByteBuf buf, int reqCapacity) { - chunk.initBufWithSubpage(buf, handle, reqCapacity); + PoolChunk chunk, ByteBuffer nioBuffer, long handle, PooledByteBuf buf, int reqCapacity, + PoolThreadCache threadCache) { + chunk.initBufWithSubpage(buf, nioBuffer, handle, reqCapacity, threadCache); } } @@ -344,8 +326,9 @@ private static final class NormalMemoryRegionCache extends MemoryRegionCache< @Override protected void initBuf( - PoolChunk chunk, long handle, PooledByteBuf buf, int reqCapacity) { - chunk.initBuf(buf, handle, reqCapacity); + PoolChunk chunk, ByteBuffer nioBuffer, long handle, PooledByteBuf buf, int reqCapacity, + PoolThreadCache threadCache) { + chunk.initBuf(buf, nioBuffer, handle, reqCapacity, threadCache); } } @@ -364,15 +347,15 @@ private abstract static class MemoryRegionCache { /** * Init the {@link PooledByteBuf} using the provided chunk and handle with the capacity restrictions. */ - protected abstract void initBuf(PoolChunk chunk, long handle, - PooledByteBuf buf, int reqCapacity); + protected abstract void initBuf(PoolChunk chunk, ByteBuffer nioBuffer, long handle, + PooledByteBuf buf, int reqCapacity, PoolThreadCache threadCache); /** * Add to cache if not already full. 
*/ @SuppressWarnings("unchecked") - public final boolean add(PoolChunk chunk, long handle) { - Entry entry = newEntry(chunk, handle); + public final boolean add(PoolChunk chunk, ByteBuffer nioBuffer, long handle, int normCapacity) { + Entry entry = newEntry(chunk, nioBuffer, handle, normCapacity); boolean queued = queue.offer(entry); if (!queued) { // If it was not possible to cache the chunk, immediately recycle the entry @@ -385,12 +368,12 @@ public final boolean add(PoolChunk chunk, long handle) { /** * Allocate something out of the cache if possible and remove the entry from the cache. */ - public final boolean allocate(PooledByteBuf buf, int reqCapacity) { + public final boolean allocate(PooledByteBuf buf, int reqCapacity, PoolThreadCache threadCache) { Entry entry = queue.poll(); if (entry == null) { return false; } - initBuf(entry.chunk, entry.handle, buf, reqCapacity); + initBuf(entry.chunk, entry.nioBuffer, entry.handle, buf, reqCapacity, threadCache); entry.recycle(); // allocations is not thread-safe which is fine as this is only called from the same thread all time. @@ -401,16 +384,16 @@ public final boolean allocate(PooledByteBuf buf, int reqCapacity) { /** * Clear out this cache and free up all previous cached {@link PoolChunk}s and {@code handle}s. 
*/ - public final int free() { - return free(Integer.MAX_VALUE); + public final int free(boolean finalizer) { + return free(Integer.MAX_VALUE, finalizer); } - private int free(int max) { + private int free(int max, boolean finalizer) { int numFreed = 0; for (; numFreed < max; numFreed++) { Entry entry = queue.poll(); if (entry != null) { - freeEntry(entry); + freeEntry(entry, finalizer); } else { // all cleared return numFreed; @@ -428,25 +411,31 @@ public final void trim() { // We not even allocated all the number that are if (free > 0) { - free(free); + free(free, false); } } @SuppressWarnings({ "unchecked", "rawtypes" }) - private void freeEntry(Entry entry) { + private void freeEntry(Entry entry, boolean finalizer) { PoolChunk chunk = entry.chunk; long handle = entry.handle; + ByteBuffer nioBuffer = entry.nioBuffer; - // recycle now so PoolChunk can be GC'ed. - entry.recycle(); + if (!finalizer) { + // recycle now so PoolChunk can be GC'ed. This will only be done if this is not freed because of + // a finalizer. 
+ entry.recycle(); + } - chunk.arena.freeChunk(chunk, handle, sizeClass); + chunk.arena.freeChunk(chunk, handle, entry.normCapacity, sizeClass, nioBuffer, finalizer); } static final class Entry { final Handle> recyclerHandle; PoolChunk chunk; + ByteBuffer nioBuffer; long handle = -1; + int normCapacity; Entry(Handle> recyclerHandle) { this.recyclerHandle = recyclerHandle; @@ -454,26 +443,23 @@ static final class Entry { void recycle() { chunk = null; + nioBuffer = null; handle = -1; recyclerHandle.recycle(this); } } @SuppressWarnings("rawtypes") - private static Entry newEntry(PoolChunk chunk, long handle) { + private static Entry newEntry(PoolChunk chunk, ByteBuffer nioBuffer, long handle, int normCapacity) { Entry entry = RECYCLER.get(); entry.chunk = chunk; + entry.nioBuffer = nioBuffer; entry.handle = handle; + entry.normCapacity = normCapacity; return entry; } @SuppressWarnings("rawtypes") - private static final Recycler RECYCLER = new Recycler() { - @SuppressWarnings("unchecked") - @Override - protected Entry newObject(Handle handle) { - return new Entry(handle); - } - }; + private static final ObjectPool RECYCLER = ObjectPool.newPool(handle -> new Entry(handle)); } } diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java index 56a4be38723..99d601f4979 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,15 +16,19 @@ package io.netty.buffer; -import io.netty.util.Recycler; -import io.netty.util.Recycler.Handle; +import io.netty.util.internal.ObjectPool.Handle; +import java.io.IOException; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.nio.channels.ClosedChannelException; +import java.nio.channels.FileChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.ScatteringByteChannel; abstract class PooledByteBuf extends AbstractReferenceCountedByteBuf { - private final Recycler.Handle> recyclerHandle; + private final Handle> recyclerHandle; protected PoolChunk chunk; protected long handle; @@ -33,36 +37,38 @@ abstract class PooledByteBuf extends AbstractReferenceCountedByteBuf { protected int length; int maxLength; PoolThreadCache cache; - private ByteBuffer tmpNioBuf; + ByteBuffer tmpNioBuf; private ByteBufAllocator allocator; @SuppressWarnings("unchecked") - protected PooledByteBuf(Recycler.Handle> recyclerHandle, int maxCapacity) { + protected PooledByteBuf(Handle> recyclerHandle, int maxCapacity) { super(maxCapacity); this.recyclerHandle = (Handle>) recyclerHandle; } - void init(PoolChunk chunk, long handle, int offset, int length, int maxLength, PoolThreadCache cache) { - init0(chunk, handle, offset, length, maxLength, cache); + void init(PoolChunk chunk, ByteBuffer nioBuffer, + long handle, int offset, int length, int maxLength, PoolThreadCache cache) { + init0(chunk, nioBuffer, handle, offset, length, maxLength, cache); } void initUnpooled(PoolChunk chunk, int length) { - init0(chunk, 0, chunk.offset, length, length, null); + init0(chunk, null, 0, 0, length, length, null); } - private void init0(PoolChunk chunk, long handle, int 
offset, int length, int maxLength, PoolThreadCache cache) { + private void init0(PoolChunk chunk, ByteBuffer nioBuffer, + long handle, int offset, int length, int maxLength, PoolThreadCache cache) { assert handle >= 0; assert chunk != null; this.chunk = chunk; memory = chunk.memory; + tmpNioBuf = nioBuffer; allocator = chunk.arena.parent; this.cache = cache; this.handle = handle; this.offset = offset; this.length = length; this.maxLength = maxLength; - tmpNioBuf = null; } /** @@ -70,9 +76,8 @@ private void init0(PoolChunk chunk, long handle, int offset, int length, int */ final void reuse(int maxCapacity) { maxCapacity(maxCapacity); - setRefCnt(1); + resetRefCnt(); setIndex0(0, 0); - discardMarks(); } @Override @@ -80,36 +85,30 @@ public final int capacity() { return length; } + @Override + public int maxFastWritableBytes() { + return Math.min(maxLength, maxCapacity()) - writerIndex; + } + @Override public final ByteBuf capacity(int newCapacity) { + if (newCapacity == length) { + ensureAccessible(); + return this; + } checkNewCapacity(newCapacity); - - // If the request capacity does not require reallocation, just update the length of the memory. - if (chunk.unpooled) { - if (newCapacity == length) { - return this; - } - } else { + if (!chunk.unpooled) { + // If the request capacity does not require reallocation, just update the length of the memory. if (newCapacity > length) { if (newCapacity <= maxLength) { length = newCapacity; return this; } - } else if (newCapacity < length) { - if (newCapacity > maxLength >>> 1) { - if (maxLength <= 512) { - if (newCapacity > maxLength - 16) { - length = newCapacity; - setIndex(Math.min(readerIndex(), newCapacity), Math.min(writerIndex(), newCapacity)); - return this; - } - } else { // > 512 (i.e. 
>= 1024) - length = newCapacity; - setIndex(Math.min(readerIndex(), newCapacity), Math.min(writerIndex(), newCapacity)); - return this; - } - } - } else { + } else if (newCapacity > maxLength >>> 1 && + (maxLength > 512 || newCapacity > maxLength - 16)) { + // here newCapacity < length + length = newCapacity; + trimIndicesToCapacity(newCapacity); return this; } } @@ -154,6 +153,8 @@ protected final ByteBuffer internalNioBuffer() { ByteBuffer tmpNioBuf = this.tmpNioBuf; if (tmpNioBuf == null) { this.tmpNioBuf = tmpNioBuf = newInternalNioBuffer(memory); + } else { + tmpNioBuf.clear(); } return tmpNioBuf; } @@ -166,8 +167,8 @@ protected final void deallocate() { final long handle = this.handle; this.handle = -1; memory = null; + chunk.arena.free(chunk, tmpNioBuf, handle, maxLength, cache); tmpNioBuf = null; - chunk.arena.free(chunk, handle, maxLength, cache); chunk = null; recycle(); } @@ -180,4 +181,86 @@ private void recycle() { protected final int idx(int index) { return offset + index; } + + final ByteBuffer _internalNioBuffer(int index, int length, boolean duplicate) { + index = idx(index); + ByteBuffer buffer = duplicate ? 
newInternalNioBuffer(memory) : internalNioBuffer(); + buffer.limit(index + length).position(index); + return buffer; + } + + ByteBuffer duplicateInternalNioBuffer(int index, int length) { + checkIndex(index, length); + return _internalNioBuffer(index, length, true); + } + + @Override + public final ByteBuffer internalNioBuffer(int index, int length) { + checkIndex(index, length); + return _internalNioBuffer(index, length, false); + } + + @Override + public final int nioBufferCount() { + return 1; + } + + @Override + public final ByteBuffer nioBuffer(int index, int length) { + return duplicateInternalNioBuffer(index, length).slice(); + } + + @Override + public final ByteBuffer[] nioBuffers(int index, int length) { + return new ByteBuffer[] { nioBuffer(index, length) }; + } + + @Override + public final boolean isContiguous() { + return true; + } + + @Override + public final int getBytes(int index, GatheringByteChannel out, int length) throws IOException { + return out.write(duplicateInternalNioBuffer(index, length)); + } + + @Override + public final int readBytes(GatheringByteChannel out, int length) throws IOException { + checkReadableBytes(length); + int readBytes = out.write(_internalNioBuffer(readerIndex, length, false)); + readerIndex += readBytes; + return readBytes; + } + + @Override + public final int getBytes(int index, FileChannel out, long position, int length) throws IOException { + return out.write(duplicateInternalNioBuffer(index, length), position); + } + + @Override + public final int readBytes(FileChannel out, long position, int length) throws IOException { + checkReadableBytes(length); + int readBytes = out.write(_internalNioBuffer(readerIndex, length, false), position); + readerIndex += readBytes; + return readBytes; + } + + @Override + public final int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { + try { + return in.read(internalNioBuffer(index, length)); + } catch (ClosedChannelException ignored) { + return -1; + 
} + } + + @Override + public final int setBytes(int index, FileChannel in, long position, int length) throws IOException { + try { + return in.read(internalNioBuffer(index, length), position); + } catch (ClosedChannelException ignored) { + return -1; + } + } } diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java index b613fea9e1c..9fb469cd97a 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java +++ b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,12 +16,16 @@ package io.netty.buffer; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; + import io.netty.util.NettyRuntime; +import io.netty.util.concurrent.EventExecutor; import io.netty.util.concurrent.FastThreadLocal; import io.netty.util.concurrent.FastThreadLocalThread; import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import io.netty.util.internal.SystemPropertyUtil; +import io.netty.util.internal.ThreadExecutorMap; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; @@ -29,6 +33,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.List; +import java.util.concurrent.TimeUnit; public class PooledByteBufAllocator extends AbstractByteBufAllocator implements ByteBufAllocatorMetricProvider { @@ -38,27 +43,34 @@ public class PooledByteBufAllocator extends AbstractByteBufAllocator implements private static final int DEFAULT_PAGE_SIZE; private 
static final int DEFAULT_MAX_ORDER; // 8192 << 11 = 16 MiB per chunk - private static final int DEFAULT_TINY_CACHE_SIZE; private static final int DEFAULT_SMALL_CACHE_SIZE; private static final int DEFAULT_NORMAL_CACHE_SIZE; - private static final int DEFAULT_MAX_CACHED_BUFFER_CAPACITY; + static final int DEFAULT_MAX_CACHED_BUFFER_CAPACITY; private static final int DEFAULT_CACHE_TRIM_INTERVAL; + private static final long DEFAULT_CACHE_TRIM_INTERVAL_MILLIS; private static final boolean DEFAULT_USE_CACHE_FOR_ALL_THREADS; private static final int DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT; + static final int DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK; private static final int MIN_PAGE_SIZE = 4096; private static final int MAX_CHUNK_SIZE = (int) (((long) Integer.MAX_VALUE + 1) / 2); + private final Runnable trimTask = this::trimCurrentThreadCache; + static { + int defaultAlignment = SystemPropertyUtil.getInt( + "io.netty.allocator.directMemoryCacheAlignment", 0); int defaultPageSize = SystemPropertyUtil.getInt("io.netty.allocator.pageSize", 8192); Throwable pageSizeFallbackCause = null; try { - validateAndCalculatePageShifts(defaultPageSize); + validateAndCalculatePageShifts(defaultPageSize, defaultAlignment); } catch (Throwable t) { pageSizeFallbackCause = t; defaultPageSize = 8192; + defaultAlignment = 0; } DEFAULT_PAGE_SIZE = defaultPageSize; + DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = defaultAlignment; int defaultMaxOrder = SystemPropertyUtil.getInt("io.netty.allocator.maxOrder", 11); Throwable maxOrderFallbackCause = null; @@ -97,7 +109,6 @@ public class PooledByteBufAllocator extends AbstractByteBufAllocator implements PlatformDependent.maxDirectMemory() / defaultChunkSize / 2 / 3))); // cache sizes - DEFAULT_TINY_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.tinyCacheSize", 512); DEFAULT_SMALL_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.smallCacheSize", 256); DEFAULT_NORMAL_CACHE_SIZE = 
SystemPropertyUtil.getInt("io.netty.allocator.normalCacheSize", 64); @@ -110,11 +121,16 @@ public class PooledByteBufAllocator extends AbstractByteBufAllocator implements DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.getInt( "io.netty.allocator.cacheTrimInterval", 8192); + DEFAULT_CACHE_TRIM_INTERVAL_MILLIS = SystemPropertyUtil.getLong( + "io.netty.allocator.cacheTrimIntervalMillis", 0); + DEFAULT_USE_CACHE_FOR_ALL_THREADS = SystemPropertyUtil.getBoolean( - "io.netty.allocator.useCacheForAllThreads", true); + "io.netty.allocator.useCacheForAllThreads", false); - DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = SystemPropertyUtil.getInt( - "io.netty.allocator.directMemoryCacheAlignment", 0); + // Use 1023 by default as we use an ArrayDeque as backing storage which will then allocate an internal array + // of 1024 elements. Otherwise we would allocate 2048 and only use 1024 which is wasteful. + DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK = SystemPropertyUtil.getInt( + "io.netty.allocator.maxCachedByteBuffersPerChunk", 1023); if (logger.isDebugEnabled()) { logger.debug("-Dio.netty.allocator.numHeapArenas: {}", DEFAULT_NUM_HEAP_ARENA); @@ -130,12 +146,14 @@ public class PooledByteBufAllocator extends AbstractByteBufAllocator implements logger.debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause); } logger.debug("-Dio.netty.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER); - logger.debug("-Dio.netty.allocator.tinyCacheSize: {}", DEFAULT_TINY_CACHE_SIZE); logger.debug("-Dio.netty.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE); logger.debug("-Dio.netty.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE); logger.debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY); logger.debug("-Dio.netty.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL); + logger.debug("-Dio.netty.allocator.cacheTrimIntervalMillis: {}", DEFAULT_CACHE_TRIM_INTERVAL_MILLIS); 
logger.debug("-Dio.netty.allocator.useCacheForAllThreads: {}", DEFAULT_USE_CACHE_FOR_ALL_THREADS); + logger.debug("-Dio.netty.allocator.maxCachedByteBuffersPerChunk: {}", + DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK); } } @@ -144,7 +162,6 @@ public class PooledByteBufAllocator extends AbstractByteBufAllocator implements private final PoolArena[] heapArenas; private final PoolArena[] directArenas; - private final int tinyCacheSize; private final int smallCacheSize; private final int normalCacheSize; private final List heapArenaMetrics; @@ -169,55 +186,85 @@ public PooledByteBufAllocator(int nHeapArena, int nDirectArena, int pageSize, in /** * @deprecated use - * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, int, boolean)} + * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean)} */ @Deprecated public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder) { this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, - DEFAULT_TINY_CACHE_SIZE, DEFAULT_SMALL_CACHE_SIZE, DEFAULT_NORMAL_CACHE_SIZE); + 0, DEFAULT_SMALL_CACHE_SIZE, DEFAULT_NORMAL_CACHE_SIZE); } /** * @deprecated use - * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, int, boolean)} + * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean)} */ @Deprecated public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder, int tinyCacheSize, int smallCacheSize, int normalCacheSize) { - this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, tinyCacheSize, smallCacheSize, - normalCacheSize, DEFAULT_USE_CACHE_FOR_ALL_THREADS, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT); + this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, smallCacheSize, + normalCacheSize, DEFAULT_USE_CACHE_FOR_ALL_THREADS, 
DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT); } + /** + * @deprecated use + * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean)} + */ + @Deprecated public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder, int tinyCacheSize, int smallCacheSize, int normalCacheSize, boolean useCacheForAllThreads) { this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, - tinyCacheSize, smallCacheSize, normalCacheSize, - useCacheForAllThreads, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT); + smallCacheSize, normalCacheSize, + useCacheForAllThreads); } + public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, + int nDirectArena, int pageSize, int maxOrder, + int smallCacheSize, int normalCacheSize, + boolean useCacheForAllThreads) { + this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, + smallCacheSize, normalCacheSize, + useCacheForAllThreads, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT); + } + + /** + * @deprecated use + * {@link PooledByteBufAllocator#PooledByteBufAllocator(boolean, int, int, int, int, int, int, boolean, int)} + */ + @Deprecated public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder, int tinyCacheSize, int smallCacheSize, int normalCacheSize, boolean useCacheForAllThreads, int directMemoryCacheAlignment) { + this(preferDirect, nHeapArena, nDirectArena, pageSize, maxOrder, + smallCacheSize, normalCacheSize, + useCacheForAllThreads, directMemoryCacheAlignment); + } + + public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectArena, int pageSize, int maxOrder, + int smallCacheSize, int normalCacheSize, + boolean useCacheForAllThreads, int directMemoryCacheAlignment) { super(preferDirect); threadCache = new PoolThreadLocalCache(useCacheForAllThreads); - this.tinyCacheSize = tinyCacheSize; this.smallCacheSize = smallCacheSize; this.normalCacheSize = normalCacheSize; - 
chunkSize = validateAndCalculateChunkSize(pageSize, maxOrder); - if (nHeapArena < 0) { - throw new IllegalArgumentException("nHeapArena: " + nHeapArena + " (expected: >= 0)"); - } - if (nDirectArena < 0) { - throw new IllegalArgumentException("nDirectArea: " + nDirectArena + " (expected: >= 0)"); - } + if (directMemoryCacheAlignment != 0) { + if (!PlatformDependent.hasAlignDirectByteBuffer()) { + throw new UnsupportedOperationException("Buffer alignment is not supported. " + + "Either Unsafe or ByteBuffer.alignSlice() must be available."); + } - if (directMemoryCacheAlignment < 0) { - throw new IllegalArgumentException("directMemoryCacheAlignment: " - + directMemoryCacheAlignment + " (expected: >= 0)"); + // Ensure page size is a whole multiple of the alignment, or bump it to the next whole multiple. + pageSize = (int) PlatformDependent.align(pageSize, directMemoryCacheAlignment); } + + chunkSize = validateAndCalculateChunkSize(pageSize, maxOrder); + + checkPositiveOrZero(nHeapArena, "nHeapArena"); + checkPositiveOrZero(nDirectArena, "nDirectArena"); + + checkPositiveOrZero(directMemoryCacheAlignment, "directMemoryCacheAlignment"); if (directMemoryCacheAlignment > 0 && !isDirectMemoryCacheAlignmentSupported()) { throw new IllegalArgumentException("directMemoryCacheAlignment is not supported"); } @@ -227,14 +274,14 @@ public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectA + directMemoryCacheAlignment + " (expected: power of two)"); } - int pageShifts = validateAndCalculatePageShifts(pageSize); + int pageShifts = validateAndCalculatePageShifts(pageSize, directMemoryCacheAlignment); if (nHeapArena > 0) { heapArenas = newArenaArray(nHeapArena); - List metrics = new ArrayList(heapArenas.length); + List metrics = new ArrayList<>(heapArenas.length); for (int i = 0; i < heapArenas.length; i ++) { PoolArena.HeapArena arena = new PoolArena.HeapArena(this, - pageSize, maxOrder, pageShifts, chunkSize, + pageSize, pageShifts, chunkSize, 
directMemoryCacheAlignment); heapArenas[i] = arena; metrics.add(arena); @@ -247,10 +294,10 @@ public PooledByteBufAllocator(boolean preferDirect, int nHeapArena, int nDirectA if (nDirectArena > 0) { directArenas = newArenaArray(nDirectArena); - List metrics = new ArrayList(directArenas.length); + List metrics = new ArrayList<>(directArenas.length); for (int i = 0; i < directArenas.length; i ++) { PoolArena.DirectArena arena = new PoolArena.DirectArena( - this, pageSize, maxOrder, pageShifts, chunkSize, directMemoryCacheAlignment); + this, pageSize, pageShifts, chunkSize, directMemoryCacheAlignment); directArenas[i] = arena; metrics.add(arena); } @@ -267,15 +314,20 @@ private static PoolArena[] newArenaArray(int size) { return new PoolArena[size]; } - private static int validateAndCalculatePageShifts(int pageSize) { + private static int validateAndCalculatePageShifts(int pageSize, int alignment) { if (pageSize < MIN_PAGE_SIZE) { - throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ")"); + throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ')'); } if ((pageSize & pageSize - 1) != 0) { throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: power of 2)"); } + if (pageSize < alignment) { + throw new IllegalArgumentException("Alignment cannot be greater than page size. " + + "Alignment: " + alignment + ", page size: " + pageSize + '.'); + } + // Logarithm base 2. At this point we know that pageSize is a power of two. return Integer.SIZE - 1 - Integer.numberOfLeadingZeros(pageSize); } @@ -374,10 +426,13 @@ public static boolean defaultPreferDirect() { } /** - * Default tiny cache size - System Property: io.netty.allocator.tinyCacheSize - default 512 + * Default tiny cache size - default 0 + * + * @deprecated Tiny caches have been merged into small caches. 
*/ + @Deprecated public static int defaultTinyCacheSize() { - return DEFAULT_TINY_CACHE_SIZE; + return 0; } /** @@ -435,19 +490,28 @@ protected synchronized PoolThreadCache initialValue() { final PoolArena heapArena = leastUsedArena(heapArenas); final PoolArena directArena = leastUsedArena(directArenas); - Thread current = Thread.currentThread(); + final Thread current = Thread.currentThread(); if (useCacheForAllThreads || current instanceof FastThreadLocalThread) { - return new PoolThreadCache( - heapArena, directArena, tinyCacheSize, smallCacheSize, normalCacheSize, + final PoolThreadCache cache = new PoolThreadCache( + heapArena, directArena, smallCacheSize, normalCacheSize, DEFAULT_MAX_CACHED_BUFFER_CAPACITY, DEFAULT_CACHE_TRIM_INTERVAL); + + if (DEFAULT_CACHE_TRIM_INTERVAL_MILLIS > 0) { + final EventExecutor executor = ThreadExecutorMap.currentExecutor(); + if (executor != null) { + executor.scheduleAtFixedRate(trimTask, DEFAULT_CACHE_TRIM_INTERVAL_MILLIS, + DEFAULT_CACHE_TRIM_INTERVAL_MILLIS, TimeUnit.MILLISECONDS); + } + } + return cache; } // No caching so just use 0 as sizes. - return new PoolThreadCache(heapArena, directArena, 0, 0, 0, 0, 0); + return new PoolThreadCache(heapArena, directArena, 0, 0, 0, 0); } @Override protected void onRemoval(PoolThreadCache threadCache) { - threadCache.free(); + threadCache.free(false); } private PoolArena leastUsedArena(PoolArena[] arenas) { @@ -539,7 +603,7 @@ public int numThreadLocalCaches() { */ @Deprecated public int tinyCacheSize() { - return tinyCacheSize; + return 0; } /** @@ -580,7 +644,7 @@ final long usedDirectMemory() { return usedMemory(directArenas); } - private static long usedMemory(PoolArena... 
arenas) { + private static long usedMemory(PoolArena[] arenas) { if (arenas == null) { return -1; } @@ -600,6 +664,21 @@ final PoolThreadCache threadCache() { return cache; } + /** + * Trim thread local cache for the current {@link Thread}, which will give back any cached memory that was not + * allocated frequently since the last trim operation. + * + * Returns {@code true} if a cache for the current {@link Thread} exists and so was trimmed, false otherwise. + */ + public boolean trimCurrentThreadCache() { + PoolThreadCache cache = threadCache.getIfExists(); + if (cache != null) { + cache.trim(); + return true; + } + return false; + } + /** * Returns the status of the allocator (which contains all metrics) as string. Be aware this may be expensive * and so should not called too frequently. diff --git a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocatorMetric.java b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocatorMetric.java index f9391f6edb9..f20dfff5bc3 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocatorMetric.java +++ b/buffer/src/main/java/io/netty/buffer/PooledByteBufAllocatorMetric.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -68,7 +68,10 @@ public int numThreadLocalCaches() { /** * Return the size of the tiny cache. + * + * @deprecated Tiny caches have been merged into small caches. 
*/ + @Deprecated public int tinyCacheSize() { return allocator.tinyCacheSize(); } @@ -112,7 +115,6 @@ public String toString() { .append("; usedDirectMemory: ").append(usedDirectMemory()) .append("; numHeapArenas: ").append(numHeapArenas()) .append("; numDirectArenas: ").append(numDirectArenas()) - .append("; tinyCacheSize: ").append(tinyCacheSize()) .append("; smallCacheSize: ").append(smallCacheSize()) .append("; normalCacheSize: ").append(normalCacheSize()) .append("; numThreadLocalCaches: ").append(numThreadLocalCaches()) diff --git a/buffer/src/main/java/io/netty/buffer/PooledDirectByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledDirectByteBuf.java index 7c6192bf076..5509c5dbc71 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledDirectByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledDirectByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,25 +16,18 @@ package io.netty.buffer; -import io.netty.util.Recycler; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.FileChannel; -import java.nio.channels.GatheringByteChannel; -import java.nio.channels.ScatteringByteChannel; final class PooledDirectByteBuf extends PooledByteBuf { - private static final Recycler RECYCLER = new Recycler() { - @Override - protected PooledDirectByteBuf newObject(Handle handle) { - return new PooledDirectByteBuf(handle, 0); - } - }; + private static final ObjectPool RECYCLER = 
ObjectPool.newPool( + handle -> new PooledDirectByteBuf(handle, 0)); static PooledDirectByteBuf newInstance(int maxCapacity) { PooledDirectByteBuf buf = RECYCLER.get(); @@ -42,7 +35,7 @@ static PooledDirectByteBuf newInstance(int maxCapacity) { return buf; } - private PooledDirectByteBuf(Recycler.Handle recyclerHandle, int maxCapacity) { + private PooledDirectByteBuf(Handle recyclerHandle, int maxCapacity) { super(recyclerHandle, maxCapacity); } @@ -126,55 +119,30 @@ public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { @Override public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { - getBytes(index, dst, dstIndex, length, false); - return this; - } - - private void getBytes(int index, byte[] dst, int dstIndex, int length, boolean internal) { checkDstIndex(index, length, dstIndex, dst.length); - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = memory.duplicate(); - } - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - tmpBuf.get(dst, dstIndex, length); + _internalNioBuffer(index, length, true).get(dst, dstIndex, length); + return this; } @Override public ByteBuf readBytes(byte[] dst, int dstIndex, int length) { - checkReadableBytes(length); - getBytes(readerIndex, dst, dstIndex, length, true); + checkDstIndex(length, dstIndex, dst.length); + _internalNioBuffer(readerIndex, length, false).get(dst, dstIndex, length); readerIndex += length; return this; } @Override public ByteBuf getBytes(int index, ByteBuffer dst) { - getBytes(index, dst, false); + dst.put(duplicateInternalNioBuffer(index, dst.remaining())); return this; } - private void getBytes(int index, ByteBuffer dst, boolean internal) { - checkIndex(index, dst.remaining()); - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = memory.duplicate(); - } - index = idx(index); - tmpBuf.clear().position(index).limit(index + dst.remaining()); - dst.put(tmpBuf); - } - 
@Override public ByteBuf readBytes(ByteBuffer dst) { int length = dst.remaining(); checkReadableBytes(length); - getBytes(readerIndex, dst, true); + dst.put(_internalNioBuffer(readerIndex, length, false)); readerIndex += length; return this; } @@ -190,17 +158,7 @@ private void getBytes(int index, OutputStream out, int length, boolean internal) if (length == 0) { return; } - - byte[] tmp = new byte[length]; - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = memory.duplicate(); - } - tmpBuf.clear().position(idx(index)); - tmpBuf.get(tmp); - out.write(tmp); + ByteBufUtil.readBytes(alloc(), internal ? internalNioBuffer() : memory.duplicate(), idx(index), length, out); } @Override @@ -211,61 +169,6 @@ public ByteBuf readBytes(OutputStream out, int length) throws IOException { return this; } - @Override - public int getBytes(int index, GatheringByteChannel out, int length) throws IOException { - return getBytes(index, out, length, false); - } - - private int getBytes(int index, GatheringByteChannel out, int length, boolean internal) throws IOException { - checkIndex(index, length); - if (length == 0) { - return 0; - } - - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = memory.duplicate(); - } - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - return out.write(tmpBuf); - } - - @Override - public int getBytes(int index, FileChannel out, long position, int length) throws IOException { - return getBytes(index, out, position, length, false); - } - - private int getBytes(int index, FileChannel out, long position, int length, boolean internal) throws IOException { - checkIndex(index, length); - if (length == 0) { - return 0; - } - - ByteBuffer tmpBuf = internal ? 
internalNioBuffer() : memory.duplicate(); - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - return out.write(tmpBuf, position); - } - - @Override - public int readBytes(GatheringByteChannel out, int length) throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, length, true); - readerIndex += readBytes; - return readBytes; - } - - @Override - public int readBytes(FileChannel out, long position, int length) throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, position, length, true); - readerIndex += readBytes; - return readBytes; - } - @Override protected void _setByte(int index, int value) { memory.put(idx(index), (byte) value); @@ -337,23 +240,21 @@ public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) { @Override public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) { checkSrcIndex(index, length, srcIndex, src.length); - ByteBuffer tmpBuf = internalNioBuffer(); - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - tmpBuf.put(src, srcIndex, length); + _internalNioBuffer(index, length, false).put(src, srcIndex, length); return this; } @Override public ByteBuf setBytes(int index, ByteBuffer src) { - checkIndex(index, src.remaining()); + int length = src.remaining(); + checkIndex(index, length); ByteBuffer tmpBuf = internalNioBuffer(); if (src == tmpBuf) { src = src.duplicate(); } index = idx(index); - tmpBuf.clear().position(index).limit(index + src.remaining()); + tmpBuf.limit(index + length).position(index); tmpBuf.put(src); return this; } @@ -361,73 +262,22 @@ public ByteBuf setBytes(int index, ByteBuffer src) { @Override public int setBytes(int index, InputStream in, int length) throws IOException { checkIndex(index, length); - byte[] tmp = new byte[length]; - int readBytes = in.read(tmp); + byte[] tmp = ByteBufUtil.threadLocalTempArray(length); + int readBytes = in.read(tmp, 0, 
length); if (readBytes <= 0) { return readBytes; } ByteBuffer tmpBuf = internalNioBuffer(); - tmpBuf.clear().position(idx(index)); + tmpBuf.position(idx(index)); tmpBuf.put(tmp, 0, readBytes); return readBytes; } - @Override - public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { - checkIndex(index, length); - ByteBuffer tmpBuf = internalNioBuffer(); - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - try { - return in.read(tmpBuf); - } catch (ClosedChannelException ignored) { - return -1; - } - } - - @Override - public int setBytes(int index, FileChannel in, long position, int length) throws IOException { - checkIndex(index, length); - ByteBuffer tmpBuf = internalNioBuffer(); - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - try { - return in.read(tmpBuf, position); - } catch (ClosedChannelException ignored) { - return -1; - } - } - @Override public ByteBuf copy(int index, int length) { checkIndex(index, length); ByteBuf copy = alloc().directBuffer(length, maxCapacity()); - copy.writeBytes(this, index, length); - return copy; - } - - @Override - public int nioBufferCount() { - return 1; - } - - @Override - public ByteBuffer nioBuffer(int index, int length) { - checkIndex(index, length); - index = idx(index); - return ((ByteBuffer) memory.duplicate().position(index).limit(index + length)).slice(); - } - - @Override - public ByteBuffer[] nioBuffers(int index, int length) { - return new ByteBuffer[] { nioBuffer(index, length) }; - } - - @Override - public ByteBuffer internalNioBuffer(int index, int length) { - checkIndex(index, length); - index = idx(index); - return (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length); + return copy.writeBytes(this, index, length); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/PooledDuplicatedByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledDuplicatedByteBuf.java index 
1260f4e31fe..7769a8d3a0a 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledDuplicatedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledDuplicatedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,8 +17,8 @@ package io.netty.buffer; import io.netty.util.ByteProcessor; -import io.netty.util.Recycler; -import io.netty.util.Recycler.Handle; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import java.io.IOException; import java.io.InputStream; @@ -30,20 +30,13 @@ final class PooledDuplicatedByteBuf extends AbstractPooledDerivedByteBuf { - private static final Recycler RECYCLER = new Recycler() { - @Override - protected PooledDuplicatedByteBuf newObject(Handle handle) { - return new PooledDuplicatedByteBuf(handle); - } - }; + private static final ObjectPool RECYCLER = + ObjectPool.newPool(PooledDuplicatedByteBuf::new); static PooledDuplicatedByteBuf newInstance(AbstractByteBuf unwrapped, ByteBuf wrapped, int readerIndex, int writerIndex) { final PooledDuplicatedByteBuf duplicate = RECYCLER.get(); duplicate.init(unwrapped, wrapped, readerIndex, writerIndex, unwrapped.maxCapacity()); - duplicate.markReaderIndex(); - duplicate.markWriterIndex(); - return duplicate; } diff --git a/buffer/src/main/java/io/netty/buffer/PooledHeapByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledHeapByteBuf.java index 467bde09cc9..5cd768a688b 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledHeapByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledHeapByteBuf.java @@ -3,7 +3,7 @@ * * The Netty Project licenses this file to the License at: * - * 
http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,26 +14,19 @@ package io.netty.buffer; -import io.netty.util.Recycler; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import io.netty.util.internal.PlatformDependent; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.FileChannel; -import java.nio.channels.GatheringByteChannel; -import java.nio.channels.ScatteringByteChannel; class PooledHeapByteBuf extends PooledByteBuf { - private static final Recycler RECYCLER = new Recycler() { - @Override - protected PooledHeapByteBuf newObject(Handle handle) { - return new PooledHeapByteBuf(handle, 0); - } - }; + private static final ObjectPool RECYCLER = ObjectPool.newPool( + handle -> new PooledHeapByteBuf(handle, 0)); static PooledHeapByteBuf newInstance(int maxCapacity) { PooledHeapByteBuf buf = RECYCLER.get(); @@ -41,7 +34,7 @@ static PooledHeapByteBuf newInstance(int maxCapacity) { return buf; } - PooledHeapByteBuf(Recycler.Handle recyclerHandle, int maxCapacity) { + PooledHeapByteBuf(Handle recyclerHandle, int maxCapacity) { super(recyclerHandle, maxCapacity); } @@ -117,8 +110,9 @@ public final ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { @Override public final ByteBuf getBytes(int index, ByteBuffer dst) { - checkIndex(index, dst.remaining()); - dst.put(memory, idx(index), dst.remaining()); + int length = dst.remaining(); + checkIndex(index, length); + dst.put(memory, idx(index), length); return this; } @@ -129,51 +123,6 @@ public final ByteBuf getBytes(int index, OutputStream out, int length) throws IO return this; } - @Override - public final int getBytes(int index, 
GatheringByteChannel out, int length) throws IOException { - return getBytes(index, out, length, false); - } - - private int getBytes(int index, GatheringByteChannel out, int length, boolean internal) throws IOException { - checkIndex(index, length); - index = idx(index); - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = ByteBuffer.wrap(memory); - } - return out.write((ByteBuffer) tmpBuf.clear().position(index).limit(index + length)); - } - - @Override - public final int getBytes(int index, FileChannel out, long position, int length) throws IOException { - return getBytes(index, out, position, length, false); - } - - private int getBytes(int index, FileChannel out, long position, int length, boolean internal) throws IOException { - checkIndex(index, length); - index = idx(index); - ByteBuffer tmpBuf = internal ? internalNioBuffer() : ByteBuffer.wrap(memory); - return out.write((ByteBuffer) tmpBuf.clear().position(index).limit(index + length), position); - } - - @Override - public final int readBytes(GatheringByteChannel out, int length) throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, length, true); - readerIndex += readBytes; - return readBytes; - } - - @Override - public final int readBytes(FileChannel out, long position, int length) throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, position, length, true); - readerIndex += readBytes; - return readBytes; - } - @Override protected void _setByte(int index, int value) { HeapByteBufUtil.setByte(memory, idx(index), value); @@ -253,59 +202,17 @@ public final int setBytes(int index, InputStream in, int length) throws IOExcept return in.read(memory, idx(index), length); } - @Override - public final int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { - checkIndex(index, length); - index = idx(index); - try { - return in.read((ByteBuffer) 
internalNioBuffer().clear().position(index).limit(index + length)); - } catch (ClosedChannelException ignored) { - return -1; - } - } - - @Override - public final int setBytes(int index, FileChannel in, long position, int length) throws IOException { - checkIndex(index, length); - index = idx(index); - try { - return in.read((ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length), position); - } catch (ClosedChannelException ignored) { - return -1; - } - } - @Override public final ByteBuf copy(int index, int length) { checkIndex(index, length); ByteBuf copy = alloc().heapBuffer(length, maxCapacity()); - copy.writeBytes(memory, idx(index), length); - return copy; - } - - @Override - public final int nioBufferCount() { - return 1; - } - - @Override - public final ByteBuffer[] nioBuffers(int index, int length) { - return new ByteBuffer[] { nioBuffer(index, length) }; - } - - @Override - public final ByteBuffer nioBuffer(int index, int length) { - checkIndex(index, length); - index = idx(index); - ByteBuffer buf = ByteBuffer.wrap(memory, index, length); - return buf.slice(); + return copy.writeBytes(memory, idx(index), length); } @Override - public final ByteBuffer internalNioBuffer(int index, int length) { + final ByteBuffer duplicateInternalNioBuffer(int index, int length) { checkIndex(index, length); - index = idx(index); - return (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length); + return ByteBuffer.wrap(memory, idx(index), length).slice(); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/PooledSlicedByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledSlicedByteBuf.java index 44051881abe..a35ab9b03cf 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledSlicedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledSlicedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,8 +17,8 @@ package io.netty.buffer; import io.netty.util.ByteProcessor; -import io.netty.util.Recycler; -import io.netty.util.Recycler.Handle; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import java.io.IOException; import java.io.InputStream; @@ -32,12 +32,7 @@ final class PooledSlicedByteBuf extends AbstractPooledDerivedByteBuf { - private static final Recycler RECYCLER = new Recycler() { - @Override - protected PooledSlicedByteBuf newObject(Handle handle) { - return new PooledSlicedByteBuf(handle); - } - }; + private static final ObjectPool RECYCLER = ObjectPool.newPool(PooledSlicedByteBuf::new); static PooledSlicedByteBuf newInstance(AbstractByteBuf unwrapped, ByteBuf wrapped, int index, int length) { @@ -49,7 +44,6 @@ private static PooledSlicedByteBuf newInstance0(AbstractByteBuf unwrapped, ByteB int adjustment, int length) { final PooledSlicedByteBuf slice = RECYCLER.get(); slice.init(unwrapped, wrapped, 0, length, length); - slice.discardMarks(); slice.adjustment = adjustment; return slice; diff --git a/buffer/src/main/java/io/netty/buffer/PooledUnsafeDirectByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledUnsafeDirectByteBuf.java index 1dcc3702c4b..8ebb772c87c 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledUnsafeDirectByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledUnsafeDirectByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,25 +16,18 @@ package io.netty.buffer; -import io.netty.util.Recycler; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import io.netty.util.internal.PlatformDependent; import java.io.IOException; import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.FileChannel; -import java.nio.channels.GatheringByteChannel; -import java.nio.channels.ScatteringByteChannel; final class PooledUnsafeDirectByteBuf extends PooledByteBuf { - private static final Recycler RECYCLER = new Recycler() { - @Override - protected PooledUnsafeDirectByteBuf newObject(Handle handle) { - return new PooledUnsafeDirectByteBuf(handle, 0); - } - }; + private static final ObjectPool RECYCLER = ObjectPool.newPool( + handle -> new PooledUnsafeDirectByteBuf(handle, 0)); static PooledUnsafeDirectByteBuf newInstance(int maxCapacity) { PooledUnsafeDirectByteBuf buf = RECYCLER.get(); @@ -44,14 +37,14 @@ static PooledUnsafeDirectByteBuf newInstance(int maxCapacity) { private long memoryAddress; - private PooledUnsafeDirectByteBuf(Recycler.Handle recyclerHandle, int maxCapacity) { + private PooledUnsafeDirectByteBuf(Handle recyclerHandle, int maxCapacity) { super(recyclerHandle, maxCapacity); } @Override - void init(PoolChunk chunk, long handle, int offset, int length, int maxLength, - PoolThreadCache cache) { - super.init(chunk, handle, offset, length, maxLength, cache); + void init(PoolChunk chunk, ByteBuffer nioBuffer, + long handle, int offset, int length, int maxLength, PoolThreadCache cache) { + super.init(chunk, nioBuffer, handle, offset, length, maxLength, cache); 
initMemoryAddress(); } @@ -138,78 +131,12 @@ public ByteBuf getBytes(int index, ByteBuffer dst) { return this; } - @Override - public ByteBuf readBytes(ByteBuffer dst) { - int length = dst.remaining(); - checkReadableBytes(length); - getBytes(readerIndex, dst); - readerIndex += length; - return this; - } - @Override public ByteBuf getBytes(int index, OutputStream out, int length) throws IOException { UnsafeByteBufUtil.getBytes(this, addr(index), index, out, length); return this; } - @Override - public int getBytes(int index, GatheringByteChannel out, int length) throws IOException { - return getBytes(index, out, length, false); - } - - private int getBytes(int index, GatheringByteChannel out, int length, boolean internal) throws IOException { - checkIndex(index, length); - if (length == 0) { - return 0; - } - - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = memory.duplicate(); - } - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - return out.write(tmpBuf); - } - - @Override - public int getBytes(int index, FileChannel out, long position, int length) throws IOException { - return getBytes(index, out, position, length, false); - } - - private int getBytes(int index, FileChannel out, long position, int length, boolean internal) throws IOException { - checkIndex(index, length); - if (length == 0) { - return 0; - } - - ByteBuffer tmpBuf = internal ? 
internalNioBuffer() : memory.duplicate(); - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - return out.write(tmpBuf, position); - } - - @Override - public int readBytes(GatheringByteChannel out, int length) - throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, length, true); - readerIndex += readBytes; - return readBytes; - } - - @Override - public int readBytes(FileChannel out, long position, int length) - throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, position, length, true); - readerIndex += readBytes; - return readBytes; - } - @Override protected void _setByte(int index, int value) { UnsafeByteBufUtil.setByte(addr(index), (byte) value); @@ -278,61 +205,11 @@ public int setBytes(int index, InputStream in, int length) throws IOException { return UnsafeByteBufUtil.setBytes(this, addr(index), index, in, length); } - @Override - public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { - checkIndex(index, length); - ByteBuffer tmpBuf = internalNioBuffer(); - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - try { - return in.read(tmpBuf); - } catch (ClosedChannelException ignored) { - return -1; - } - } - - @Override - public int setBytes(int index, FileChannel in, long position, int length) throws IOException { - checkIndex(index, length); - ByteBuffer tmpBuf = internalNioBuffer(); - index = idx(index); - tmpBuf.clear().position(index).limit(index + length); - try { - return in.read(tmpBuf, position); - } catch (ClosedChannelException ignored) { - return -1; - } - } - @Override public ByteBuf copy(int index, int length) { return UnsafeByteBufUtil.copy(this, addr(index), index, length); } - @Override - public int nioBufferCount() { - return 1; - } - - @Override - public ByteBuffer[] nioBuffers(int index, int length) { - return new ByteBuffer[] { nioBuffer(index, length) }; - } - - 
@Override - public ByteBuffer nioBuffer(int index, int length) { - checkIndex(index, length); - index = idx(index); - return ((ByteBuffer) memory.duplicate().position(index).limit(index + length)).slice(); - } - - @Override - public ByteBuffer internalNioBuffer(int index, int length) { - checkIndex(index, length); - index = idx(index); - return (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length); - } - @Override public boolean hasArray() { return false; diff --git a/buffer/src/main/java/io/netty/buffer/PooledUnsafeHeapByteBuf.java b/buffer/src/main/java/io/netty/buffer/PooledUnsafeHeapByteBuf.java index a644450f5ce..4c04eac20d3 100644 --- a/buffer/src/main/java/io/netty/buffer/PooledUnsafeHeapByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/PooledUnsafeHeapByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,18 +15,14 @@ */ package io.netty.buffer; -import io.netty.util.Recycler; -import io.netty.util.Recycler.Handle; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; import io.netty.util.internal.PlatformDependent; final class PooledUnsafeHeapByteBuf extends PooledHeapByteBuf { - private static final Recycler RECYCLER = new Recycler() { - @Override - protected PooledUnsafeHeapByteBuf newObject(Handle handle) { - return new PooledUnsafeHeapByteBuf(handle, 0); - } - }; + private static final ObjectPool RECYCLER = ObjectPool.newPool( + handle -> new PooledUnsafeHeapByteBuf(handle, 0)); static PooledUnsafeHeapByteBuf newUnsafeInstance(int maxCapacity) { PooledUnsafeHeapByteBuf buf = RECYCLER.get(); @@ -130,26 +126,18 @@ protected 
void _setLongLE(int index, long value) { @Override public ByteBuf setZero(int index, int length) { - if (PlatformDependent.javaVersion() >= 7) { - checkIndex(index, length); - // Only do on java7+ as the needed Unsafe call was only added there. - UnsafeByteBufUtil.setZero(memory, idx(index), length); - return this; - } - return super.setZero(index, length); + checkIndex(index, length); + UnsafeByteBufUtil.setZero(memory, idx(index), length); + return this; } @Override public ByteBuf writeZero(int length) { - if (PlatformDependent.javaVersion() >= 7) { - // Only do on java7+ as the needed Unsafe call was only added there. - ensureWritable(length); - int wIndex = writerIndex; - UnsafeByteBufUtil.setZero(memory, idx(wIndex), length); - writerIndex = wIndex + length; - return this; - } - return super.writeZero(length); + ensureWritable(length); + int wIndex = writerIndex; + UnsafeByteBufUtil.setZero(memory, idx(wIndex), length); + writerIndex = wIndex + length; + return this; } @Override diff --git a/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBuf.java b/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBuf.java index 583853d7b88..979916c9855 100644 --- a/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -75,6 +75,16 @@ public ByteBuf ensureWritable(int minWritableBytes) { throw new ReadOnlyBufferException(); } + @Override + public int writableBytes() { + return 0; + } + + @Override + public int maxWritableBytes() { + return 0; + } + @Override public ByteBuf unwrap() { return buffer; diff --git a/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBufferBuf.java b/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBufferBuf.java index 037068b03ce..1e0bf73958b 100644 --- a/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBufferBuf.java +++ b/buffer/src/main/java/io/netty/buffer/ReadOnlyByteBufferBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -27,7 +27,6 @@ import java.nio.channels.GatheringByteChannel; import java.nio.channels.ScatteringByteChannel; - /** * Read-only ByteBuf which wraps a read-only ByteBuffer. 
*/ @@ -51,6 +50,36 @@ class ReadOnlyByteBufferBuf extends AbstractReferenceCountedByteBuf { @Override protected void deallocate() { } + @Override + public boolean isWritable() { + return false; + } + + @Override + public boolean isWritable(int numBytes) { + return false; + } + + @Override + public ByteBuf ensureWritable(int minWritableBytes) { + throw new ReadOnlyBufferException(); + } + + @Override + public int ensureWritable(int minWritableBytes, boolean force) { + return 1; + } + + @Override + public int writableBytes() { + return 0; + } + + @Override + public int maxWritableBytes() { + return 0; + } + @Override public byte getByte(int index) { ensureAccessible(); @@ -175,11 +204,6 @@ public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { checkDstIndex(index, length, dstIndex, dst.length); - if (dstIndex < 0 || dstIndex > dst.length - length) { - throw new IndexOutOfBoundsException(String.format( - "dstIndex: %d, length: %d (expected: range(0, %d))", dstIndex, length, dst.length)); - } - ByteBuffer tmpBuf = internalNioBuffer(); tmpBuf.clear().position(index).limit(index + length); tmpBuf.get(dst, dstIndex, length); @@ -188,14 +212,10 @@ public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { @Override public ByteBuf getBytes(int index, ByteBuffer dst) { - checkIndex(index); - if (dst == null) { - throw new NullPointerException("dst"); - } + checkIndex(index, dst.remaining()); - int bytesToCopy = Math.min(capacity() - index, dst.remaining()); ByteBuffer tmpBuf = internalNioBuffer(); - tmpBuf.clear().position(index).limit(index + bytesToCopy); + tmpBuf.clear().position(index).limit(index + dst.remaining()); dst.put(tmpBuf); return this; } @@ -335,11 +355,11 @@ public ByteBuf getBytes(int index, OutputStream out, int length) throws IOExcept if (buffer.hasArray()) { out.write(buffer.array(), index + buffer.arrayOffset(), length); } else { - byte[] tmp = 
new byte[length]; + byte[] tmp = ByteBufUtil.threadLocalTempArray(length); ByteBuffer tmpBuf = internalNioBuffer(); tmpBuf.clear().position(index); - tmpBuf.get(tmp); - out.write(tmp); + tmpBuf.get(tmp, 0, length); + out.write(tmp, 0, length); } return this; } @@ -433,6 +453,7 @@ public ByteBuffer[] nioBuffers(int index, int length) { @Override public ByteBuffer nioBuffer(int index, int length) { + checkIndex(index, length); return (ByteBuffer) buffer.duplicate().position(index).limit(index + length); } @@ -442,6 +463,11 @@ public ByteBuffer internalNioBuffer(int index, int length) { return (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length); } + @Override + public final boolean isContiguous() { + return true; + } + @Override public boolean hasArray() { return buffer.hasArray(); diff --git a/buffer/src/main/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBuf.java b/buffer/src/main/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBuf.java index 316760eca7a..154ebe8bfd9 100644 --- a/buffer/src/main/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,10 +16,12 @@ package io.netty.buffer; -import io.netty.util.internal.PlatformDependent; +import static java.util.Objects.requireNonNull; import java.nio.ByteBuffer; +import io.netty.util.internal.PlatformDependent; + /** * Read-only ByteBuf which wraps a read-only direct ByteBuffer and use unsafe for best performance. 
@@ -62,9 +64,7 @@ protected long _getLong(int index) { @Override public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { checkIndex(index, length); - if (dst == null) { - throw new NullPointerException("dst"); - } + requireNonNull(dst, "dst"); if (dstIndex < 0 || dstIndex > dst.capacity() - length) { throw new IndexOutOfBoundsException("dstIndex: " + dstIndex); } @@ -82,9 +82,7 @@ public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { @Override public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { checkIndex(index, length); - if (dst == null) { - throw new NullPointerException("dst"); - } + requireNonNull(dst, "dst"); if (dstIndex < 0 || dstIndex > dst.length - length) { throw new IndexOutOfBoundsException(String.format( "dstIndex: %d, length: %d (expected: range(0, %d))", dstIndex, length, dst.length)); @@ -96,20 +94,6 @@ public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { return this; } - @Override - public ByteBuf getBytes(int index, ByteBuffer dst) { - checkIndex(index); - if (dst == null) { - throw new NullPointerException("dst"); - } - - int bytesToCopy = Math.min(capacity() - index, dst.remaining()); - ByteBuffer tmpBuf = internalNioBuffer(); - tmpBuf.clear().position(index).limit(index + bytesToCopy); - dst.put(tmpBuf); - return this; - } - @Override public ByteBuf copy(int index, int length) { checkIndex(index, length); diff --git a/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareByteBuf.java b/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareByteBuf.java index e07171258a2..5d6523b4fa7 100644 --- a/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,9 +16,10 @@ package io.netty.buffer; +import static java.util.Objects.requireNonNull; + import io.netty.util.ResourceLeakDetector; import io.netty.util.ResourceLeakTracker; -import io.netty.util.internal.ObjectUtil; import java.nio.ByteOrder; @@ -34,8 +35,8 @@ class SimpleLeakAwareByteBuf extends WrappedByteBuf { SimpleLeakAwareByteBuf(ByteBuf wrapped, ByteBuf trackedByteBuf, ResourceLeakTracker leak) { super(wrapped); - this.trackedByteBuf = ObjectUtil.checkNotNull(trackedByteBuf, "trackedByteBuf"); - this.leak = ObjectUtil.checkNotNull(leak, "leak"); + this.trackedByteBuf = requireNonNull(trackedByteBuf, "trackedByteBuf"); + this.leak = requireNonNull(leak, "leak"); } SimpleLeakAwareByteBuf(ByteBuf wrapped, ResourceLeakTracker leak) { diff --git a/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareCompositeByteBuf.java b/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareCompositeByteBuf.java index d96f35c0c56..686f122fbff 100644 --- a/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareCompositeByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/SimpleLeakAwareCompositeByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,9 +15,9 @@ */ package io.netty.buffer; +import static java.util.Objects.requireNonNull; import io.netty.util.ResourceLeakTracker; -import io.netty.util.internal.ObjectUtil; import java.nio.ByteOrder; @@ -27,7 +27,7 @@ class SimpleLeakAwareCompositeByteBuf extends WrappedCompositeByteBuf { SimpleLeakAwareCompositeByteBuf(CompositeByteBuf wrapped, ResourceLeakTracker leak) { super(wrapped); - this.leak = ObjectUtil.checkNotNull(leak, "leak"); + this.leak = requireNonNull(leak, "leak"); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/SizeClasses.java b/buffer/src/main/java/io/netty/buffer/SizeClasses.java new file mode 100644 index 00000000000..04fe5c2cbf7 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/SizeClasses.java @@ -0,0 +1,407 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer; + +import static io.netty.buffer.PoolThreadCache.*; + +/** + * SizeClasses requires {@code pageShifts} to be defined prior to inclusion, + * and it in turn defines: + *

    + * LOG2_SIZE_CLASS_GROUP: Log of size class count for each size doubling. + * LOG2_MAX_LOOKUP_SIZE: Log of max size class in the lookup table. + * sizeClasses: Complete table of [index, log2Group, log2Delta, nDelta, isMultiPageSize, + * isSubPage, log2DeltaLookup] tuples. + * index: Size class index. + * log2Group: Log of group base size (no deltas added). + * log2Delta: Log of delta to previous size class. + * nDelta: Delta multiplier. + * isMultiPageSize: 'yes' if a multiple of the page size, 'no' otherwise. + * isSubPage: 'yes' if a subpage size class, 'no' otherwise. + * log2DeltaLookup: Same as log2Delta if a lookup table size class, 'no' + * otherwise. + *

    + * nSubpages: Number of subpages size classes. + * nSizes: Number of size classes. + * nPSizes: Number of size classes that are multiples of pageSize. + * + * smallMaxSizeIdx: Maximum small size class index. + * + * lookupMaxclass: Maximum size class included in lookup table. + * log2NormalMinClass: Log of minimum normal size class. + *

    + * The first size class and spacing are 1 << LOG2_QUANTUM. + * Each group has 1 << LOG2_SIZE_CLASS_GROUP of size classes. + * + * size = 1 << log2Group + nDelta * (1 << log2Delta) + * + * The first size class has an unusual encoding, because the size has to be + * split between group and delta*nDelta. + * + * If pageShift = 13, sizeClasses looks like this: + * + * (index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup) + *

    + * ( 0, 4, 4, 0, no, yes, 4) + * ( 1, 4, 4, 1, no, yes, 4) + * ( 2, 4, 4, 2, no, yes, 4) + * ( 3, 4, 4, 3, no, yes, 4) + *

    + * ( 4, 6, 4, 1, no, yes, 4) + * ( 5, 6, 4, 2, no, yes, 4) + * ( 6, 6, 4, 3, no, yes, 4) + * ( 7, 6, 4, 4, no, yes, 4) + *

    + * ( 8, 7, 5, 1, no, yes, 5) + * ( 9, 7, 5, 2, no, yes, 5) + * ( 10, 7, 5, 3, no, yes, 5) + * ( 11, 7, 5, 4, no, yes, 5) + * ... + * ... + * ( 72, 23, 21, 1, yes, no, no) + * ( 73, 23, 21, 2, yes, no, no) + * ( 74, 23, 21, 3, yes, no, no) + * ( 75, 23, 21, 4, yes, no, no) + *

    + * ( 76, 24, 22, 1, yes, no, no) + */ +abstract class SizeClasses implements SizeClassesMetric { + + static final int LOG2_QUANTUM = 4; + + private static final int LOG2_SIZE_CLASS_GROUP = 2; + private static final int LOG2_MAX_LOOKUP_SIZE = 12; + + private static final int INDEX_IDX = 0; + private static final int LOG2GROUP_IDX = 1; + private static final int LOG2DELTA_IDX = 2; + private static final int NDELTA_IDX = 3; + private static final int PAGESIZE_IDX = 4; + private static final int SUBPAGE_IDX = 5; + private static final int LOG2_DELTA_LOOKUP_IDX = 6; + + private static final byte no = 0, yes = 1; + + protected SizeClasses(int pageSize, int pageShifts, int chunkSize, int directMemoryCacheAlignment) { + this.pageSize = pageSize; + this.pageShifts = pageShifts; + this.chunkSize = chunkSize; + this.directMemoryCacheAlignment = directMemoryCacheAlignment; + + int group = log2(chunkSize) + 1 - LOG2_QUANTUM; + + //generate size classes + //[index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup] + sizeClasses = new short[group << LOG2_SIZE_CLASS_GROUP][7]; + nSizes = sizeClasses(); + + //generate lookup table + sizeIdx2sizeTab = new int[nSizes]; + pageIdx2sizeTab = new int[nPSizes]; + idx2SizeTab(sizeIdx2sizeTab, pageIdx2sizeTab); + + size2idxTab = new int[lookupMaxSize >> LOG2_QUANTUM]; + size2idxTab(size2idxTab); + } + + protected final int pageSize; + protected final int pageShifts; + protected final int chunkSize; + protected final int directMemoryCacheAlignment; + + final int nSizes; + int nSubpages; + int nPSizes; + + int smallMaxSizeIdx; + + private int lookupMaxSize; + + private final short[][] sizeClasses; + + private final int[] pageIdx2sizeTab; + + // lookup table for sizeIdx <= smallMaxSizeIdx + private final int[] sizeIdx2sizeTab; + + // lookup table used for size <= lookupMaxclass + // spacing is 1 << LOG2_QUANTUM, so the size of array is lookupMaxclass >> LOG2_QUANTUM + private final int[] size2idxTab; + + private 
int sizeClasses() { + int normalMaxSize = -1; + + int index = 0; + int size = 0; + + int log2Group = LOG2_QUANTUM; + int log2Delta = LOG2_QUANTUM; + int ndeltaLimit = 1 << LOG2_SIZE_CLASS_GROUP; + + //First small group, nDelta start at 0. + //first size class is 1 << LOG2_QUANTUM + int nDelta = 0; + while (nDelta < ndeltaLimit) { + size = sizeClass(index++, log2Group, log2Delta, nDelta++); + } + log2Group += LOG2_SIZE_CLASS_GROUP; + + //All remaining groups, nDelta start at 1. + while (size < chunkSize) { + nDelta = 1; + + while (nDelta <= ndeltaLimit && size < chunkSize) { + size = sizeClass(index++, log2Group, log2Delta, nDelta++); + normalMaxSize = size; + } + + log2Group++; + log2Delta++; + } + + //chunkSize must be normalMaxSize + assert chunkSize == normalMaxSize; + + //return number of size index + return index; + } + + //calculate size class + private int sizeClass(int index, int log2Group, int log2Delta, int nDelta) { + short isMultiPageSize; + if (log2Delta >= pageShifts) { + isMultiPageSize = yes; + } else { + int pageSize = 1 << pageShifts; + int size = (1 << log2Group) + (1 << log2Delta) * nDelta; + + isMultiPageSize = size == size / pageSize * pageSize? yes : no; + } + + int log2Ndelta = nDelta == 0? 0 : log2(nDelta); + + byte remove = 1 << log2Ndelta < nDelta? yes : no; + + int log2Size = log2Delta + log2Ndelta == log2Group? log2Group + 1 : log2Group; + if (log2Size == log2Group) { + remove = yes; + } + + short isSubpage = log2Size < pageShifts + LOG2_SIZE_CLASS_GROUP? yes : no; + + int log2DeltaLookup = log2Size < LOG2_MAX_LOOKUP_SIZE || + log2Size == LOG2_MAX_LOOKUP_SIZE && remove == no + ? 
log2Delta : no; + + short[] sz = { + (short) index, (short) log2Group, (short) log2Delta, + (short) nDelta, isMultiPageSize, isSubpage, (short) log2DeltaLookup + }; + + sizeClasses[index] = sz; + int size = (1 << log2Group) + (nDelta << log2Delta); + + if (sz[PAGESIZE_IDX] == yes) { + nPSizes++; + } + if (sz[SUBPAGE_IDX] == yes) { + nSubpages++; + smallMaxSizeIdx = index; + } + if (sz[LOG2_DELTA_LOOKUP_IDX] != no) { + lookupMaxSize = size; + } + return size; + } + + private void idx2SizeTab(int[] sizeIdx2sizeTab, int[] pageIdx2sizeTab) { + int pageIdx = 0; + + for (int i = 0; i < nSizes; i++) { + short[] sizeClass = sizeClasses[i]; + int log2Group = sizeClass[LOG2GROUP_IDX]; + int log2Delta = sizeClass[LOG2DELTA_IDX]; + int nDelta = sizeClass[NDELTA_IDX]; + + int size = (1 << log2Group) + (nDelta << log2Delta); + sizeIdx2sizeTab[i] = size; + + if (sizeClass[PAGESIZE_IDX] == yes) { + pageIdx2sizeTab[pageIdx++] = size; + } + } + } + + private void size2idxTab(int[] size2idxTab) { + int idx = 0; + int size = 0; + + for (int i = 0; size <= lookupMaxSize; i++) { + int log2Delta = sizeClasses[i][LOG2DELTA_IDX]; + int times = 1 << log2Delta - LOG2_QUANTUM; + + while (size <= lookupMaxSize && times-- > 0) { + size2idxTab[idx++] = i; + size = idx + 1 << LOG2_QUANTUM; + } + } + } + + @Override + public int sizeIdx2size(int sizeIdx) { + return sizeIdx2sizeTab[sizeIdx]; + } + + @Override + public int sizeIdx2sizeCompute(int sizeIdx) { + int group = sizeIdx >> LOG2_SIZE_CLASS_GROUP; + int mod = sizeIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + int groupSize = group == 0? 0 : + 1 << LOG2_QUANTUM + LOG2_SIZE_CLASS_GROUP - 1 << group; + + int shift = group == 0? 
1 : group; + int lgDelta = shift + LOG2_QUANTUM - 1; + int modSize = mod + 1 << lgDelta; + + return groupSize + modSize; + } + + @Override + public long pageIdx2size(int pageIdx) { + return pageIdx2sizeTab[pageIdx]; + } + + @Override + public long pageIdx2sizeCompute(int pageIdx) { + int group = pageIdx >> LOG2_SIZE_CLASS_GROUP; + int mod = pageIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + long groupSize = group == 0? 0 : + 1L << pageShifts + LOG2_SIZE_CLASS_GROUP - 1 << group; + + int shift = group == 0? 1 : group; + int log2Delta = shift + pageShifts - 1; + int modSize = mod + 1 << log2Delta; + + return groupSize + modSize; + } + + @Override + public int size2SizeIdx(int size) { + if (size == 0) { + return 0; + } + if (size > chunkSize) { + return nSizes; + } + + if (directMemoryCacheAlignment > 0) { + size = alignSize(size); + } + + if (size <= lookupMaxSize) { + //size-1 / MIN_TINY + return size2idxTab[size - 1 >> LOG2_QUANTUM]; + } + + int x = log2((size << 1) - 1); + int shift = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1 + ? 0 : x - (LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM); + + int group = shift << LOG2_SIZE_CLASS_GROUP; + + int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1 + ? LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1; + + int deltaInverseMask = -1 << log2Delta; + int mod = (size - 1 & deltaInverseMask) >> log2Delta & + (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + return group + mod; + } + + @Override + public int pages2pageIdx(int pages) { + return pages2pageIdxCompute(pages, false); + } + + @Override + public int pages2pageIdxFloor(int pages) { + return pages2pageIdxCompute(pages, true); + } + + private int pages2pageIdxCompute(int pages, boolean floor) { + int pageSize = pages << pageShifts; + if (pageSize > chunkSize) { + return nPSizes; + } + + int x = log2((pageSize << 1) - 1); + + int shift = x < LOG2_SIZE_CLASS_GROUP + pageShifts + ? 
0 : x - (LOG2_SIZE_CLASS_GROUP + pageShifts); + + int group = shift << LOG2_SIZE_CLASS_GROUP; + + int log2Delta = x < LOG2_SIZE_CLASS_GROUP + pageShifts + 1? + pageShifts : x - LOG2_SIZE_CLASS_GROUP - 1; + + int deltaInverseMask = -1 << log2Delta; + int mod = (pageSize - 1 & deltaInverseMask) >> log2Delta & + (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + int pageIdx = group + mod; + + if (floor && pageIdx2sizeTab[pageIdx] > pages << pageShifts) { + pageIdx--; + } + + return pageIdx; + } + + // Round size up to the nearest multiple of alignment. + private int alignSize(int size) { + int delta = size & directMemoryCacheAlignment - 1; + return delta == 0? size : size + directMemoryCacheAlignment - delta; + } + + @Override + public int normalizeSize(int size) { + if (size == 0) { + return sizeIdx2sizeTab[0]; + } + if (directMemoryCacheAlignment > 0) { + size = alignSize(size); + } + + if (size <= lookupMaxSize) { + int ret = sizeIdx2sizeTab[size2idxTab[size - 1 >> LOG2_QUANTUM]]; + assert ret == normalizeSizeCompute(size); + return ret; + } + return normalizeSizeCompute(size); + } + + private static int normalizeSizeCompute(int size) { + int x = log2((size << 1) - 1); + int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1 + ? LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1; + int delta = 1 << log2Delta; + int delta_mask = delta - 1; + return size + delta_mask & ~delta_mask; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/SizeClassesMetric.java b/buffer/src/main/java/io/netty/buffer/SizeClassesMetric.java new file mode 100644 index 00000000000..17ade94ac86 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/SizeClassesMetric.java @@ -0,0 +1,87 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer; + +/** + * Expose metrics for an SizeClasses. + */ +public interface SizeClassesMetric { + + /** + * Computes size from lookup table according to sizeIdx. + * + * @return size + */ + int sizeIdx2size(int sizeIdx); + + /** + * Computes size according to sizeIdx. + * + * @return size + */ + int sizeIdx2sizeCompute(int sizeIdx); + + /** + * Computes size from lookup table according to pageIdx. + * + * @return size which is multiples of pageSize. + */ + long pageIdx2size(int pageIdx); + + /** + * Computes size according to pageIdx. + * + * @return size which is multiples of pageSize + */ + long pageIdx2sizeCompute(int pageIdx); + + /** + * Normalizes request size up to the nearest size class. + * + * @param size request size + * + * @return sizeIdx of the size class + */ + int size2SizeIdx(int size); + + /** + * Normalizes request size up to the nearest pageSize class. + * + * @param pages multiples of pageSizes + * + * @return pageIdx of the pageSize class + */ + int pages2pageIdx(int pages); + + /** + * Normalizes request size down to the nearest pageSize class. + * + * @param pages multiples of pageSizes + * + * @return pageIdx of the pageSize class + */ + int pages2pageIdxFloor(int pages); + + /** + * Normalizes usable size that would result from allocating an object with the + * specified size and alignment. 
+ * + * @param size request size + * + * @return normalized size + */ + int normalizeSize(int size); +} diff --git a/buffer/src/main/java/io/netty/buffer/SlicedByteBuf.java b/buffer/src/main/java/io/netty/buffer/SlicedByteBuf.java index d7bb7ab988a..be0b71101eb 100644 --- a/buffer/src/main/java/io/netty/buffer/SlicedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/SlicedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java b/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java index 5d54a1f9f14..995de9ea888 100644 --- a/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/SwappedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.buffer; +import static java.util.Objects.requireNonNull; + import io.netty.util.ByteProcessor; import java.io.IOException; @@ -40,9 +42,7 @@ public class SwappedByteBuf extends ByteBuf { private final ByteOrder order; public SwappedByteBuf(ByteBuf buf) { - if (buf == null) { - throw new NullPointerException("buf"); - } + requireNonNull(buf, "buf"); this.buf = buf; if (buf.order() == ByteOrder.BIG_ENDIAN) { order = ByteOrder.LITTLE_ENDIAN; @@ -58,9 +58,7 @@ public ByteOrder order() { @Override public ByteBuf order(ByteOrder endianness) { - if (endianness == null) { - throw new NullPointerException("endianness"); - } + requireNonNull(endianness, "endianness"); if (endianness == order) { return this; } @@ -151,6 +149,11 @@ public int maxWritableBytes() { return buf.maxWritableBytes(); } + @Override + public int maxFastWritableBytes() { + return buf.maxFastWritableBytes(); + } + @Override public boolean isReadable() { return buf.isReadable(); @@ -177,30 +180,6 @@ public ByteBuf clear() { return this; } - @Override - public ByteBuf markReaderIndex() { - buf.markReaderIndex(); - return this; - } - - @Override - public ByteBuf resetReaderIndex() { - buf.resetReaderIndex(); - return this; - } - - @Override - public ByteBuf markWriterIndex() { - buf.markWriterIndex(); - return this; - } - - @Override - public ByteBuf resetWriterIndex() { - buf.resetWriterIndex(); - return this; - } - @Override public ByteBuf discardReadBytes() { buf.discardReadBytes(); @@ -543,7 +522,7 @@ public short readShort() { @Override public short readShortLE() { - return buf.readShort(); + return buf.readShortLE(); } @Override @@ -563,7 +542,7 @@ public int readMedium() { @Override 
public int readMediumLE() { - return buf.readMedium(); + return buf.readMediumLE(); } @Override @@ -583,7 +562,7 @@ public int readInt() { @Override public int readIntLE() { - return buf.readInt(); + return buf.readIntLE(); } @Override @@ -603,7 +582,7 @@ public long readLong() { @Override public long readLongLE() { - return buf.readLong(); + return buf.readLongLE(); } @Override @@ -719,7 +698,7 @@ public ByteBuf writeShort(int value) { @Override public ByteBuf writeShortLE(int value) { - buf.writeShort((short) value); + buf.writeShortLE((short) value); return this; } @@ -731,7 +710,7 @@ public ByteBuf writeMedium(int value) { @Override public ByteBuf writeMediumLE(int value) { - buf.writeMedium(value); + buf.writeMediumLE(value); return this; } @@ -743,7 +722,7 @@ public ByteBuf writeInt(int value) { @Override public ByteBuf writeIntLE(int value) { - buf.writeInt(value); + buf.writeIntLE(value); return this; } @@ -755,7 +734,7 @@ public ByteBuf writeLong(long value) { @Override public ByteBuf writeLongLE(long value) { - buf.writeLong(value); + buf.writeLongLE(value); return this; } @@ -977,6 +956,11 @@ public boolean hasMemoryAddress() { return buf.hasMemoryAddress(); } + @Override + public boolean isContiguous() { + return buf.isContiguous(); + } + @Override public long memoryAddress() { return buf.memoryAddress(); @@ -997,6 +981,11 @@ public int refCnt() { return buf.refCnt(); } + @Override + final boolean isAccessible() { + return buf.isAccessible(); + } + @Override public ByteBuf retain() { buf.retain(); @@ -1038,9 +1027,6 @@ public int hashCode() { @Override public boolean equals(Object obj) { - if (this == obj) { - return true; - } if (obj instanceof ByteBuf) { return ByteBufUtil.equals(this, (ByteBuf) obj); } diff --git a/buffer/src/main/java/io/netty/buffer/Unpooled.java b/buffer/src/main/java/io/netty/buffer/Unpooled.java index 3639a177684..66b94e3c186 100644 --- a/buffer/src/main/java/io/netty/buffer/Unpooled.java +++ 
b/buffer/src/main/java/io/netty/buffer/Unpooled.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,14 +15,17 @@ */ package io.netty.buffer; +import static java.util.Objects.requireNonNull; + +import io.netty.buffer.CompositeByteBuf.ByteWrapper; +import io.netty.util.CharsetUtil; import io.netty.util.internal.PlatformDependent; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.CharBuffer; import java.nio.charset.Charset; -import java.util.ArrayList; -import java.util.List; +import java.util.Arrays; /** @@ -219,7 +222,7 @@ public static ByteBuf wrappedBuffer(long memoryAddress, int size, boolean doFree * Creates a new buffer which wraps the specified buffer's readable bytes. * A modification on the specified buffer's content will be visible to the * returned buffer. - * @param buffer The buffer to wrap. Reference count ownership of this variable is transfered to this method. + * @param buffer The buffer to wrap. Reference count ownership of this variable is transferred to this method. * @return The readable portion of the {@code buffer}, or an empty buffer if there is no readable portion. * The caller is responsible for releasing this buffer. */ @@ -238,18 +241,18 @@ public static ByteBuf wrappedBuffer(ByteBuf buffer) { * content will be visible to the returned buffer. */ public static ByteBuf wrappedBuffer(byte[]... arrays) { - return wrappedBuffer(AbstractByteBufAllocator.DEFAULT_MAX_COMPONENTS, arrays); + return wrappedBuffer(arrays.length, arrays); } /** * Creates a new big-endian composite buffer which wraps the readable bytes of the * specified buffers without copying them. 
A modification on the content * of the specified buffers will be visible to the returned buffer. - * @param buffers The buffers to wrap. Reference count ownership of all variables is transfered to this method. + * @param buffers The buffers to wrap. Reference count ownership of all variables is transferred to this method. * @return The readable portion of the {@code buffers}. The caller is responsible for releasing this buffer. */ public static ByteBuf wrappedBuffer(ByteBuf... buffers) { - return wrappedBuffer(AbstractByteBufAllocator.DEFAULT_MAX_COMPONENTS, buffers); + return wrappedBuffer(buffers.length, buffers); } /** @@ -258,50 +261,49 @@ public static ByteBuf wrappedBuffer(ByteBuf... buffers) { * specified buffers will be visible to the returned buffer. */ public static ByteBuf wrappedBuffer(ByteBuffer... buffers) { - return wrappedBuffer(AbstractByteBufAllocator.DEFAULT_MAX_COMPONENTS, buffers); + return wrappedBuffer(buffers.length, buffers); } - /** - * Creates a new big-endian composite buffer which wraps the specified - * arrays without copying them. A modification on the specified arrays' - * content will be visible to the returned buffer. - */ - public static ByteBuf wrappedBuffer(int maxNumComponents, byte[]... arrays) { - switch (arrays.length) { + static ByteBuf wrappedBuffer(int maxNumComponents, ByteWrapper wrapper, T[] array) { + switch (array.length) { case 0: break; case 1: - if (arrays[0].length != 0) { - return wrappedBuffer(arrays[0]); + if (!wrapper.isEmpty(array[0])) { + return wrapper.wrap(array[0]); } break; default: - // Get the list of the component, while guessing the byte order. 
- final List components = new ArrayList(arrays.length); - for (byte[] a: arrays) { - if (a == null) { - break; + for (int i = 0, len = array.length; i < len; i++) { + T bytes = array[i]; + if (bytes == null) { + return EMPTY_BUFFER; } - if (a.length > 0) { - components.add(wrappedBuffer(a)); + if (!wrapper.isEmpty(bytes)) { + return new CompositeByteBuf(ALLOC, false, maxNumComponents, wrapper, array, i); } } - - if (!components.isEmpty()) { - return new CompositeByteBuf(ALLOC, false, maxNumComponents, components); - } } return EMPTY_BUFFER; } + /** + * Creates a new big-endian composite buffer which wraps the specified + * arrays without copying them. A modification on the specified arrays' + * content will be visible to the returned buffer. + */ + public static ByteBuf wrappedBuffer(int maxNumComponents, byte[]... arrays) { + return wrappedBuffer(maxNumComponents, CompositeByteBuf.BYTE_ARRAY_WRAPPER, arrays); + } + /** * Creates a new big-endian composite buffer which wraps the readable bytes of the * specified buffers without copying them. A modification on the content * of the specified buffers will be visible to the returned buffer. * @param maxNumComponents Advisement as to how many independent buffers are allowed to exist before * consolidation occurs. - * @param buffers The buffers to wrap. Reference count ownership of all variables is transfered to this method. + * @param buffers The buffers to wrap. Reference count ownership of all variables is transferred to this method. * @return The readable portion of the {@code buffers}. The caller is responsible for releasing this buffer. */ public static ByteBuf wrappedBuffer(int maxNumComponents, ByteBuf... buffers) { @@ -320,7 +322,7 @@ public static ByteBuf wrappedBuffer(int maxNumComponents, ByteBuf... 
buffers) { for (int i = 0; i < buffers.length; i++) { ByteBuf buf = buffers[i]; if (buf.isReadable()) { - return new CompositeByteBuf(ALLOC, false, maxNumComponents, buffers, i, buffers.length); + return new CompositeByteBuf(ALLOC, false, maxNumComponents, buffers, i); } buf.release(); } @@ -335,32 +337,7 @@ public static ByteBuf wrappedBuffer(int maxNumComponents, ByteBuf... buffers) { * specified buffers will be visible to the returned buffer. */ public static ByteBuf wrappedBuffer(int maxNumComponents, ByteBuffer... buffers) { - switch (buffers.length) { - case 0: - break; - case 1: - if (buffers[0].hasRemaining()) { - return wrappedBuffer(buffers[0].order(BIG_ENDIAN)); - } - break; - default: - // Get the list of the component, while guessing the byte order. - final List components = new ArrayList(buffers.length); - for (ByteBuffer b: buffers) { - if (b == null) { - break; - } - if (b.remaining() > 0) { - components.add(wrappedBuffer(b.order(BIG_ENDIAN))); - } - } - - if (!components.isEmpty()) { - return new CompositeByteBuf(ALLOC, false, maxNumComponents, components); - } - } - - return EMPTY_BUFFER; + return wrappedBuffer(maxNumComponents, CompositeByteBuf.BYTE_BUFFER_WRAPPER, buffers); } /** @@ -399,7 +376,7 @@ public static ByteBuf copiedBuffer(byte[] array, int offset, int length) { if (length == 0) { return EMPTY_BUFFER; } - byte[] copy = new byte[length]; + byte[] copy = PlatformDependent.allocateUninitializedArray(length); System.arraycopy(array, offset, copy, 0, length); return wrappedBuffer(copy); } @@ -415,7 +392,7 @@ public static ByteBuf copiedBuffer(ByteBuffer buffer) { if (length == 0) { return EMPTY_BUFFER; } - byte[] copy = new byte[length]; + byte[] copy = PlatformDependent.allocateUninitializedArray(length); // Duplicate the buffer so we not adjust the position during our get operation. // See https://github.com/netty/netty/issues/3896 ByteBuffer duplicate = buffer.duplicate(); @@ -472,7 +449,7 @@ public static ByteBuf copiedBuffer(byte[]... 
arrays) { return EMPTY_BUFFER; } - byte[] mergedArray = new byte[length]; + byte[] mergedArray = PlatformDependent.allocateUninitializedArray(length); for (int i = 0, j = 0; i < arrays.length; i ++) { byte[] a = arrays[i]; System.arraycopy(a, 0, mergedArray, j, a.length); @@ -526,7 +503,7 @@ public static ByteBuf copiedBuffer(ByteBuf... buffers) { return EMPTY_BUFFER; } - byte[] mergedArray = new byte[length]; + byte[] mergedArray = PlatformDependent.allocateUninitializedArray(length); for (int i = 0, j = 0; i < buffers.length; i ++) { ByteBuf b = buffers[i]; int bLen = b.readableBytes(); @@ -581,7 +558,7 @@ public static ByteBuf copiedBuffer(ByteBuffer... buffers) { return EMPTY_BUFFER; } - byte[] mergedArray = new byte[length]; + byte[] mergedArray = PlatformDependent.allocateUninitializedArray(length); for (int i = 0, j = 0; i < buffers.length; i ++) { // Duplicate the buffer so we not adjust the position during our get operation. // See https://github.com/netty/netty/issues/3896 @@ -601,10 +578,14 @@ public static ByteBuf copiedBuffer(ByteBuffer... buffers) { * {@code 0} and the length of the encoded string respectively. */ public static ByteBuf copiedBuffer(CharSequence string, Charset charset) { - if (string == null) { - throw new NullPointerException("string"); - } + requireNonNull(string, "string"); + if (CharsetUtil.UTF_8.equals(charset)) { + return copiedBufferUtf8(string); + } + if (CharsetUtil.US_ASCII.equals(charset)) { + return copiedBufferAscii(string); + } if (string instanceof CharBuffer) { return copiedBuffer((CharBuffer) string, charset); } @@ -612,6 +593,36 @@ public static ByteBuf copiedBuffer(CharSequence string, Charset charset) { return copiedBuffer(CharBuffer.wrap(string), charset); } + private static ByteBuf copiedBufferUtf8(CharSequence string) { + boolean release = true; + // Mimic the same behavior as other copiedBuffer implementations. 
+ ByteBuf buffer = ALLOC.heapBuffer(ByteBufUtil.utf8Bytes(string)); + try { + ByteBufUtil.writeUtf8(buffer, string); + release = false; + return buffer; + } finally { + if (release) { + buffer.release(); + } + } + } + + private static ByteBuf copiedBufferAscii(CharSequence string) { + boolean release = true; + // Mimic the same behavior as other copiedBuffer implementations. + ByteBuf buffer = ALLOC.heapBuffer(string.length()); + try { + ByteBufUtil.writeAscii(buffer, string); + release = false; + return buffer; + } finally { + if (release) { + buffer.release(); + } + } + } + /** * Creates a new big-endian buffer whose content is a subregion of * the specified {@code string} encoded in the specified {@code charset}. @@ -620,9 +631,7 @@ public static ByteBuf copiedBuffer(CharSequence string, Charset charset) { */ public static ByteBuf copiedBuffer( CharSequence string, int offset, int length, Charset charset) { - if (string == null) { - throw new NullPointerException("string"); - } + requireNonNull(string, "string"); if (length == 0) { return EMPTY_BUFFER; } @@ -652,9 +661,7 @@ public static ByteBuf copiedBuffer( * {@code 0} and the length of the encoded string respectively. */ public static ByteBuf copiedBuffer(char[] array, Charset charset) { - if (array == null) { - throw new NullPointerException("array"); - } + requireNonNull(array, "array"); return copiedBuffer(array, 0, array.length, charset); } @@ -665,9 +672,7 @@ public static ByteBuf copiedBuffer(char[] array, Charset charset) { * {@code 0} and the length of the encoded string respectively. */ public static ByteBuf copiedBuffer(char[] array, int offset, int length, Charset charset) { - if (array == null) { - throw new NullPointerException("array"); - } + requireNonNull(array, "array"); if (length == 0) { return EMPTY_BUFFER; } @@ -881,9 +886,36 @@ public static ByteBuf unreleasableBuffer(ByteBuf buf) { /** * Wrap the given {@link ByteBuf}s in an unmodifiable {@link ByteBuf}. 
Be aware the returned {@link ByteBuf} will * not try to slice the given {@link ByteBuf}s to reduce GC-Pressure. + * + * @deprecated Use {@link #wrappedUnmodifiableBuffer(ByteBuf...)}. */ + @Deprecated public static ByteBuf unmodifiableBuffer(ByteBuf... buffers) { - return new FixedCompositeByteBuf(ALLOC, buffers); + return wrappedUnmodifiableBuffer(true, buffers); + } + + /** + * Wrap the given {@link ByteBuf}s in an unmodifiable {@link ByteBuf}. Be aware the returned {@link ByteBuf} will + * not try to slice the given {@link ByteBuf}s to reduce GC-Pressure. + * + * The returned {@link ByteBuf} may wrap the provided array directly, and so should not be subsequently modified. + */ + public static ByteBuf wrappedUnmodifiableBuffer(ByteBuf... buffers) { + return wrappedUnmodifiableBuffer(false, buffers); + } + + private static ByteBuf wrappedUnmodifiableBuffer(boolean copy, ByteBuf... buffers) { + switch (buffers.length) { + case 0: + return EMPTY_BUFFER; + case 1: + return buffers[0].asReadOnly(); + default: + if (copy) { + buffers = Arrays.copyOf(buffers, buffers.length, ByteBuf[].class); + } + return new FixedCompositeByteBuf(ALLOC, buffers); + } } private Unpooled() { diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledByteBufAllocator.java b/buffer/src/main/java/io/netty/buffer/UnpooledByteBufAllocator.java index 4edf0dcd3d9..2f7a015e93b 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledByteBufAllocator.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledByteBufAllocator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,11 +15,11 @@ */ package io.netty.buffer; -import io.netty.util.internal.LongCounter; import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import java.nio.ByteBuffer; +import java.util.concurrent.atomic.LongAdder; /** * Simplistic {@link ByteBufAllocator} implementation that does not pool anything. @@ -140,14 +140,14 @@ private static final class InstrumentedUnpooledUnsafeHeapByteBuf extends Unpoole } @Override - byte[] allocateArray(int initialCapacity) { + protected byte[] allocateArray(int initialCapacity) { byte[] bytes = super.allocateArray(initialCapacity); ((UnpooledByteBufAllocator) alloc()).incrementHeap(bytes.length); return bytes; } @Override - void freeArray(byte[] array) { + protected void freeArray(byte[] array) { int length = array.length; super.freeArray(array); ((UnpooledByteBufAllocator) alloc()).decrementHeap(length); @@ -160,14 +160,14 @@ private static final class InstrumentedUnpooledHeapByteBuf extends UnpooledHeapB } @Override - byte[] allocateArray(int initialCapacity) { + protected byte[] allocateArray(int initialCapacity) { byte[] bytes = super.allocateArray(initialCapacity); ((UnpooledByteBufAllocator) alloc()).incrementHeap(bytes.length); return bytes; } @Override - void freeArray(byte[] array) { + protected void freeArray(byte[] array) { int length = array.length; super.freeArray(array); ((UnpooledByteBufAllocator) alloc()).decrementHeap(length); @@ -247,17 +247,17 @@ protected void freeDirect(ByteBuffer buffer) { } private static final class UnpooledByteBufAllocatorMetric implements ByteBufAllocatorMetric { - final LongCounter directCounter = PlatformDependent.newLongCounter(); - final LongCounter heapCounter = 
PlatformDependent.newLongCounter(); + final LongAdder directCounter = new LongAdder(); + final LongAdder heapCounter = new LongAdder(); @Override public long usedHeapMemory() { - return heapCounter.value(); + return heapCounter.longValue(); } @Override public long usedDirectMemory() { - return directCounter.value(); + return directCounter.longValue(); } @Override diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledDirectByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnpooledDirectByteBuf.java index 3ce75d1d637..cef96b8050f 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledDirectByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledDirectByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,9 @@ */ package io.netty.buffer; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; + import io.netty.util.internal.PlatformDependent; import java.io.IOException; @@ -36,7 +39,7 @@ public class UnpooledDirectByteBuf extends AbstractReferenceCountedByteBuf { private final ByteBufAllocator alloc; - private ByteBuffer buffer; + ByteBuffer buffer; // accessed by UnpooledUnsafeNoCleanerDirectByteBuf.reallocateDirect() private ByteBuffer tmpNioBuf; private int capacity; private boolean doNotFree; @@ -49,22 +52,16 @@ public class UnpooledDirectByteBuf extends AbstractReferenceCountedByteBuf { */ public UnpooledDirectByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxCapacity) { super(maxCapacity); - if (alloc == null) { - throw new NullPointerException("alloc"); - } - if (initialCapacity < 0) { - throw new 
IllegalArgumentException("initialCapacity: " + initialCapacity); - } - if (maxCapacity < 0) { - throw new IllegalArgumentException("maxCapacity: " + maxCapacity); - } + requireNonNull(alloc, "alloc"); + checkPositiveOrZero(initialCapacity, "initialCapacity"); + checkPositiveOrZero(maxCapacity, "maxCapacity"); if (initialCapacity > maxCapacity) { throw new IllegalArgumentException(String.format( "initialCapacity(%d) > maxCapacity(%d)", initialCapacity, maxCapacity)); } this.alloc = alloc; - setByteBuffer(ByteBuffer.allocateDirect(initialCapacity)); + setByteBuffer(allocateDirect(initialCapacity), false); } /** @@ -73,13 +70,14 @@ public UnpooledDirectByteBuf(ByteBufAllocator alloc, int initialCapacity, int ma * @param maxCapacity the maximum capacity of the underlying direct buffer */ protected UnpooledDirectByteBuf(ByteBufAllocator alloc, ByteBuffer initialBuffer, int maxCapacity) { + this(alloc, initialBuffer, maxCapacity, false, true); + } + + UnpooledDirectByteBuf(ByteBufAllocator alloc, ByteBuffer initialBuffer, + int maxCapacity, boolean doFree, boolean slice) { super(maxCapacity); - if (alloc == null) { - throw new NullPointerException("alloc"); - } - if (initialBuffer == null) { - throw new NullPointerException("initialBuffer"); - } + requireNonNull(alloc, "alloc"); + requireNonNull(initialBuffer, "initialBuffer"); if (!initialBuffer.isDirect()) { throw new IllegalArgumentException("initialBuffer is not a direct buffer."); } @@ -94,8 +92,8 @@ protected UnpooledDirectByteBuf(ByteBufAllocator alloc, ByteBuffer initialBuffer } this.alloc = alloc; - doNotFree = true; - setByteBuffer(initialBuffer.slice().order(ByteOrder.BIG_ENDIAN)); + doNotFree = !doFree; + setByteBuffer((slice ? 
initialBuffer.slice() : initialBuffer).order(ByteOrder.BIG_ENDIAN), false); writerIndex(initialCapacity); } @@ -113,13 +111,15 @@ protected void freeDirect(ByteBuffer buffer) { PlatformDependent.freeDirectBuffer(buffer); } - private void setByteBuffer(ByteBuffer buffer) { - ByteBuffer oldBuffer = this.buffer; - if (oldBuffer != null) { - if (doNotFree) { - doNotFree = false; - } else { - freeDirect(oldBuffer); + void setByteBuffer(ByteBuffer buffer, boolean tryFree) { + if (tryFree) { + ByteBuffer oldBuffer = this.buffer; + if (oldBuffer != null) { + if (doNotFree) { + doNotFree = false; + } else { + freeDirect(oldBuffer); + } } } @@ -141,35 +141,23 @@ public int capacity() { @Override public ByteBuf capacity(int newCapacity) { checkNewCapacity(newCapacity); - - int readerIndex = readerIndex(); - int writerIndex = writerIndex(); - int oldCapacity = capacity; + if (newCapacity == oldCapacity) { + return this; + } + int bytesToCopy; if (newCapacity > oldCapacity) { - ByteBuffer oldBuffer = buffer; - ByteBuffer newBuffer = allocateDirect(newCapacity); - oldBuffer.position(0).limit(oldBuffer.capacity()); - newBuffer.position(0).limit(oldBuffer.capacity()); - newBuffer.put(oldBuffer); - newBuffer.clear(); - setByteBuffer(newBuffer); - } else if (newCapacity < oldCapacity) { - ByteBuffer oldBuffer = buffer; - ByteBuffer newBuffer = allocateDirect(newCapacity); - if (readerIndex < newCapacity) { - if (writerIndex > newCapacity) { - writerIndex(writerIndex = newCapacity); - } - oldBuffer.position(readerIndex).limit(writerIndex); - newBuffer.position(readerIndex).limit(writerIndex); - newBuffer.put(oldBuffer); - newBuffer.clear(); - } else { - setIndex(newCapacity, newCapacity); - } - setByteBuffer(newBuffer); + bytesToCopy = oldCapacity; + } else { + trimIndicesToCapacity(newCapacity); + bytesToCopy = newCapacity; } + ByteBuffer oldBuffer = buffer; + ByteBuffer newBuffer = allocateDirect(newCapacity); + oldBuffer.position(0).limit(bytesToCopy); + 
newBuffer.position(0).limit(bytesToCopy); + newBuffer.put(oldBuffer).clear(); + setByteBuffer(newBuffer, true); return this; } @@ -310,7 +298,7 @@ public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { return this; } - private void getBytes(int index, byte[] dst, int dstIndex, int length, boolean internal) { + void getBytes(int index, byte[] dst, int dstIndex, int length, boolean internal) { checkDstIndex(index, length, dstIndex, dst.length); ByteBuffer tmpBuf; @@ -337,7 +325,7 @@ public ByteBuf getBytes(int index, ByteBuffer dst) { return this; } - private void getBytes(int index, ByteBuffer dst, boolean internal) { + void getBytes(int index, ByteBuffer dst, boolean internal) { checkIndex(index, dst.remaining()); ByteBuffer tmpBuf; @@ -486,26 +474,12 @@ public ByteBuf getBytes(int index, OutputStream out, int length) throws IOExcept return this; } - private void getBytes(int index, OutputStream out, int length, boolean internal) throws IOException { + void getBytes(int index, OutputStream out, int length, boolean internal) throws IOException { ensureAccessible(); if (length == 0) { return; } - - if (buffer.hasArray()) { - out.write(buffer.array(), index + buffer.arrayOffset(), length); - } else { - byte[] tmp = new byte[length]; - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = buffer.duplicate(); - } - tmpBuf.clear().position(index); - tmpBuf.get(tmp); - out.write(tmp); - } + ByteBufUtil.readBytes(alloc(), internal ? 
internalNioBuffer() : buffer.duplicate(), index, length, out); } @Override @@ -575,8 +549,8 @@ public int setBytes(int index, InputStream in, int length) throws IOException { if (buffer.hasArray()) { return in.read(buffer.array(), buffer.arrayOffset() + index, length); } else { - byte[] tmp = new byte[length]; - int readBytes = in.read(tmp); + byte[] tmp = ByteBufUtil.threadLocalTempArray(length); + int readBytes = in.read(tmp, 0, length); if (readBytes <= 0) { return readBytes; } @@ -593,7 +567,7 @@ public int setBytes(int index, ScatteringByteChannel in, int length) throws IOEx ByteBuffer tmpBuf = internalNioBuffer(); tmpBuf.clear().position(index).limit(index + length); try { - return in.read(tmpNioBuf); + return in.read(tmpBuf); } catch (ClosedChannelException ignored) { return -1; } @@ -605,7 +579,7 @@ public int setBytes(int index, FileChannel in, long position, int length) throws ByteBuffer tmpBuf = internalNioBuffer(); tmpBuf.clear().position(index).limit(index + length); try { - return in.read(tmpNioBuf, position); + return in.read(tmpBuf, position); } catch (ClosedChannelException ignored) { return -1; } @@ -621,6 +595,11 @@ public ByteBuffer[] nioBuffers(int index, int length) { return new ByteBuffer[] { nioBuffer(index, length) }; } + @Override + public final boolean isContiguous() { + return true; + } + @Override public ByteBuf copy(int index, int length) { ensureAccessible(); diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledDuplicatedByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnpooledDuplicatedByteBuf.java index d1470141155..5b9ea1dc52c 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledDuplicatedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledDuplicatedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledHeapByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnpooledHeapByteBuf.java index 819e0cb21d4..4aefaff9e80 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledHeapByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledHeapByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,9 @@ */ package io.netty.buffer; +import static java.util.Objects.requireNonNull; + +import io.netty.util.internal.EmptyArrays; import io.netty.util.internal.PlatformDependent; import java.io.IOException; @@ -27,8 +30,6 @@ import java.nio.channels.GatheringByteChannel; import java.nio.channels.ScatteringByteChannel; -import static io.netty.util.internal.ObjectUtil.checkNotNull; - /** * Big endian Java heap buffer implementation. 
It is recommended to use * {@link UnpooledByteBufAllocator#heapBuffer(int, int)}, {@link Unpooled#buffer(int)} and @@ -49,7 +50,7 @@ public class UnpooledHeapByteBuf extends AbstractReferenceCountedByteBuf { public UnpooledHeapByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxCapacity) { super(maxCapacity); - checkNotNull(alloc, "alloc"); + requireNonNull(alloc, "alloc"); if (initialCapacity > maxCapacity) { throw new IllegalArgumentException(String.format( @@ -70,8 +71,8 @@ public UnpooledHeapByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxC protected UnpooledHeapByteBuf(ByteBufAllocator alloc, byte[] initialArray, int maxCapacity) { super(maxCapacity); - checkNotNull(alloc, "alloc"); - checkNotNull(initialArray, "initialArray"); + requireNonNull(alloc, "alloc"); + requireNonNull(initialArray, "initialArray"); if (initialArray.length > maxCapacity) { throw new IllegalArgumentException(String.format( @@ -83,11 +84,11 @@ protected UnpooledHeapByteBuf(ByteBufAllocator alloc, byte[] initialArray, int m setIndex(0, initialArray.length); } - byte[] allocateArray(int initialCapacity) { + protected byte[] allocateArray(int initialCapacity) { return new byte[initialCapacity]; } - void freeArray(byte[] array) { + protected void freeArray(byte[] array) { // NOOP } @@ -113,36 +114,29 @@ public boolean isDirect() { @Override public int capacity() { - ensureAccessible(); return array.length; } @Override public ByteBuf capacity(int newCapacity) { checkNewCapacity(newCapacity); - - int oldCapacity = array.length; byte[] oldArray = array; + int oldCapacity = oldArray.length; + if (newCapacity == oldCapacity) { + return this; + } + + int bytesToCopy; if (newCapacity > oldCapacity) { - byte[] newArray = allocateArray(newCapacity); - System.arraycopy(oldArray, 0, newArray, 0, oldArray.length); - setArray(newArray); - freeArray(oldArray); - } else if (newCapacity < oldCapacity) { - byte[] newArray = allocateArray(newCapacity); - int readerIndex = readerIndex(); - 
if (readerIndex < newCapacity) { - int writerIndex = writerIndex(); - if (writerIndex > newCapacity) { - writerIndex(writerIndex = newCapacity); - } - System.arraycopy(oldArray, readerIndex, newArray, readerIndex, writerIndex - readerIndex); - } else { - setIndex(newCapacity, newCapacity); - } - setArray(newArray); - freeArray(oldArray); + bytesToCopy = oldCapacity; + } else { + trimIndicesToCapacity(newCapacity); + bytesToCopy = newCapacity; } + byte[] newArray = allocateArray(newCapacity); + System.arraycopy(oldArray, 0, newArray, 0, bytesToCopy); + setArray(newArray); + freeArray(oldArray); return this; } @@ -194,7 +188,7 @@ public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { @Override public ByteBuf getBytes(int index, ByteBuffer dst) { - checkIndex(index, dst.remaining()); + ensureAccessible(); dst.put(array, index, dst.remaining()); return this; } @@ -326,6 +320,11 @@ public ByteBuffer internalNioBuffer(int index, int length) { return (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length); } + @Override + public final boolean isContiguous() { + return true; + } + @Override public byte getByte(int index) { ensureAccessible(); @@ -536,9 +535,7 @@ protected void _setLongLE(int index, long value) { @Override public ByteBuf copy(int index, int length) { checkIndex(index, length); - byte[] copiedArray = new byte[length]; - System.arraycopy(array, index, copiedArray, 0, length); - return new UnpooledHeapByteBuf(alloc(), copiedArray, maxCapacity()); + return alloc().heapBuffer(length, maxCapacity()).writeBytes(array, index, length); } private ByteBuffer internalNioBuffer() { @@ -552,7 +549,7 @@ private ByteBuffer internalNioBuffer() { @Override protected void deallocate() { freeArray(array); - array = null; + array = EmptyArrays.EMPTY_BYTES; } @Override diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledSlicedByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnpooledSlicedByteBuf.java index 
5d523c861e0..3c5a7655ea2 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledSlicedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledSlicedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeDirectByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeDirectByteBuf.java index 9d425e322e3..6d1e326dc0f 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeDirectByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeDirectByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,25 +21,14 @@ import java.io.InputStream; import java.io.OutputStream; import java.nio.ByteBuffer; -import java.nio.ByteOrder; -import java.nio.channels.ClosedChannelException; -import java.nio.channels.FileChannel; -import java.nio.channels.GatheringByteChannel; -import java.nio.channels.ScatteringByteChannel; /** * A NIO {@link ByteBuffer} based buffer. 
It is recommended to use * {@link UnpooledByteBufAllocator#directBuffer(int, int)}, {@link Unpooled#directBuffer(int)} and * {@link Unpooled#wrappedBuffer(ByteBuffer)} instead of calling the constructor explicitly.} */ -public class UnpooledUnsafeDirectByteBuf extends AbstractReferenceCountedByteBuf { +public class UnpooledUnsafeDirectByteBuf extends UnpooledDirectByteBuf { - private final ByteBufAllocator alloc; - - private ByteBuffer tmpNioBuf; - private int capacity; - private boolean doNotFree; - ByteBuffer buffer; long memoryAddress; /** @@ -49,23 +38,7 @@ public class UnpooledUnsafeDirectByteBuf extends AbstractReferenceCountedByteBuf * @param maxCapacity the maximum capacity of the underlying direct buffer */ public UnpooledUnsafeDirectByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxCapacity) { - super(maxCapacity); - if (alloc == null) { - throw new NullPointerException("alloc"); - } - if (initialCapacity < 0) { - throw new IllegalArgumentException("initialCapacity: " + initialCapacity); - } - if (maxCapacity < 0) { - throw new IllegalArgumentException("maxCapacity: " + maxCapacity); - } - if (initialCapacity > maxCapacity) { - throw new IllegalArgumentException(String.format( - "initialCapacity(%d) > maxCapacity(%d)", initialCapacity, maxCapacity)); - } - - this.alloc = alloc; - setByteBuffer(allocateDirect(initialCapacity), false); + super(alloc, initialCapacity, maxCapacity); } /** @@ -74,144 +47,26 @@ public UnpooledUnsafeDirectByteBuf(ByteBufAllocator alloc, int initialCapacity, * @param maxCapacity the maximum capacity of the underlying direct buffer */ protected UnpooledUnsafeDirectByteBuf(ByteBufAllocator alloc, ByteBuffer initialBuffer, int maxCapacity) { - // We never try to free the buffer if it was provided by the end-user as we not know if this is an duplicate or - // an slice. This is done to prevent an IllegalArgumentException when using Java9 as Unsafe.invokeCleaner(...) 
- // will check if the given buffer is either an duplicate or slice and in this case throw an + // We never try to free the buffer if it was provided by the end-user as we don't know if this is a duplicate or + // a slice. This is done to prevent an IllegalArgumentException when using Java9 as Unsafe.invokeCleaner(...) + // will check if the given buffer is either a duplicate or slice and in this case throw an // IllegalArgumentException. // - // See http://hg.openjdk.java.net/jdk9/hs-demo/jdk/file/0d2ab72ba600/src/jdk.unsupported/share/classes/ + // See https://hg.openjdk.java.net/jdk9/hs-demo/jdk/file/0d2ab72ba600/src/jdk.unsupported/share/classes/ // sun/misc/Unsafe.java#l1250 // // We also call slice() explicitly here to preserve behaviour with previous netty releases. - this(alloc, initialBuffer.slice(), maxCapacity, false); + super(alloc, initialBuffer, maxCapacity, /* doFree = */ false, /* slice = */ true); } UnpooledUnsafeDirectByteBuf(ByteBufAllocator alloc, ByteBuffer initialBuffer, int maxCapacity, boolean doFree) { - super(maxCapacity); - if (alloc == null) { - throw new NullPointerException("alloc"); - } - if (initialBuffer == null) { - throw new NullPointerException("initialBuffer"); - } - if (!initialBuffer.isDirect()) { - throw new IllegalArgumentException("initialBuffer is not a direct buffer."); - } - if (initialBuffer.isReadOnly()) { - throw new IllegalArgumentException("initialBuffer is a read-only buffer."); - } - - int initialCapacity = initialBuffer.remaining(); - if (initialCapacity > maxCapacity) { - throw new IllegalArgumentException(String.format( - "initialCapacity(%d) > maxCapacity(%d)", initialCapacity, maxCapacity)); - } - - this.alloc = alloc; - doNotFree = !doFree; - setByteBuffer(initialBuffer.order(ByteOrder.BIG_ENDIAN), false); - writerIndex(initialCapacity); - } - - /** - * Allocate a new direct {@link ByteBuffer} with the given initialCapacity. 
- */ - protected ByteBuffer allocateDirect(int initialCapacity) { - return ByteBuffer.allocateDirect(initialCapacity); - } - - /** - * Free a direct {@link ByteBuffer} - */ - protected void freeDirect(ByteBuffer buffer) { - PlatformDependent.freeDirectBuffer(buffer); + super(alloc, initialBuffer, maxCapacity, doFree, false); } + @Override final void setByteBuffer(ByteBuffer buffer, boolean tryFree) { - if (tryFree) { - ByteBuffer oldBuffer = this.buffer; - if (oldBuffer != null) { - if (doNotFree) { - doNotFree = false; - } else { - freeDirect(oldBuffer); - } - } - } - this.buffer = buffer; + super.setByteBuffer(buffer, tryFree); memoryAddress = PlatformDependent.directBufferAddress(buffer); - tmpNioBuf = null; - capacity = buffer.remaining(); - } - - @Override - public boolean isDirect() { - return true; - } - - @Override - public int capacity() { - return capacity; - } - - @Override - public ByteBuf capacity(int newCapacity) { - checkNewCapacity(newCapacity); - - int readerIndex = readerIndex(); - int writerIndex = writerIndex(); - - int oldCapacity = capacity; - if (newCapacity > oldCapacity) { - ByteBuffer oldBuffer = buffer; - ByteBuffer newBuffer = allocateDirect(newCapacity); - oldBuffer.position(0).limit(oldBuffer.capacity()); - newBuffer.position(0).limit(oldBuffer.capacity()); - newBuffer.put(oldBuffer); - newBuffer.clear(); - setByteBuffer(newBuffer, true); - } else if (newCapacity < oldCapacity) { - ByteBuffer oldBuffer = buffer; - ByteBuffer newBuffer = allocateDirect(newCapacity); - if (readerIndex < newCapacity) { - if (writerIndex > newCapacity) { - writerIndex(writerIndex = newCapacity); - } - oldBuffer.position(readerIndex).limit(writerIndex); - newBuffer.position(readerIndex).limit(writerIndex); - newBuffer.put(oldBuffer); - newBuffer.clear(); - } else { - setIndex(newCapacity, newCapacity); - } - setByteBuffer(newBuffer, true); - } - return this; - } - - @Override - public ByteBufAllocator alloc() { - return alloc; - } - - @Override - public 
ByteOrder order() { - return ByteOrder.BIG_ENDIAN; - } - - @Override - public boolean hasArray() { - return false; - } - - @Override - public byte[] array() { - throw new UnsupportedOperationException("direct buffer"); - } - - @Override - public int arrayOffset() { - throw new UnsupportedOperationException("direct buffer"); } @Override @@ -225,11 +80,23 @@ public long memoryAddress() { return memoryAddress; } + @Override + public byte getByte(int index) { + checkIndex(index); + return _getByte(index); + } + @Override protected byte _getByte(int index) { return UnsafeByteBufUtil.getByte(addr(index)); } + @Override + public short getShort(int index) { + checkIndex(index, 2); + return _getShort(index); + } + @Override protected short _getShort(int index) { return UnsafeByteBufUtil.getShort(addr(index)); @@ -240,6 +107,12 @@ protected short _getShortLE(int index) { return UnsafeByteBufUtil.getShortLE(addr(index)); } + @Override + public int getUnsignedMedium(int index) { + checkIndex(index, 3); + return _getUnsignedMedium(index); + } + @Override protected int _getUnsignedMedium(int index) { return UnsafeByteBufUtil.getUnsignedMedium(addr(index)); @@ -250,6 +123,12 @@ protected int _getUnsignedMediumLE(int index) { return UnsafeByteBufUtil.getUnsignedMediumLE(addr(index)); } + @Override + public int getInt(int index) { + checkIndex(index, 4); + return _getInt(index); + } + @Override protected int _getInt(int index) { return UnsafeByteBufUtil.getInt(addr(index)); @@ -260,6 +139,12 @@ protected int _getIntLE(int index) { return UnsafeByteBufUtil.getIntLE(addr(index)); } + @Override + public long getLong(int index) { + checkIndex(index, 8); + return _getLong(index); + } + @Override protected long _getLong(int index) { return UnsafeByteBufUtil.getLong(addr(index)); @@ -277,23 +162,19 @@ public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { } @Override - public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { + void getBytes(int 
index, byte[] dst, int dstIndex, int length, boolean internal) { UnsafeByteBufUtil.getBytes(this, addr(index), index, dst, dstIndex, length); - return this; } @Override - public ByteBuf getBytes(int index, ByteBuffer dst) { + void getBytes(int index, ByteBuffer dst, boolean internal) { UnsafeByteBufUtil.getBytes(this, addr(index), index, dst); - return this; } @Override - public ByteBuf readBytes(ByteBuffer dst) { - int length = dst.remaining(); - checkReadableBytes(length); - getBytes(readerIndex, dst); - readerIndex += length; + public ByteBuf setByte(int index, int value) { + checkIndex(index); + _setByte(index, value); return this; } @@ -302,6 +183,13 @@ protected void _setByte(int index, int value) { UnsafeByteBufUtil.setByte(addr(index), value); } + @Override + public ByteBuf setShort(int index, int value) { + checkIndex(index, 2); + _setShort(index, value); + return this; + } + @Override protected void _setShort(int index, int value) { UnsafeByteBufUtil.setShort(addr(index), value); @@ -312,6 +200,13 @@ protected void _setShortLE(int index, int value) { UnsafeByteBufUtil.setShortLE(addr(index), value); } + @Override + public ByteBuf setMedium(int index, int value) { + checkIndex(index, 3); + _setMedium(index, value); + return this; + } + @Override protected void _setMedium(int index, int value) { UnsafeByteBufUtil.setMedium(addr(index), value); @@ -322,6 +217,13 @@ protected void _setMediumLE(int index, int value) { UnsafeByteBufUtil.setMediumLE(addr(index), value); } + @Override + public ByteBuf setInt(int index, int value) { + checkIndex(index, 4); + _setInt(index, value); + return this; + } + @Override protected void _setInt(int index, int value) { UnsafeByteBufUtil.setInt(addr(index), value); @@ -332,6 +234,13 @@ protected void _setIntLE(int index, int value) { UnsafeByteBufUtil.setIntLE(addr(index), value); } + @Override + public ByteBuf setLong(int index, long value) { + checkIndex(index, 8); + _setLong(index, value); + return this; + } + @Override 
protected void _setLong(int index, long value) { UnsafeByteBufUtil.setLong(addr(index), value); @@ -361,62 +270,8 @@ public ByteBuf setBytes(int index, ByteBuffer src) { } @Override - public ByteBuf getBytes(int index, OutputStream out, int length) throws IOException { + void getBytes(int index, OutputStream out, int length, boolean internal) throws IOException { UnsafeByteBufUtil.getBytes(this, addr(index), index, out, length); - return this; - } - - @Override - public int getBytes(int index, GatheringByteChannel out, int length) throws IOException { - return getBytes(index, out, length, false); - } - - private int getBytes(int index, GatheringByteChannel out, int length, boolean internal) throws IOException { - ensureAccessible(); - if (length == 0) { - return 0; - } - - ByteBuffer tmpBuf; - if (internal) { - tmpBuf = internalNioBuffer(); - } else { - tmpBuf = buffer.duplicate(); - } - tmpBuf.clear().position(index).limit(index + length); - return out.write(tmpBuf); - } - - @Override - public int getBytes(int index, FileChannel out, long position, int length) throws IOException { - return getBytes(index, out, position, length, false); - } - - private int getBytes(int index, FileChannel out, long position, int length, boolean internal) throws IOException { - ensureAccessible(); - if (length == 0) { - return 0; - } - - ByteBuffer tmpBuf = internal ? 
internalNioBuffer() : buffer.duplicate(); - tmpBuf.clear().position(index).limit(index + length); - return out.write(tmpBuf, position); - } - - @Override - public int readBytes(GatheringByteChannel out, int length) throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, length, true); - readerIndex += readBytes; - return readBytes; - } - - @Override - public int readBytes(FileChannel out, long position, int length) throws IOException { - checkReadableBytes(length); - int readBytes = getBytes(readerIndex, out, position, length, true); - readerIndex += readBytes; - return readBytes; } @Override @@ -424,85 +279,12 @@ public int setBytes(int index, InputStream in, int length) throws IOException { return UnsafeByteBufUtil.setBytes(this, addr(index), index, in, length); } - @Override - public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { - ensureAccessible(); - ByteBuffer tmpBuf = internalNioBuffer(); - tmpBuf.clear().position(index).limit(index + length); - try { - return in.read(tmpBuf); - } catch (ClosedChannelException ignored) { - return -1; - } - } - - @Override - public int setBytes(int index, FileChannel in, long position, int length) throws IOException { - ensureAccessible(); - ByteBuffer tmpBuf = internalNioBuffer(); - tmpBuf.clear().position(index).limit(index + length); - try { - return in.read(tmpBuf, position); - } catch (ClosedChannelException ignored) { - return -1; - } - } - - @Override - public int nioBufferCount() { - return 1; - } - - @Override - public ByteBuffer[] nioBuffers(int index, int length) { - return new ByteBuffer[] { nioBuffer(index, length) }; - } - @Override public ByteBuf copy(int index, int length) { return UnsafeByteBufUtil.copy(this, addr(index), index, length); } - @Override - public ByteBuffer internalNioBuffer(int index, int length) { - checkIndex(index, length); - return (ByteBuffer) internalNioBuffer().clear().position(index).limit(index + length); - } 
- - private ByteBuffer internalNioBuffer() { - ByteBuffer tmpNioBuf = this.tmpNioBuf; - if (tmpNioBuf == null) { - this.tmpNioBuf = tmpNioBuf = buffer.duplicate(); - } - return tmpNioBuf; - } - - @Override - public ByteBuffer nioBuffer(int index, int length) { - checkIndex(index, length); - return ((ByteBuffer) buffer.duplicate().position(index).limit(index + length)).slice(); - } - - @Override - protected void deallocate() { - ByteBuffer buffer = this.buffer; - if (buffer == null) { - return; - } - - this.buffer = null; - - if (!doNotFree) { - freeDirect(buffer); - } - } - - @Override - public ByteBuf unwrap() { - return null; - } - - long addr(int index) { + final long addr(int index) { return memoryAddress + index; } diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeHeapByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeHeapByteBuf.java index 51786f1567c..283e76fe6cb 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeHeapByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeHeapByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,12 @@ import io.netty.util.internal.PlatformDependent; -class UnpooledUnsafeHeapByteBuf extends UnpooledHeapByteBuf { +/** + * Big endian Java heap buffer implementation. It is recommended to use + * {@link UnpooledByteBufAllocator#heapBuffer(int, int)}, {@link Unpooled#buffer(int)} and + * {@link Unpooled#wrappedBuffer(byte[])} instead of calling the constructor explicitly. + */ +public class UnpooledUnsafeHeapByteBuf extends UnpooledHeapByteBuf { /** * Creates a new heap buffer with a newly allocated byte array. 
@@ -25,12 +30,12 @@ class UnpooledUnsafeHeapByteBuf extends UnpooledHeapByteBuf { * @param initialCapacity the initial capacity of the underlying byte array * @param maxCapacity the max capacity of the underlying byte array */ - UnpooledUnsafeHeapByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxCapacity) { + public UnpooledUnsafeHeapByteBuf(ByteBufAllocator alloc, int initialCapacity, int maxCapacity) { super(alloc, initialCapacity, maxCapacity); } @Override - byte[] allocateArray(int initialCapacity) { + protected byte[] allocateArray(int initialCapacity) { return PlatformDependent.allocateUninitializedArray(initialCapacity); } @@ -243,26 +248,18 @@ protected void _setLongLE(int index, long value) { @Override public ByteBuf setZero(int index, int length) { - if (PlatformDependent.javaVersion() >= 7) { - // Only do on java7+ as the needed Unsafe call was only added there. - checkIndex(index, length); - UnsafeByteBufUtil.setZero(array, index, length); - return this; - } - return super.setZero(index, length); + checkIndex(index, length); + UnsafeByteBufUtil.setZero(array, index, length); + return this; } @Override public ByteBuf writeZero(int length) { - if (PlatformDependent.javaVersion() >= 7) { - // Only do on java7+ as the needed Unsafe call was only added there. 
- ensureWritable(length); - int wIndex = writerIndex; - UnsafeByteBufUtil.setZero(array, wIndex, length); - writerIndex = wIndex + length; - return this; - } - return super.writeZero(length); + ensureWritable(length); + int wIndex = writerIndex; + UnsafeByteBufUtil.setZero(array, wIndex, length); + writerIndex = wIndex + length; + return this; } @Override diff --git a/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeNoCleanerDirectByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeNoCleanerDirectByteBuf.java index 3b9c05b83b1..e30f9293f53 100644 --- a/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeNoCleanerDirectByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnpooledUnsafeNoCleanerDirectByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -48,18 +48,8 @@ public ByteBuf capacity(int newCapacity) { return this; } - ByteBuffer newBuffer = reallocateDirect(buffer, newCapacity); - - if (newCapacity < oldCapacity) { - if (readerIndex() < newCapacity) { - if (writerIndex() > newCapacity) { - writerIndex(newCapacity); - } - } else { - setIndex(newCapacity, newCapacity); - } - } - setByteBuffer(newBuffer, false); + trimIndicesToCapacity(newCapacity); + setByteBuffer(reallocateDirect(buffer, newCapacity), false); return this; } } diff --git a/buffer/src/main/java/io/netty/buffer/UnreleasableByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnreleasableByteBuf.java index ba06103ff4d..4773ac57e2d 100644 --- a/buffer/src/main/java/io/netty/buffer/UnreleasableByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnreleasableByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you 
may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.buffer; +import static java.util.Objects.requireNonNull; + import java.nio.ByteOrder; /** @@ -31,9 +33,7 @@ final class UnreleasableByteBuf extends WrappedByteBuf { @Override public ByteBuf order(ByteOrder endianness) { - if (endianness == null) { - throw new NullPointerException("endianness"); - } + requireNonNull(endianness, "endianness"); if (endianness == order()) { return this; } diff --git a/buffer/src/main/java/io/netty/buffer/UnsafeByteBufUtil.java b/buffer/src/main/java/io/netty/buffer/UnsafeByteBufUtil.java index 1016cc4ab93..741b2ddf9f7 100644 --- a/buffer/src/main/java/io/netty/buffer/UnsafeByteBufUtil.java +++ b/buffer/src/main/java/io/netty/buffer/UnsafeByteBufUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -25,8 +25,8 @@ import java.nio.ReadOnlyBufferException; import static io.netty.util.internal.MathUtil.isOutOfBounds; -import static io.netty.util.internal.ObjectUtil.checkNotNull; import static io.netty.util.internal.PlatformDependent.BIG_ENDIAN_NATIVE_ORDER; +import static java.util.Objects.requireNonNull; /** * All operations get and set as {@link ByteOrder#BIG_ENDIAN}. 
@@ -463,7 +463,7 @@ static int setBytes(AbstractByteBuf buf, long addr, int index, InputStream in, i static void getBytes(AbstractByteBuf buf, long addr, int index, ByteBuf dst, int dstIndex, int length) { buf.checkIndex(index, length); - checkNotNull(dst, "dst"); + requireNonNull(dst, "dst"); if (isOutOfBounds(dstIndex, length, dst.capacity())) { throw new IndexOutOfBoundsException("dstIndex: " + dstIndex); } @@ -479,7 +479,7 @@ static void getBytes(AbstractByteBuf buf, long addr, int index, ByteBuf dst, int static void getBytes(AbstractByteBuf buf, long addr, int index, byte[] dst, int dstIndex, int length) { buf.checkIndex(index, length); - checkNotNull(dst, "dst"); + requireNonNull(dst, "dst"); if (isOutOfBounds(dstIndex, length, dst.length)) { throw new IndexOutOfBoundsException("dstIndex: " + dstIndex); } @@ -514,7 +514,7 @@ static void getBytes(AbstractByteBuf buf, long addr, int index, ByteBuffer dst) static void setBytes(AbstractByteBuf buf, long addr, int index, ByteBuf src, int srcIndex, int length) { buf.checkIndex(index, length); - checkNotNull(src, "src"); + requireNonNull(src, "src"); if (isOutOfBounds(srcIndex, length, src.capacity())) { throw new IndexOutOfBoundsException("srcIndex: " + srcIndex); } @@ -532,6 +532,13 @@ static void setBytes(AbstractByteBuf buf, long addr, int index, ByteBuf src, int static void setBytes(AbstractByteBuf buf, long addr, int index, byte[] src, int srcIndex, int length) { buf.checkIndex(index, length); + // we need to check not null for src as it may cause the JVM crash + // See https://github.com/netty/netty/issues/10791 + requireNonNull(src, "src"); + if (isOutOfBounds(srcIndex, length, src.length)) { + throw new IndexOutOfBoundsException("srcIndex: " + srcIndex); + } + if (length != 0) { PlatformDependent.copyMemory(src, srcIndex, addr, length); } @@ -583,18 +590,34 @@ private static void setSingleBytes(final AbstractByteBuf buf, final long addr, f static void getBytes(AbstractByteBuf buf, long addr, int index, 
OutputStream out, int length) throws IOException { buf.checkIndex(index, length); if (length != 0) { - ByteBuf tmpBuf = buf.alloc().heapBuffer(length); - try { - byte[] tmp = tmpBuf.array(); - int offset = tmpBuf.arrayOffset(); - PlatformDependent.copyMemory(addr, tmp, offset, length); - out.write(tmp, offset, length); - } finally { - tmpBuf.release(); + int len = Math.min(length, ByteBufUtil.WRITE_CHUNK_SIZE); + if (len <= ByteBufUtil.MAX_TL_ARRAY_LEN || !buf.alloc().isDirectBufferPooled()) { + getBytes(addr, ByteBufUtil.threadLocalTempArray(len), 0, len, out, length); + } else { + // if direct buffers are pooled chances are good that heap buffers are pooled as well. + ByteBuf tmpBuf = buf.alloc().heapBuffer(len); + try { + byte[] tmp = tmpBuf.array(); + int offset = tmpBuf.arrayOffset(); + getBytes(addr, tmp, offset, len, out, length); + } finally { + tmpBuf.release(); + } } } } + private static void getBytes(long inAddr, byte[] in, int inOffset, int inLen, OutputStream out, int outLen) + throws IOException { + do { + int len = Math.min(inLen, outLen); + PlatformDependent.copyMemory(inAddr, in, inOffset, len); + out.write(in, inOffset, len); + outLen -= len; + inAddr += len; + } while (outLen > 0); + } + static void setZero(long addr, int length) { if (length == 0) { return; diff --git a/buffer/src/main/java/io/netty/buffer/UnsafeDirectSwappedByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnsafeDirectSwappedByteBuf.java index 206b637b5d9..dca920ed3de 100644 --- a/buffer/src/main/java/io/netty/buffer/UnsafeDirectSwappedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnsafeDirectSwappedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * -* http://www.apache.org/licenses/LICENSE-2.0 +* https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/UnsafeHeapSwappedByteBuf.java b/buffer/src/main/java/io/netty/buffer/UnsafeHeapSwappedByteBuf.java index 2a76906593c..8de2870f960 100644 --- a/buffer/src/main/java/io/netty/buffer/UnsafeHeapSwappedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/UnsafeHeapSwappedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * -* http://www.apache.org/licenses/LICENSE-2.0 +* https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/WrappedByteBuf.java b/buffer/src/main/java/io/netty/buffer/WrappedByteBuf.java index 45aa60ce889..ecf51bfc846 100644 --- a/buffer/src/main/java/io/netty/buffer/WrappedByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/WrappedByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,6 +16,8 @@ package io.netty.buffer; +import static java.util.Objects.requireNonNull; + import io.netty.util.ByteProcessor; import io.netty.util.internal.StringUtil; @@ -41,9 +43,7 @@ class WrappedByteBuf extends ByteBuf { protected final ByteBuf buf; protected WrappedByteBuf(ByteBuf buf) { - if (buf == null) { - throw new NullPointerException("buf"); - } + requireNonNull(buf, "buf"); this.buf = buf; } @@ -52,6 +52,11 @@ public final boolean hasMemoryAddress() { return buf.hasMemoryAddress(); } + @Override + public boolean isContiguous() { + return buf.isContiguous(); + } + @Override public final long memoryAddress() { return buf.memoryAddress(); @@ -151,6 +156,11 @@ public final int maxWritableBytes() { return buf.maxWritableBytes(); } + @Override + public int maxFastWritableBytes() { + return buf.maxFastWritableBytes(); + } + @Override public final boolean isReadable() { return buf.isReadable(); @@ -167,30 +177,6 @@ public final ByteBuf clear() { return this; } - @Override - public final ByteBuf markReaderIndex() { - buf.markReaderIndex(); - return this; - } - - @Override - public final ByteBuf resetReaderIndex() { - buf.resetReaderIndex(); - return this; - } - - @Override - public final ByteBuf markWriterIndex() { - buf.markWriterIndex(); - return this; - } - - @Override - public final ByteBuf resetWriterIndex() { - buf.resetWriterIndex(); - return this; - } - @Override public ByteBuf discardReadBytes() { buf.discardReadBytes(); @@ -1033,4 +1019,9 @@ public boolean release() { public boolean release(int decrement) { return buf.release(decrement); } + + @Override + final boolean isAccessible() { + return buf.isAccessible(); + } } diff --git 
a/buffer/src/main/java/io/netty/buffer/WrappedCompositeByteBuf.java b/buffer/src/main/java/io/netty/buffer/WrappedCompositeByteBuf.java index 8f5161620f9..44f9b504774 100644 --- a/buffer/src/main/java/io/netty/buffer/WrappedCompositeByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/WrappedCompositeByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -98,6 +98,11 @@ public final int maxWritableBytes() { return wrapped.maxWritableBytes(); } + @Override + public int maxFastWritableBytes() { + return wrapped.maxFastWritableBytes(); + } + @Override public int ensureWritable(int minWritableBytes, boolean force) { return wrapped.ensureWritable(minWritableBytes, force); @@ -423,6 +428,11 @@ public final int refCnt() { return wrapped.refCnt(); } + @Override + final boolean isAccessible() { + return wrapped.isAccessible(); + } + @Override public ByteBuf duplicate() { return wrapped.duplicate(); @@ -543,6 +553,12 @@ public CompositeByteBuf addComponent(boolean increaseWriterIndex, int cIndex, By return this; } + @Override + public CompositeByteBuf addFlattenedComponents(boolean increaseWriterIndex, ByteBuf buffer) { + wrapped.addFlattenedComponents(increaseWriterIndex, buffer); + return this; + } + @Override public CompositeByteBuf removeComponent(int cIndex) { wrapped.removeComponent(cIndex); @@ -916,30 +932,6 @@ public final CompositeByteBuf clear() { return this; } - @Override - public final CompositeByteBuf markReaderIndex() { - wrapped.markReaderIndex(); - return this; - } - - @Override - public final CompositeByteBuf resetReaderIndex() { - wrapped.resetReaderIndex(); - return this; - } - - @Override - public 
final CompositeByteBuf markWriterIndex() { - wrapped.markWriterIndex(); - return this; - } - - @Override - public final CompositeByteBuf resetWriterIndex() { - wrapped.resetWriterIndex(); - return this; - } - @Override public CompositeByteBuf ensureWritable(int minWritableBytes) { wrapped.ensureWritable(minWritableBytes); diff --git a/buffer/src/main/java/io/netty/buffer/WrappedUnpooledUnsafeDirectByteBuf.java b/buffer/src/main/java/io/netty/buffer/WrappedUnpooledUnsafeDirectByteBuf.java index 91db4ded197..dd8493c0587 100644 --- a/buffer/src/main/java/io/netty/buffer/WrappedUnpooledUnsafeDirectByteBuf.java +++ b/buffer/src/main/java/io/netty/buffer/WrappedUnpooledUnsafeDirectByteBuf.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/api/AllocationType.java b/buffer/src/main/java/io/netty/buffer/api/AllocationType.java new file mode 100644 index 00000000000..669db3e2e51 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/AllocationType.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * An object used by {@linkplain BufferAllocator buffer allocators} to communicate desirable properties of an + * allocation to a {@linkplain MemoryManager memory manager}, such as whether an allocation should be off-heap. + *

    + * Standard implementations of this interface can be found in {@link StandardAllocationTypes}. + */ +public interface AllocationType { +} diff --git a/buffer/src/main/java/io/netty/buffer/api/AllocatorControl.java b/buffer/src/main/java/io/netty/buffer/api/AllocatorControl.java new file mode 100644 index 00000000000..43db39963ca --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/AllocatorControl.java @@ -0,0 +1,60 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import io.netty.util.internal.UnstableApi; + +/** + * Methods for accessing and controlling the internals of an allocator. + * This interface is intended to be used by implementors of the {@link BufferAllocator}, {@link Buffer} and + * {@link MemoryManager} interfaces. + * + * @apiNote This interface is public because it is a necessary integration point to separate allocators from concrete + * buffer implementations. The API is {@linkplain UnstableApi unstable} because Netty's own allocators are the primary + * customer of this API, and backwards compatibility at this level should not prevent us from evolving it. + */ +@UnstableApi +public interface AllocatorControl { + /** + * Allocates a buffer that is not tethered to any particular {@link Buffer} object, + * and return the recoverable memory object from it. + *

    + * This allows a buffer to implement {@link Buffer#ensureWritable(int)} by having new memory allocated to it, + * without that memory being attached to some other lifetime. + * + * @param originator The buffer that originated the request for an untethered memory allocated. + * @param size The size of the requested memory allocation, in bytes. + * @return A {@link UntetheredMemory} object that is the requested allocation. + */ + UntetheredMemory allocateUntethered(Buffer originator, int size); + + /** + * Memory that isn't attached to any particular buffer. + */ + interface UntetheredMemory { + /** + * Produces the recoverable memory object associated with this piece of untethered memory. + * @implNote This method should only be called once, since it might be expensive. + */ + Memory memory(); + + /** + * Produces the drop instance associated with this piece of untethered memory. + * @implNote This method should only be called once, since it might be expensive, or interact with Cleaners. + */ + Drop drop(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/Buffer.java b/buffer/src/main/java/io/netty/buffer/api/Buffer.java new file mode 100644 index 00000000000..1cdd0f48af1 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/Buffer.java @@ -0,0 +1,712 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api; + +import java.nio.ByteBuffer; + +/** + * A life cycled buffer of memory, with separate reader and writer offsets. + *

    + * A buffer is a logically sequential stretch of memory with a certain capacity, an offset for writing, + * and an offset for reading. + * Buffers may be {@linkplain CompositeBuffer composed} of multiple {@linkplain #countComponents() components}, + * where each component is a guaranteed contiguous chunk of memory. + * + *

    Creating a buffer

    + * + * Buffers are created by {@linkplain BufferAllocator allocators}, and their {@code allocate} family of methods. + * A number of standard allocators exist, and are available through static methods on the {@code BufferAllocator} + * interface. + * + *

    Buffer life cycle

    + * + * The buffer has a life cycle, where it is allocated, used, and deallocated. + * When the buffer is initially allocated, a pairing {@link #close()} call will deallocate it. + * If a buffer is {@linkplain #send() sent} elsewhere, the {@linkplain #close() close} method on the given instance + * will become a no-op. + * The buffer can be thought of as a view onto memory, and calling {@link #send()} on the buffer will effectively close + * that view, and recreate it upon reception at its destination. + * + *

    Thread-safety

    + * + * Buffers are not thread-safe. + * + *

    Accessing data

    + * + * Data access methods fall into two classes: + *
      + *
    1. Access that are based on, and updates, the read or write offset positions.
    2. + *
      • These accessor methods are typically called {@code readX} or {@code writeX}.
      + *
    3. Access that take offsets as arguments, and do not update read or write offset positions.
    4. + *
      • These accessor methods are typically called {@code getX} or {@code setX}.
      + *
    + * + * A buffer contains two mutable offset positions: one for reading and one for writing. + * These positions use zero-based indexing, + * such that the first byte of data in the buffer is placed at offset {@code 0}, + * and the last byte in the buffer is at offset {@link #capacity() capacity - 1}. + * The {@link #readerOffset()} is the offset into the buffer from which the next read will take place, + * and is initially zero. + * The reader offset must always be less than or equal to the {@link #writerOffset()}. + * The {@link #writerOffset()} is likewise the offset into the buffer where the next write will take place. + * The writer offset is also initially zero, and must be less than or equal to the {@linkplain #capacity() capacity}. + *

    + * This carves the buffer into three regions, as demonstrated by this diagram: + *

    + *      +-------------------+------------------+------------------+
    + *      | discardable bytes |  readable bytes  |  writable bytes  |
    + *      |                   |     (CONTENT)    |                  |
    + *      +-------------------+------------------+------------------+
    + *      |                   |                  |                  |
    + *      0      <=     readerOffset  <=   writerOffset    <=    capacity
    + * 
    + * + *

    Byte Order

    + * + * Buffers are always big endian, and this cannot be changed. + * Usages that need to get, set, read, or write, little-endian values will have to flip the byte order of the values + * they read and write. + * + *

    Splitting buffers

    + * + * The {@link #split()} method breaks a buffer into two. + * The two buffers will share the underlying memory, but their regions will not overlap, ensuring that the memory is + * safely shared between the two. + *

    + * Splitting a buffer is useful for when you want to hand over a region of a buffer to some other, + * perhaps unknown, piece of code, and relinquish your ownership of that buffer region in the process. + * Examples include aggregating messages into an accumulator buffer, and sending messages down the pipeline for + * further processing, as split buffer regions, once their data has been received in its entirety. + * + * If you instead wish to temporarily share a region of a buffer, you will have to pass offset and length along with the + * buffer, or you will have to make a copy of the region. + * + *

    Buffers as constants

    + * + * Sometimes, the same bit of data will be processed or transmitted over and over again. In such cases, it can be + * tempting to allocate and fill a buffer once, and then reuse it. + * Such reuse must be done carefully, however, to avoid a number of bugs. + * The {@link BufferAllocator} has a {@link BufferAllocator#constBufferSupplier(byte[])} method that solves this, and + * prevents these bugs from occurring. + */ +public interface Buffer extends Resource, BufferAccessor { + /** + * The capacity of this buffer, that is, the maximum number of bytes it can contain. + * + * @return The capacity in bytes. + */ + int capacity(); + + /** + * Get the current reader offset. The next read will happen from this byte offset into the buffer. + * + * @return The current reader offset. + */ + int readerOffset(); + + /** + * Set the reader offset. Make the next read happen from the given offset into the buffer. + * + * @param offset The reader offset to set. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the specified {@code offset} is less than zero or greater than the current + * {@link #writerOffset()}. + * @throws BufferClosedException if this buffer is closed. + */ + Buffer readerOffset(int offset); + + /** + * Get the current writer offset. The next write will happen at this byte offset into the buffer. + * + * @return The current writer offset. + */ + int writerOffset(); + + /** + * Set the writer offset. Make the next write happen at the given offset. + * + * @param offset The writer offset to set. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the specified {@code offset} is less than the current + * {@link #readerOffset()} or greater than {@link #capacity()}. + * @throws BufferClosedException if this buffer is closed. + * @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}. 
+ */ + Buffer writerOffset(int offset); + + /** + * Returns the number of readable bytes which is equal to {@code (writerOffset() - readerOffset())}. + */ + default int readableBytes() { + return writerOffset() - readerOffset(); + } + + /** + * Returns the number of writable bytes which is equal to {@code (capacity() - writerOffset())}. + */ + default int writableBytes() { + return capacity() - writerOffset(); + } + + /** + * Fills the buffer with the given byte value. This method does not respect the {@link #readerOffset()} or {@link + * #writerOffset()}, but copies the full capacity of the buffer. The {@link #readerOffset()} and {@link + * #writerOffset()} are not modified. + * + * @param value The byte value to write at every offset in the buffer. + * @return This Buffer. + * @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}. + */ + Buffer fill(byte value); + + /** + * Gives the native memory address backing this buffer, or return 0 if this buffer has no native memory address. + * @return The native memory address, if any, otherwise 0. + */ + long nativeAddress(); + + /** + * Makes this buffer read-only. This is irreversible. + * This operation is also idempotent, so calling this method multiple times on the same buffer makes no difference. + * + * @return This buffer instance. + */ + Buffer makeReadOnly(); + + /** + * Queries if this buffer is read-only or not. + * + * @return {@code true} if this buffer is read-only, {@code false} otherwise. + */ + boolean readOnly(); + + /** + * Copies the given length of data from this buffer into the given destination array, beginning at the given source + * position in this buffer, and the given destination position in the destination array. + *

    + * This method does not read or modify the {@linkplain #writerOffset() write offset} or the + * {@linkplain #readerOffset() read offset}. + * + * @param srcPos The byte offset into this buffer from where the copying should start; the byte at this offset in + * this buffer will be copied to the {@code destPos} index in the {@code dest} array. + * @param dest The destination byte array. + * @param destPos The index into the {@code dest} array from where the copying should start. + * @param length The number of bytes to copy. + * @throws NullPointerException if the destination array is null. + * @throws IndexOutOfBoundsException if the source or destination positions, or the length, are negative, + * or if the resulting end positions reaches beyond the end of either this buffer, or the destination array. + * @throws BufferClosedException if this buffer is closed. + */ + void copyInto(int srcPos, byte[] dest, int destPos, int length); + + /** + * Copies the given length of data from this buffer into the given destination byte buffer, beginning at the given + * source position in this buffer, and the given destination position in the destination byte buffer. + *

    + * This method does not read or modify the {@linkplain #writerOffset() write offset} or the + * {@linkplain #readerOffset() read offset}, nor is the position of the destination buffer changed. + *

    + * The position and limit of the destination byte buffer are also ignored, and do not influence {@code destPos} + * or {@code length}. + * + * @param srcPos The byte offset into this buffer from where the copying should start; the byte at this offset in + * this buffer will be copied to the {@code destPos} index in the {@code dest} array. + * @param dest The destination byte buffer. + * @param destPos The index into the {@code dest} array from where the copying should start. + * @param length The number of bytes to copy. + * @throws NullPointerException if the destination buffer is null. + * @throws IndexOutOfBoundsException if the source or destination positions, or the length, are negative, + * or if the resulting end positions reaches beyond the end of either this buffer, or the destination array. + * @throws java.nio.ReadOnlyBufferException if the destination buffer is read-only. + * @throws BufferClosedException if this buffer is closed. + */ + void copyInto(int srcPos, ByteBuffer dest, int destPos, int length); + + /** + * Copies the given length of data from this buffer into the given destination buffer, beginning at the given + * source position in this buffer, and the given destination position in the destination buffer. + *

    + * This method does not read or modify the {@linkplain #writerOffset() write offset} or the + * {@linkplain #readerOffset() read offset} on this buffer, nor on the destination buffer. + *

    + * The read and write offsets of the destination buffer are also ignored, and do not influence {@code destPos} + * or {@code length}. + * + * @param srcPos The byte offset into this buffer from where the copying should start; the byte at this offset in + * this buffer will be copied to the {@code destPos} index in the {@code dest} array. + * @param dest The destination buffer. + * @param destPos The index into the {@code dest} array from where the copying should start. + * @param length The number of bytes to copy. + * @throws NullPointerException if the destination buffer is null. + * @throws IndexOutOfBoundsException if the source or destination positions, or the length, are negative, + * or if the resulting end positions reaches beyond the end of either this buffer, or the destination array. + * @throws BufferReadOnlyException if the destination buffer is read-only. + * @throws BufferClosedException if this or the destination buffer is closed. + */ + void copyInto(int srcPos, Buffer dest, int destPos, int length); + + /** + * Writes into this buffer, all the readable bytes from the given buffer. + * This updates the {@linkplain #writerOffset() write offset} of this buffer, and the + * {@linkplain #readerOffset() reader offset} of the given buffer. + * + * @param source The buffer to read from. + * @return This buffer. + * @throws NullPointerException If the source buffer is {@code null}. + */ + default Buffer writeBytes(Buffer source) { + int size = source.readableBytes(); + int woff = writerOffset(); + source.copyInto(source.readerOffset(), this, woff, size); + source.readerOffset(source.readerOffset() + size); + writerOffset(woff + size); + return this; + } + + /** + * Writes into this buffer, all the bytes from the given byte array. + * This updates the {@linkplain #writerOffset() write offset} of this buffer by the length of the array. + * + * @param source The byte array to read from. + * @return This buffer. 
+ */ + default Buffer writeBytes(byte[] source) { + return writeBytes(source, 0, source.length); + } + + /** + * Writes into this buffer, all the bytes from the given byte array. + * This updates the {@linkplain #writerOffset() write offset} of this buffer by the length of the array. + * + * @param source The byte array to read from. + * @param srcPos Position in the {@code source} from where bytes should be written to this buffer. + * @param length The number of bytes to copy. + * @return This buffer. + */ + default Buffer writeBytes(byte[] source, int srcPos, int length) { + int woff = writerOffset(); + writerOffset(woff + length); + for (int i = 0; i < length; i++) { + setByte(woff + i, source[srcPos + i]); + } + return this; + } + + /** + * Resets the {@linkplain #readerOffset() read offset} and the {@linkplain #writerOffset() write offset} on this + * buffer to zero, and return this buffer. + * + * @return This buffer instance. + */ + default Buffer resetOffsets() { + readerOffset(0); + writerOffset(0); + return this; + } + + /** + * Opens a cursor to iterate the readable bytes of this buffer. The {@linkplain #readerOffset() reader offset} and + * {@linkplain #writerOffset() writer offset} are not modified by the cursor. + *

    + * Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that + * the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified + * while the iteration takes place. Otherwise, unpredictable behaviour might result. + * + * @return A {@link ByteCursor} for iterating the readable bytes of this buffer. + */ + ByteCursor openCursor(); + + /** + * Opens a cursor to iterate the given number bytes of this buffer, starting at the given offset. + * The {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified by + * the cursor. + *

    + * Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that + * the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified + * while the iteration takes place. Otherwise, unpredictable behaviour might result. + * + * @param fromOffset The offset into the buffer where iteration should start. + * The first byte read from the iterator will be the byte at this offset. + * @param length The number of bytes to iterate. + * @return A {@link ByteCursor} for the given stretch of bytes of this buffer. + * @throws IllegalArgumentException if the length is negative, or if the region given by the {@code fromOffset} and + * the {@code length} reaches outside the bounds of this buffer. + */ + ByteCursor openCursor(int fromOffset, int length); + + /** + * Opens a cursor to iterate the readable bytes of this buffer, in reverse. + * The {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified by + * the cursor. + *

    + * Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that + * the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified + * while the iteration takes place. Otherwise, unpredictable behaviour might result. + * + * @return A {@link ByteCursor} for the readable bytes of this buffer. + */ + default ByteCursor openReverseCursor() { + int woff = writerOffset(); + return openReverseCursor(woff == 0? 0 : woff - 1, readableBytes()); + } + + /** + * Opens a cursor to iterate the given number bytes of this buffer, in reverse, starting at the given offset. + * The {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified by + * the cursor. + *

    + * Care should be taken to ensure that the buffer's lifetime extends beyond the cursor and the iteration, and that + * the {@linkplain #readerOffset() reader offset} and {@linkplain #writerOffset() writer offset} are not modified + * while the iteration takes place. Otherwise, unpredictable behaviour might result. + * + * @param fromOffset The offset into the buffer where iteration should start. + * The first byte read from the iterator will be the byte at this offset. + * @param length The number of bytes to iterate. + * @return A {@link ByteCursor} for the given stretch of bytes of this buffer. + * @throws IllegalArgumentException if the length is negative, or if the region given by the {@code fromOffset} and + * the {@code length} reaches outside the bounds of this buffer. + */ + ByteCursor openReverseCursor(int fromOffset, int length); + + /** + * Ensures that this buffer has at least the given number of bytes of + * {@linkplain #writableBytes() available space for writing}. + * If this buffer already has the necessary space, then this method returns immediately. + * If this buffer does not already have the necessary space, then it will be expanded using the + * {@link BufferAllocator} the buffer was created with. + * This method is the same as calling {@link #ensureWritable(int, int, boolean)} where {@code allowCompaction} is + * {@code false}. + * + * @param size The requested number of bytes of space that should be available for writing. + * @return This buffer instance. + * @throws IllegalStateException if this buffer is in a bad state. + * @throws BufferClosedException if this buffer is closed. + * @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}. + */ + default Buffer ensureWritable(int size) { + ensureWritable(size, 1, true); + return this; + } + + /** + * Ensures that this buffer has at least the given number of bytes of + * {@linkplain #writableBytes() available space for writing}. 
+ * If this buffer already has the necessary space, then this method returns immediately. + * If this buffer does not already have the necessary space, then space will be made available in one or all of + * the following available ways: + * + *

      + *
• + * If {@code allowCompaction} is {@code true}, and the sum of the read and writable bytes would be enough to + * satisfy the request, and it (depending on the buffer implementation) seems faster and easier to compact + * the existing buffer rather than allocating a new buffer, then the requested bytes will be made available + * that way. The compaction will not necessarily work the same way as the {@link #compact()} method, as the + * implementation may be able to make the requested bytes available with less effort than is strictly + * mandated by the {@link #compact()} method. + *
    • + *
    • + * Regardless of the value of the {@code allowCompaction}, the implementation may make more space available + * by just allocating more or larger buffers. This allocation would use the same {@link BufferAllocator} + * that this buffer was created with. + *
    • + *
    • + * If {@code allowCompaction} is {@code true}, then the implementation may choose to do a combination of + * compaction and allocation. + *
    • + *
+ * + * @param size The requested number of bytes of space that should be available for writing. + * @return This buffer instance. + * @param minimumGrowth The minimum number of bytes to grow by. If it is determined that memory should be allocated + * and copied, make sure that the new memory allocation is bigger than the old one by at least + * this many bytes. This way, the buffer can grow by more than what is immediately necessary, + * thus amortising the costs of allocating and copying. + * @param allowCompaction {@code true} if the method is allowed to modify the + * {@linkplain #readerOffset() reader offset} and + * {@linkplain #writerOffset() writer offset}, otherwise {@code false}. + * @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}. + * @throws IllegalArgumentException if {@code size} or {@code minimumGrowth} are negative. + * @throws IllegalStateException if this buffer is in a bad state. + */ + Buffer ensureWritable(int size, int minimumGrowth, boolean allowCompaction); + + /** + * Returns a copy of this buffer's readable bytes. + * Modifying the content of the returned buffer will not affect this buffer's contents. + * The two buffers will maintain separate offsets. This method is identical to + * {@code buf.copy(buf.readerOffset(), buf.readableBytes())}. + * This method does not modify {@link #readerOffset()} or {@link #writerOffset()} of this buffer. + *

    + * The copy is created with a {@linkplain #writerOffset() write offset} equal to the length of the copied data, + * so that the entire contents of the copy is ready to be read. + *

+ * The returned buffer will not be read-only, regardless of the {@linkplain #readOnly() read-only state} of this + * buffer. + * + * @return A new buffer instance, with independent {@link #readerOffset()} and {@link #writerOffset()}, + * that contains a copy of the readable region of this buffer. + * @throws BufferClosedException if this buffer is closed. + */ + default Buffer copy() { + int offset = readerOffset(); + int length = readableBytes(); + return copy(offset, length); + } + + /** + * Returns a copy of the given region of this buffer. + * Modifying the content of the returned buffer will not affect this buffer's contents. + * The two buffers will maintain separate offsets. + * This method does not modify {@link #readerOffset()} or {@link #writerOffset()} of this buffer. + *

    + * The copy is created with a {@linkplain #writerOffset() write offset} equal to the length of the copy, + * so that the entire contents of the copy is ready to be read. + *

    + * The returned buffer will not be read-only, regardless of the {@linkplain #readOnly() read-only state} of this + * buffer. + * + * @param offset The offset where copying should start from. This is the offset of the first byte copied. + * @param length The number of bytes to copy, and the capacity of the returned buffer. + * @return A new buffer instance, with independent {@link #readerOffset()} and {@link #writerOffset()}, + * that contains a copy of the given region of this buffer. + * @throws IllegalArgumentException if the {@code offset} or {@code length} reaches outside the bounds of the + * buffer. + * @throws BufferClosedException if this buffer is closed. + */ + Buffer copy(int offset, int length); + + /** + * Splits the buffer into two, at the {@linkplain #writerOffset() write offset} position. + *

+ * The region of this buffer that contains the previously read and readable bytes, will be captured and returned in + * a new buffer, that will hold its own ownership of that region. This allows the returned buffer to be + * independently {@linkplain #send() sent} to other threads. + *

+ * The returned buffer will adopt the {@link #readerOffset()} of this buffer, and have its {@link #writerOffset()} + * and {@link #capacity()} both set equal to the write-offset of this buffer. + *

+ * The memory region in the returned buffer will become inaccessible through this buffer. This buffer will have its + * capacity reduced by the capacity of the returned buffer, and the read and write offsets of this buffer will both + * become zero, even though their positions in memory remain unchanged. + *

    + * Effectively, the following transformation takes place: + *

    {@code
    +     *         This buffer:
    +     *          +------------------------------------------+
    +     *         0|   |r/o                  |w/o             |cap
    +     *          +---+---------------------+----------------+
    +     *         /   /                     / \               \
    +     *        /   /                     /   \               \
    +     *       /   /                     /     \               \
    +     *      /   /                     /       \               \
    +     *     /   /                     /         \               \
    +     *    +---+---------------------+           +---------------+
    +     *    |   |r/o                  |w/o & cap  |r/o & w/o      |cap
    +     *    +---+---------------------+           +---------------+
    +     *    Returned buffer.                      This buffer.
    +     * }
    + * When the buffers are in this state, both of the split parts retain an atomic reference count on the + * underlying memory. This means that shared underlying memory will not be deallocated or returned to a pool, until + * all the split parts have been closed. + *

    + * Composite buffers have it a little easier, in that at most only one of the constituent buffers will actually be + * split. If the split point lands perfectly between two constituent buffers, then a composite buffer can + * simply split its internal array in two. + *

    + * Split buffers support all operations that normal buffers do, including {@link #ensureWritable(int)}. + *

    + * See the Splitting buffers section for details. + * + * @return A new buffer with independent and exclusive ownership over the previously read and readable bytes from + * this buffer. + */ + default Buffer split() { + return split(writerOffset()); + } + + /** + * Splits the buffer into two, at the given {@code splitOffset}. + *

+ * The region of this buffer that precedes the {@code splitOffset}, will be captured and returned in a new + * buffer, that will hold its own ownership of that region. This allows the returned buffer to be independently + * {@linkplain #send() sent} to other threads. + *

    + * The returned buffer will adopt the {@link #readerOffset()} and {@link #writerOffset()} of this buffer, + * but truncated to fit within the capacity dictated by the {@code splitOffset}. + *

    + * The memory region in the returned buffer will become inaccessible through this buffer. If the + * {@link #readerOffset()} or {@link #writerOffset()} of this buffer lie prior to the {@code splitOffset}, + * then those offsets will be moved forward, so they land on offset 0 after the split. + *

    + * Effectively, the following transformation takes place: + *

    {@code
    +     *         This buffer:
    +     *          +--------------------------------+
    +     *         0|               |splitOffset     |cap
    +     *          +---------------+----------------+
    +     *         /               / \               \
    +     *        /               /   \               \
    +     *       /               /     \               \
    +     *      /               /       \               \
    +     *     /               /         \               \
    +     *    +---------------+           +---------------+
    +     *    |               |cap        |               |cap
    +     *    +---------------+           +---------------+
    +     *    Returned buffer.            This buffer.
    +     * }
    + * When the buffers are in this state, both of the split parts retain an atomic reference count on the + * underlying memory. This means that shared underlying memory will not be deallocated or returned to a pool, until + * all the split parts have been closed. + *

    + * Composite buffers have it a little easier, in that at most only one of the constituent buffers will actually be + * split. If the split point lands perfectly between two constituent buffers, then a composite buffer can + * simply split its internal array in two. + *

    + * Split buffers support all operations that normal buffers do, including {@link #ensureWritable(int)}. + *

    + * See the Splitting buffers section for details. + * + * @param splitOffset The offset into this buffer where it should be split. After the split, the data at this offset + * will be at offset zero in this buffer. + * @return A new buffer with independent and exclusive ownership over the bytes from the beginning to the given + * offset of this buffer. + */ + Buffer split(int splitOffset); + + /** + * Discards the read bytes, and moves the buffer contents to the beginning of the buffer. + * + * @return This buffer instance. + * @throws BufferReadOnlyException if this buffer is {@linkplain #readOnly() read-only}. + * @throws IllegalStateException if this buffer is in a bad state. + */ + Buffer compact(); + + /** + * Get the number of "components" in this buffer. For composite buffers, this is the number of transitive + * constituent buffers, while non-composite buffers only have one component. + * + * @return The number of components in this buffer. + */ + int countComponents(); + + /** + * Get the number of "components" in this buffer, that are readable. These are the components that would be + * processed by {@link #forEachReadable(int, ReadableComponentProcessor)}. For composite buffers, this is the + * number of transitive constituent buffers that are readable, while non-composite buffers only have at most one + * readable component. + *

    + * The number of readable components may be less than the {@link #countComponents() component count}, if not all of + * them have readable data. + * + * @return The number of readable components in this buffer. + */ + int countReadableComponents(); + + /** + * Get the number of "components" in this buffer, that are writable. These are the components that would be + * processed by {@link #forEachWritable(int, WritableComponentProcessor)}. For composite buffers, this is the + * number of transitive constituent buffers that are writable, while non-composite buffers only have at most one + * writable component. + *

    + * The number of writable components may be less than the {@link #countComponents() component count}, if not all of + * them have space for writing. + * + * @return The number of writable components in this buffer. + */ + int countWritableComponents(); + + /** + * Processes all readable components of this buffer, and return the number of components processed. + *

    + * The given {@linkplain ReadableComponentProcessor processor} is called for each readable component in this buffer, + * and passed a component index, for the given component in the iteration, and a {@link ReadableComponent} object + * for accessing the data within the given component. + *

    + * The component index is specific to the particular invocation of this method. The first call to the consumer will + * be passed the given initial index, and the next call will be passed the initial index plus one, and so on. + *

    + * The {@linkplain ReadableComponentProcessor component processor} may stop the iteration at any time by returning + * {@code false}. + * This will cause the number of components processed to be returned as a negative number (to signal early return), + * and the number of components processed may then be less than the + * {@linkplain #countReadableComponents() readable component count}. + *

    + * Note that the {@link ReadableComponent} instance passed to the consumer could be reused for + * multiple calls, so the data must be extracted from the component in the context of the iteration. + *

    + * The {@link ByteBuffer} instances obtained from the component, share lifetime with that internal component. + * This means they can be accessed as long as the internal memory store remain unchanged. Methods that may cause + * such changes are {@link #split(int)}, {@link #split()}, {@link #compact()}, {@link #ensureWritable(int)}, + * {@link #ensureWritable(int, int, boolean)}, and {@link #send()}. + *

    + * The best way to ensure this doesn't cause any trouble, is to use the buffers directly as part of the iteration. + *

    + * Note that the arrays, memory addresses, and byte buffers exposed as components by this method, + * should not be used for changing the buffer contents. Doing so may cause undefined behaviour. + *

    + * Changes to position and limit of the byte buffers exposed via the processed components, are not reflected back to + * this buffer instance. + * + * @param initialIndex The initial index of the iteration, and the index that will be passed to the first call to + * the {@linkplain ReadableComponentProcessor#process(int, ReadableComponent) processor}. + * @param processor The processor that will be used to process the buffer components. + * @return The number of readable components processed, as a positive number if all readable components were + * processed, or as a negative number if the iteration was stopped because + * {@link ReadableComponentProcessor#process(int, ReadableComponent)} returned {@code false}. + * In any case, the number of components processed may be less than {@link #countComponents()}. + */ + int forEachReadable(int initialIndex, ReadableComponentProcessor processor) throws E; + + /** + * Process all writable components of this buffer, and return the number of components processed. + *

    + * The given {@linkplain WritableComponentProcessor processor} is called for each writable component in this buffer, + * and passed a component index, for the given component in the iteration, and a {@link WritableComponent} object + * for accessing the data within the given component. + *

    + * The component index is specific to the particular invocation of this method. The first call to the consumer will + * be passed the given initial index, and the next call will be passed the initial index plus one, and so on. + *

+ * The {@link WritableComponentProcessor component processor} may stop the iteration at any time by returning + * {@code false}. + * This will cause the number of components processed to be returned as a negative number (to signal early return), + * and the number of components processed may then be less than the + * {@linkplain #countWritableComponents() writable component count}. + *

    + * Note that the {@link WritableComponent} instance passed to the consumer could be reused for + * multiple calls, so the data must be extracted from the component in the context of the iteration. + *

    + * The {@link ByteBuffer} instances obtained from the component, share lifetime with that internal component. + * This means they can be accessed as long as the internal memory store remain unchanged. Methods that may cause + * such changes are {@link #split(int)}, {@link #split()}, {@link #compact()}, {@link #ensureWritable(int)}, + * {@link #ensureWritable(int, int, boolean)}, and {@link #send()}. + *

    + * The best way to ensure this doesn't cause any trouble, is to use the buffers directly as part of the iteration. + *

    + * Changes to position and limit of the byte buffers exposed via the processed components, are not reflected back to + * this buffer instance. + * + * @param initialIndex The initial index of the iteration, and the index that will be passed to the first call to + * the {@linkplain WritableComponentProcessor#process(int, WritableComponent) processor}. + * @param processor The processor that will be used to process the buffer components. + * @return The number of writable components processed, as a positive number if all writable components were + * processed, or as a negative number if the iteration was stopped because + * {@link WritableComponentProcessor#process(int, WritableComponent)} returned {@code false}. + * In any case, the number of components processed may be less than {@link #countComponents()}. + */ + int forEachWritable(int initialIndex, WritableComponentProcessor processor) throws E; +} diff --git a/buffer/src/main/java/io/netty/buffer/api/BufferAccessor.java b/buffer/src/main/java/io/netty/buffer/api/BufferAccessor.java new file mode 100644 index 00000000000..afcfd5d6f8d --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/BufferAccessor.java @@ -0,0 +1,614 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * This interface is just the primitive data accessor methods that {@link Buffer} exposes. 
+ * It can be useful if you only need the data access methods, and perhaps wish to decorate or modify their behaviour. + * Usually, you'd use the {@link Buffer} interface directly, since this lets you properly control the buffer reference + * count. + */ +public interface BufferAccessor { + // + /** + * Read the byte value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Byte#BYTES}. + * The value is read using a two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The byte value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Byte#BYTES}. + */ + byte readByte(); + + /** + * Get the byte value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using a two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The byte value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}. + */ + byte getByte(int roff); + + /** + * Read the unsigned byte value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Byte#BYTES}. + * The value is read using an unsigned two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The unsigned byte value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Byte#BYTES}. + */ + int readUnsignedByte(); + + /** + * Get the unsigned byte value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. 
+ * The value is read using an unsigned two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The unsigned byte value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}. + */ + int getUnsignedByte(int roff); + + /** + * Write the given byte value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Byte#BYTES}. + * The value is written using a two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The byte value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Byte#BYTES}. + */ + Buffer writeByte(byte value); + + /** + * Set the given byte value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using a two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The byte value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}. + */ + Buffer setByte(int woff, byte value); + + /** + * Write the given unsigned byte value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Byte#BYTES}. + * The value is written using an unsigned two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The int value to write. + * @return This Buffer. 
+ * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Byte#BYTES}. + */ + Buffer writeUnsignedByte(int value); + + /** + * Set the given unsigned byte value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using an unsigned two's complement 8-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Byte#BYTES}. + */ + Buffer setUnsignedByte(int woff, int value); + + /** + * Read the char value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by 2. + * The value is read using a 2-byte UTF-16 encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The char value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than 2. + */ + char readChar(); + + /** + * Get the char value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using a 2-byte UTF-16 encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The char value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus 2. + */ + char getChar(int roff); + + /** + * Write the given char value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by 2. + * The value is written using a 2-byte UTF-16 encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. 
+ * + * @param value The char value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than 2. + */ + Buffer writeChar(char value); + + /** + * Set the given char value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using a 2-byte UTF-16 encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The char value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus 2. + */ + Buffer setChar(int woff, char value); + + /** + * Read the short value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Short#BYTES}. + * The value is read using a two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The short value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Short#BYTES}. + */ + short readShort(); + + /** + * Get the short value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using a two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The short value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Short#BYTES}. + */ + short getShort(int roff); + + /** + * Read the unsigned short value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Short#BYTES}. 
+ * The value is read using an unsigned two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The unsigned short value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Short#BYTES}. + */ + int readUnsignedShort(); + + /** + * Get the unsigned short value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using an unsigned two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The unsigned short value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Short#BYTES}. + */ + int getUnsignedShort(int roff); + + /** + * Write the given short value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Short#BYTES}. + * The value is written using a two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The short value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Short#BYTES}. + */ + Buffer writeShort(short value); + + /** + * Set the given short value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using a two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The short value to write. + * @return This Buffer. 
+ * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Short#BYTES}. + */ + Buffer setShort(int woff, short value); + + /** + * Write the given unsigned short value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Short#BYTES}. + * The value is written using an unsigned two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Short#BYTES}. + */ + Buffer writeUnsignedShort(int value); + + /** + * Set the given unsigned short value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using an unsigned two's complement 16-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Short#BYTES}. + */ + Buffer setUnsignedShort(int woff, int value); + + /** + * Read the int value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by 3. + * The value is read using a two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The int value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than 3. + */ + int readMedium(); + + /** + * Get the int value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. 
+ * The value is read using a two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The int value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus 3. + */ + int getMedium(int roff); + + /** + * Read the unsigned int value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by 3. + * The value is read using an unsigned two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The unsigned int value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than 3. + */ + int readUnsignedMedium(); + + /** + * Get the unsigned int value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using an unsigned two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The unsigned int value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus 3. + */ + int getUnsignedMedium(int roff); + + /** + * Write the given int value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by 3. + * The value is written using a two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than 3. 
+ */ + Buffer writeMedium(int value); + + /** + * Set the given int value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using a two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus 3. + */ + Buffer setMedium(int woff, int value); + + /** + * Write the given unsigned int value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by 3. + * The value is written using an unsigned two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than 3. + */ + Buffer writeUnsignedMedium(int value); + + /** + * Set the given unsigned int value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using an unsigned two's complement 24-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus 3. + */ + Buffer setUnsignedMedium(int woff, int value); + + /** + * Read the int value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Integer#BYTES}. 
+ * The value is read using a two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The int value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Integer#BYTES}. + */ + int readInt(); + + /** + * Get the int value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using a two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The int value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}. + */ + int getInt(int roff); + + /** + * Read the unsigned int value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Integer#BYTES}. + * The value is read using an unsigned two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The unsigned int value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Integer#BYTES}. + */ + long readUnsignedInt(); + + /** + * Get the unsigned int value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using an unsigned two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The unsigned int value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}. 
+ */ + long getUnsignedInt(int roff); + + /** + * Write the given int value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Integer#BYTES}. + * The value is written using a two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Integer#BYTES}. + */ + Buffer writeInt(int value); + + /** + * Set the given int value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using a two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The int value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}. + */ + Buffer setInt(int woff, int value); + + /** + * Write the given unsigned int value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Integer#BYTES}. + * The value is written using an unsigned two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The long value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Integer#BYTES}. + */ + Buffer writeUnsignedInt(long value); + + /** + * Set the given unsigned int value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using an unsigned two's complement 32-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. 
+ * @param value The long value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Integer#BYTES}. + */ + Buffer setUnsignedInt(int woff, long value); + + /** + * Read the float value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Float#BYTES}. + * The value is read using a 32-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The float value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Float#BYTES}. + */ + float readFloat(); + + /** + * Get the float value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using a 32-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The float value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Float#BYTES}. + */ + float getFloat(int roff); + + /** + * Write the given float value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Float#BYTES}. + * The value is written using a 32-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The float value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Float#BYTES}. + */ + Buffer writeFloat(float value); + + /** + * Set the given float value at the given write offset. The {@link Buffer#writerOffset()} is not modified. 
+ * The value is written using a 32-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The float value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Float#BYTES}. + */ + Buffer setFloat(int woff, float value); + + /** + * Read the long value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Long#BYTES}. + * The value is read using a two's complement 64-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The long value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Long#BYTES}. + */ + long readLong(); + + /** + * Get the long value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using a two's complement 64-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The long value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Long#BYTES}. + */ + long getLong(int roff); + + /** + * Write the given long value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Long#BYTES}. + * The value is written using a two's complement 64-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param value The long value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Long#BYTES}. 
+ */ + Buffer writeLong(long value); + + /** + * Set the given long value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using a two's complement 64-bit encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The long value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Long#BYTES}. + */ + Buffer setLong(int woff, long value); + + /** + * Read the double value at the current {@link Buffer#readerOffset()}, + * and increases the reader offset by {@link Double#BYTES}. + * The value is read using a 64-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @return The double value at the current reader offset. + * @throws IndexOutOfBoundsException If {@link Buffer#readableBytes} is less than {@link Double#BYTES}. + */ + double readDouble(); + + /** + * Get the double value at the given reader offset. + * The {@link Buffer#readerOffset()} is not modified. + * The value is read using a 64-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param roff The read offset, an absolute offset into this buffer, to read from. + * @return The double value at the given offset. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Double#BYTES}. + */ + double getDouble(int roff); + + /** + * Write the given double value at the current {@link Buffer#writerOffset()}, + * and increase the writer offset by {@link Double#BYTES}. + * The value is written using a 64-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. 
+ * + * @param value The double value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException If {@link Buffer#writableBytes} is less than {@link Double#BYTES}. + */ + Buffer writeDouble(double value); + + /** + * Set the given double value at the given write offset. The {@link Buffer#writerOffset()} is not modified. + * The value is written using a 64-bit IEEE floating point encoding, + * in {@link java.nio.ByteOrder#BIG_ENDIAN} byte order. + * + * @param woff The write offset, an absolute offset into this buffer to write to. + * @param value The double value to write. + * @return This Buffer. + * @throws IndexOutOfBoundsException if the given offset is out of bounds of the buffer, that is, less than 0 or + * greater than {@link Buffer#capacity()} minus {@link Double#BYTES}. + */ + Buffer setDouble(int woff, double value); + // +} diff --git a/buffer/src/main/java/io/netty/buffer/api/BufferAllocator.java b/buffer/src/main/java/io/netty/buffer/api/BufferAllocator.java new file mode 100644 index 00000000000..0d91b8af917 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/BufferAllocator.java @@ -0,0 +1,128 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import io.netty.buffer.api.pool.PooledBufferAllocator; + +import java.util.function.Supplier; + +/** + * Interface for allocating {@link Buffer}s. 
+ */ +public interface BufferAllocator extends AutoCloseable { + /** + * Produces a {@link BufferAllocator} that allocates unpooled, on-heap buffers. + * On-heap buffers have a {@code byte[]} internally, and their {@linkplain Buffer#nativeAddress() native address} + * is zero. + *

    + * The concrete {@link Buffer} implementation is chosen by {@link MemoryManager#instance()}. + * + * @return A non-pooling allocator of on-heap buffers + */ + static BufferAllocator onHeapUnpooled() { + return new ManagedBufferAllocator(MemoryManager.instance(), false); + } + + /** + * Produces a {@link BufferAllocator} that allocates unpooled, off-heap buffers. + * Off-heap buffers have a native memory pointer internally, which can be obtained from their + * {@linkplain Buffer#nativeAddress() native address} method. + *

    + * The concrete {@link Buffer} implementation is chosen by {@link MemoryManager#instance()}. + * + * @return A non-pooling allocator of off-heap buffers + */ + static BufferAllocator offHeapUnpooled() { + return new ManagedBufferAllocator(MemoryManager.instance(), true); + } + + /** + * Produces a pooling {@link BufferAllocator} that allocates and recycles on-heap buffers. + * On-heap buffers have a {@code byte[]} internally, and their {@linkplain Buffer#nativeAddress() native address} + * is zero. + *

    + * The concrete {@link Buffer} implementation is chosen by {@link MemoryManager#instance()}. + * + * @return A pooling allocator of on-heap buffers + */ + static BufferAllocator onHeapPooled() { + return new PooledBufferAllocator(MemoryManager.instance(), false); + } + + /** + * Produces a pooling {@link BufferAllocator} that allocates and recycles off-heap buffers. + * Off-heap buffers have a native memory pointer internally, which can be obtained from their + * {@linkplain Buffer#nativeAddress() native address} method. + *

    + * The concrete {@link Buffer} implementation is chosen by {@link MemoryManager#instance()}. + * + * @return A pooling allocator of off-heap buffers + */ + static BufferAllocator offHeapPooled() { + return new PooledBufferAllocator(MemoryManager.instance(), true); + } + + /** + * Allocate a {@link Buffer} of the given size in bytes. This method may throw an {@link OutOfMemoryError} if there + * is not enough free memory available to allocate a {@link Buffer} of the requested size. + *

    + * The buffer will use big endian byte order. + * + * @param size The size of {@link Buffer} to allocate. + * @return The newly allocated {@link Buffer}. + * @throws IllegalStateException if this allocator has been {@linkplain #close() closed}. + */ + Buffer allocate(int size); + + /** + * Create a supplier of "constant" {@linkplain Buffer Buffers} from this allocator, that all have the given + * byte contents. The buffer has the same capacity as the byte array length, and its write offset is placed at the + * end, and its read offset is at the beginning, such that the entire buffer contents are readable. + *

    + * The buffers produced by the supplier will each have their own independent life-cycle, and closing them will + * make them {@linkplain Buffer#isAccessible() inaccessible}, just like normally allocated buffers. + *

    + * The buffers produced are "constants", in the sense that they are {@linkplain Buffer#readOnly() read-only}. + *

    + * It can generally be expected, but is not guaranteed, that the returned supplier is more resource efficient than + * allocating and copying memory with other available APIs. In such optimised implementations, the underlying memory + * baking the buffers will be shared among all the buffers produced by the supplier. + *

    + * The primary use case for this API, is when you need to repeatedly produce buffers with the same contents, and + * you perhaps wish to keep a {@code static final} field with these contents. The supplier-based API enforces + * that each usage get their own distinct buffer instance. Each of these instances cannot interfere with each other, + * so bugs like closing, or modifying the contents, of a shared buffer cannot occur. + * + * @param bytes The byte contents of the buffers produced by the returned supplier. + * @return A supplier of read-only buffers with the given contents. + * @throws IllegalStateException if this allocator has been {@linkplain #close() closed}, but any supplier obtained + * prior to closing the allocator will continue to work. + */ + Supplier constBufferSupplier(byte[] bytes); + + /** + * Close this allocator, freeing all of its internal resources. + *

    + * Existing (currently in-use) allocated buffers will not be impacted by calling this method. + * If this is a pooling or caching allocator, then existing buffers will be immediately freed when they are closed, + * instead of being pooled or cached. + *

    + * The allocator can no longer be used to allocate more buffers after calling this method. + * Attempting to allocate from a closed allocator will cause {@link IllegalStateException}s to be thrown. + */ + @Override + void close(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/BufferClosedException.java b/buffer/src/main/java/io/netty/buffer/api/BufferClosedException.java new file mode 100644 index 00000000000..ea22dbbdda1 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/BufferClosedException.java @@ -0,0 +1,31 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * An exception thrown when an operation is attempted on a {@link Buffer} when it has been closed. 
+ */ +public final class BufferClosedException extends IllegalStateException { + private static final long serialVersionUID = 85913332711192868L; + + public BufferClosedException() { + this("This buffer is closed."); + } + + public BufferClosedException(final String message) { + super(message); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/BufferHolder.java b/buffer/src/main/java/io/netty/buffer/api/BufferHolder.java new file mode 100644 index 00000000000..e811790d20e --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/BufferHolder.java @@ -0,0 +1,149 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import io.netty.buffer.api.internal.ResourceSupport; +import io.netty.buffer.api.internal.Statics; + +import java.lang.invoke.VarHandle; +import java.util.Objects; + +import static java.lang.invoke.MethodHandles.lookup; + +/** + * The {@link BufferHolder} is an abstract class that simplifies the implementation of objects that themselves contain + * a {@link Buffer} instance. + *

    + * The {@link BufferHolder} can only hold on to a single buffer, so objects and classes that need to hold on to multiple + * buffers will have to do their implementation from scratch, though they can use the code of the {@link BufferHolder} + * as inspiration. Alternatively, multiple buffers can be + * {@linkplain CompositeBuffer#compose(BufferAllocator, Send[]) composed} into a single buffer, which can then be put + * in a buffer holder. + *

    + * If you just want an object that is a reference to a buffer, then the {@link BufferRef} can be used for that purpose. + * If you have an advanced use case where you wish to implement {@link Resource}, and tightly control lifetimes, then + * {@link ResourceSupport} can be of help. + * + * @param The concrete {@link BufferHolder} type. + */ +public abstract class BufferHolder> implements Resource { + private static final VarHandle BUF = Statics.findVarHandle(lookup(), BufferHolder.class, "buf", Buffer.class); + private Buffer buf; + + /** + * Create a new {@link BufferHolder} to hold the given {@linkplain Buffer buffer}. + * + * @param buf The {@linkplain Buffer buffer} to be held by this holder. + */ + protected BufferHolder(Buffer buf) { + this.buf = Objects.requireNonNull(buf, "The buffer cannot be null."); + } + + /** + * Create a new {@link BufferHolder} to hold the {@linkplain Buffer buffer} received from the given {@link Send}. + *

    + * The {@link BufferHolder} will then be holding exclusive ownership of the buffer. + * + * @param send The {@linkplain Buffer buffer} to be held by this holder. + */ + protected BufferHolder(Send send) { + buf = Objects.requireNonNull(send, "The Send-object cannot be null.").receive(); + } + + @Override + public void close() { + buf.close(); + } + + @SuppressWarnings("unchecked") + @Override + public Send send() { + return buf.send().map((Class) getClass(), this::receive); + } + + /** + * Called when a {@linkplain #send() sent} {@link BufferHolder} is received by the recipient. + * The {@link BufferHolder} should return a new concrete instance, that wraps the given {@link Buffer} object. + * + * @param buf The {@link Buffer} that is {@linkplain Send#receive() received} by the recipient, + * and needs to be wrapped in a new {@link BufferHolder} instance. + * @return A new {@linkplain T buffer holder} instance, containing the given {@linkplain Buffer buffer}. + */ + protected abstract T receive(Buffer buf); + + /** + * Replace the underlying referenced buffer with the given buffer. + *

    + * This method is protected to permit advanced use cases of {@link BufferHolder} sub-class implementations. + *

    + * Note: This method closes the current buffer, + * and takes exclusive ownership of the received buffer. + *

    + * The buffer assignment is performed using a plain store. + * + * @param send The new {@link Buffer} instance that is replacing the currently held buffer. + */ + protected final void replaceBuffer(Send send) { + Buffer received = send.receive(); + buf.close(); + buf = received; + } + + /** + * Replace the underlying referenced buffer with the given buffer. + *

    + * This method is protected to permit advanced use cases of {@link BufferHolder} sub-class implementations. + *

    + * Note: this method closes the current buffer, + * and takes exclusive ownership of the received buffer. + *

    + * The buffer assignment is performed using a volatile store. + * + * @param send The {@link Send} with the new {@link Buffer} instance that is replacing the currently held buffer. + */ + protected final void replaceBufferVolatile(Send send) { + Buffer received = send.receive(); + var prev = (Buffer) BUF.getAndSet(this, received); + prev.close(); + } + + /** + * Access the held {@link Buffer} instance. + *

    + * The access is performed using a plain load. + * + * @return The {@link Buffer} instance being held by this {@linkplain T buffer holder}. + */ + protected final Buffer getBuffer() { + return buf; + } + + /** + * Access the held {@link Buffer} instance. + *

    + * The access is performed using a volatile load. + * + * @return The {@link Buffer} instance being held by this {@linkplain T buffer holder}. + */ + protected final Buffer getBufferVolatile() { + return (Buffer) BUF.getVolatile(this); + } + + @Override + public boolean isAccessible() { + return buf.isAccessible(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/BufferReadOnlyException.java b/buffer/src/main/java/io/netty/buffer/api/BufferReadOnlyException.java new file mode 100644 index 00000000000..29e98b7fc63 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/BufferReadOnlyException.java @@ -0,0 +1,31 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * An exception thrown when an operation is attempted on a {@linkplain Buffer#readOnly() read-only} {@link Buffer}. 
+ */ +public final class BufferReadOnlyException extends UnsupportedOperationException { + private static final long serialVersionUID = 4855825594125231593L; + + public BufferReadOnlyException() { + this("This buffer is read-only."); + } + + public BufferReadOnlyException(final String message) { + super(message); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/BufferRef.java b/buffer/src/main/java/io/netty/buffer/api/BufferRef.java new file mode 100644 index 00000000000..8cfa4a799c4 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/BufferRef.java @@ -0,0 +1,73 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import java.lang.invoke.VarHandle; + +/** + * A mutable reference to a buffer. + */ +public final class BufferRef extends BufferHolder { + /** + * Create a reference to the given {@linkplain Buffer buffer}. + * + * @param buf The buffer to reference. + */ + private BufferRef(Buffer buf) { + super(buf); + // BufferRef is meant to be atomic, so we need to add a fence to get the semantics of a volatile store. + VarHandle.fullFence(); + } + + /** + * Create a reference that holds the exclusive ownership of the sent buffer. + * + * @param send The {@linkplain Send sent} buffer to take ownership of. 
+ */ + public BufferRef(Send send) { + super(send); + // BufferRef is meant to be atomic, so we need to add a fence to get the semantics of a volatile store. + VarHandle.fullFence(); + } + + @Override + protected BufferRef receive(Buffer buf) { + return new BufferRef(buf); + } + + /** + * Replace the underlying referenced buffer with the given buffer. + *

    + * Note: this method closes the current buffer, + * and takes exclusive ownership of the received buffer. + *

    + * The buffer assignment is performed using a volatile store. + * + * @param send The {@link Send} with the new {@link Buffer} instance that is replacing the currently held buffer. + */ + public void replace(Send send) { + replaceBufferVolatile(send); + } + + /** + * Access the buffer in this reference. + * + * @return The buffer held by the reference. + */ + public Buffer content() { + return getBufferVolatile(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/BufferStub.java b/buffer/src/main/java/io/netty/buffer/api/BufferStub.java new file mode 100644 index 00000000000..c1fa48577ef --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/BufferStub.java @@ -0,0 +1,460 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import java.nio.ByteBuffer; + +/** + * A stub of a {@link Buffer} implementation that implements all buffer methods by delegating them to a wrapped buffer + * instance. + *

    + * This can be used when writing automated tests for code that integrates with {@link Buffer}, but should not be used in + * production code. + */ +public class BufferStub implements Buffer { + protected final Buffer delegate; + + /** + * Create a new buffer stub that delegates all calls to the given instance. + * + * @param delegate The buffer instance to delegate all method calls to. + */ + public BufferStub(Buffer delegate) { + this.delegate = delegate; + } + + @Override + public int capacity() { + return delegate.capacity(); + } + + @Override + public int readerOffset() { + return delegate.readerOffset(); + } + + @Override + public Buffer readerOffset(int offset) { + return delegate.readerOffset(offset); + } + + @Override + public int writerOffset() { + return delegate.writerOffset(); + } + + @Override + public Buffer writerOffset(int offset) { + return delegate.writerOffset(offset); + } + + @Override + public int readableBytes() { + return delegate.readableBytes(); + } + + @Override + public int writableBytes() { + return delegate.writableBytes(); + } + + @Override + public Buffer fill(byte value) { + return delegate.fill(value); + } + + @Override + public long nativeAddress() { + return delegate.nativeAddress(); + } + + @Override + public Buffer makeReadOnly() { + return delegate.makeReadOnly(); + } + + @Override + public boolean readOnly() { + return delegate.readOnly(); + } + + @Override + public void copyInto(int srcPos, byte[] dest, int destPos, int length) { + delegate.copyInto(srcPos, dest, destPos, length); + } + + @Override + public void copyInto(int srcPos, ByteBuffer dest, int destPos, int length) { + delegate.copyInto(srcPos, dest, destPos, length); + } + + @Override + public void copyInto(int srcPos, Buffer dest, int destPos, int length) { + delegate.copyInto(srcPos, dest, destPos, length); + } + + @Override + public Buffer writeBytes(Buffer source) { + return delegate.writeBytes(source); + } + + @Override + public Buffer writeBytes(byte[] 
source) { + return delegate.writeBytes(source); + } + + @Override + public Buffer resetOffsets() { + return delegate.resetOffsets(); + } + + @Override + public ByteCursor openCursor() { + return delegate.openCursor(); + } + + @Override + public ByteCursor openCursor(int fromOffset, int length) { + return delegate.openCursor(fromOffset, length); + } + + @Override + public ByteCursor openReverseCursor() { + return delegate.openReverseCursor(); + } + + @Override + public ByteCursor openReverseCursor(int fromOffset, int length) { + return delegate.openReverseCursor(fromOffset, length); + } + + @Override + public Buffer ensureWritable(int size) { + return delegate.ensureWritable(size); + } + + @Override + public Buffer ensureWritable(int size, int minimumGrowth, boolean allowCompaction) { + return delegate.ensureWritable(size, minimumGrowth, allowCompaction); + } + + @Override + public Buffer copy() { + return delegate.copy(); + } + + @Override + public Buffer copy(int offset, int length) { + return delegate.copy(offset, length); + } + + @Override + public Buffer split() { + return delegate.split(); + } + + @Override + public Buffer split(int splitOffset) { + return delegate.split(splitOffset); + } + + @Override + public Buffer compact() { + return delegate.compact(); + } + + @Override + public int countComponents() { + return delegate.countComponents(); + } + + @Override + public int countReadableComponents() { + return delegate.countReadableComponents(); + } + + @Override + public int countWritableComponents() { + return delegate.countWritableComponents(); + } + + @Override + public int forEachReadable(int initialIndex, + ReadableComponentProcessor processor) throws E { + return delegate.forEachReadable(initialIndex, processor); + } + + @Override + public int forEachWritable(int initialIndex, + WritableComponentProcessor processor) throws E { + return delegate.forEachWritable(initialIndex, processor); + } + + @Override + public byte readByte() { + return 
delegate.readByte(); + } + + @Override + public byte getByte(int roff) { + return delegate.getByte(roff); + } + + @Override + public int readUnsignedByte() { + return delegate.readUnsignedByte(); + } + + @Override + public int getUnsignedByte(int roff) { + return delegate.getUnsignedByte(roff); + } + + @Override + public Buffer writeByte(byte value) { + return delegate.writeByte(value); + } + + @Override + public Buffer setByte(int woff, byte value) { + return delegate.setByte(woff, value); + } + + @Override + public Buffer writeUnsignedByte(int value) { + return delegate.writeUnsignedByte(value); + } + + @Override + public Buffer setUnsignedByte(int woff, int value) { + return delegate.setUnsignedByte(woff, value); + } + + @Override + public char readChar() { + return delegate.readChar(); + } + + @Override + public char getChar(int roff) { + return delegate.getChar(roff); + } + + @Override + public Buffer writeChar(char value) { + return delegate.writeChar(value); + } + + @Override + public Buffer setChar(int woff, char value) { + return delegate.setChar(woff, value); + } + + @Override + public short readShort() { + return delegate.readShort(); + } + + @Override + public short getShort(int roff) { + return delegate.getShort(roff); + } + + @Override + public int readUnsignedShort() { + return delegate.readUnsignedShort(); + } + + @Override + public int getUnsignedShort(int roff) { + return delegate.getUnsignedShort(roff); + } + + @Override + public Buffer writeShort(short value) { + return delegate.writeShort(value); + } + + @Override + public Buffer setShort(int woff, short value) { + return delegate.setShort(woff, value); + } + + @Override + public Buffer writeUnsignedShort(int value) { + return delegate.writeUnsignedShort(value); + } + + @Override + public Buffer setUnsignedShort(int woff, int value) { + return delegate.setUnsignedShort(woff, value); + } + + @Override + public int readMedium() { + return delegate.readMedium(); + } + + @Override + public int 
getMedium(int roff) { + return delegate.getMedium(roff); + } + + @Override + public int readUnsignedMedium() { + return delegate.readUnsignedMedium(); + } + + @Override + public int getUnsignedMedium(int roff) { + return delegate.getUnsignedMedium(roff); + } + + @Override + public Buffer writeMedium(int value) { + return delegate.writeMedium(value); + } + + @Override + public Buffer setMedium(int woff, int value) { + return delegate.setMedium(woff, value); + } + + @Override + public Buffer writeUnsignedMedium(int value) { + return delegate.writeUnsignedMedium(value); + } + + @Override + public Buffer setUnsignedMedium(int woff, int value) { + return delegate.setUnsignedMedium(woff, value); + } + + @Override + public int readInt() { + return delegate.readInt(); + } + + @Override + public int getInt(int roff) { + return delegate.getInt(roff); + } + + @Override + public long readUnsignedInt() { + return delegate.readUnsignedInt(); + } + + @Override + public long getUnsignedInt(int roff) { + return delegate.getUnsignedInt(roff); + } + + @Override + public Buffer writeInt(int value) { + return delegate.writeInt(value); + } + + @Override + public Buffer setInt(int woff, int value) { + return delegate.setInt(woff, value); + } + + @Override + public Buffer writeUnsignedInt(long value) { + return delegate.writeUnsignedInt(value); + } + + @Override + public Buffer setUnsignedInt(int woff, long value) { + return delegate.setUnsignedInt(woff, value); + } + + @Override + public float readFloat() { + return delegate.readFloat(); + } + + @Override + public float getFloat(int roff) { + return delegate.getFloat(roff); + } + + @Override + public Buffer writeFloat(float value) { + return delegate.writeFloat(value); + } + + @Override + public Buffer setFloat(int woff, float value) { + return delegate.setFloat(woff, value); + } + + @Override + public long readLong() { + return delegate.readLong(); + } + + @Override + public long getLong(int roff) { + return delegate.getLong(roff); + } 
+ + @Override + public Buffer writeLong(long value) { + return delegate.writeLong(value); + } + + @Override + public Buffer setLong(int woff, long value) { + return delegate.setLong(woff, value); + } + + @Override + public double readDouble() { + return delegate.readDouble(); + } + + @Override + public double getDouble(int roff) { + return delegate.getDouble(roff); + } + + @Override + public Buffer writeDouble(double value) { + return delegate.writeDouble(value); + } + + @Override + public Buffer setDouble(int woff, double value) { + return delegate.setDouble(woff, value); + } + + @Override + public Send send() { + return delegate.send(); + } + + @Override + public void close() { + delegate.close(); + } + + @Override + public boolean isAccessible() { + return delegate.isAccessible(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/ByteCursor.java b/buffer/src/main/java/io/netty/buffer/api/ByteCursor.java new file mode 100644 index 00000000000..271e1829d93 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/ByteCursor.java @@ -0,0 +1,72 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.api; + +import io.netty.util.ByteProcessor; + +/** + * The ByteCursor scans through a sequence of bytes. + * This is similar to {@link ByteProcessor}, but for external iteration rather than internal iteration. 
+ * The external iteration allows the callers to control the pace of the iteration. + */ +public interface ByteCursor { + /** + * Check if the iterator has at least one byte left, and if so, read that byte and move the cursor forward. + * The byte will then be available through the {@link #getByte()}. + * + * @return {@code true} if the cursor read a byte and moved forward, otherwise {@code false}. + */ + boolean readByte(); + + /** + * Return the last byte that was read by {@link #readByte()}. + * If {@link #readByte()} has not been called on this cursor before, then {@code -1} is returned. + * + * @return The next byte that was read by the most recent successful call to {@link #readByte()}. + */ + byte getByte(); + + /** + * The current position of this iterator into the underlying sequence of bytes. + * For instance, if we are iterating a buffer, this would be the iterators current offset into the buffer. + * + * @return The current iterator offset into the underlying sequence of bytes. + */ + int currentOffset(); + + /** + * Get the current number of bytes left in the iterator. + * + * @return The number of bytes left in the iterator. + */ + int bytesLeft(); + + /** + * Process the remaining bytes in this iterator with the given {@link ByteProcessor}. + * This method consumes the iterator. + * + * @param processor The processor to use for processing the bytes in the iterator. + * @return The number of bytes processed, if the {@link ByteProcessor#process(byte) process} method returned + * {@code false}, or {@code -1} if the whole iterator was processed. + */ + default int process(ByteProcessor processor) { + boolean requestMore = true; + int count = 0; + while (readByte() && (requestMore = processor.process(getByte()))) { + count++; + } + return requestMore? 
-1 : count; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/CompositeBuffer.java b/buffer/src/main/java/io/netty/buffer/api/CompositeBuffer.java new file mode 100644 index 00000000000..26033a6cb26 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/CompositeBuffer.java @@ -0,0 +1,1735 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import io.netty.buffer.api.internal.ResourceSupport; +import io.netty.buffer.api.internal.Statics; + +import java.nio.ByteBuffer; +import java.nio.ReadOnlyBufferException; +import java.util.Arrays; +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.Objects; +import java.util.Set; +import java.util.stream.Stream; + +import static io.netty.buffer.api.internal.Statics.bufferIsClosed; +import static io.netty.buffer.api.internal.Statics.bufferIsReadOnly; +import static java.lang.Math.addExact; + +/** + * The {@code CompositeBuffer} is a concrete {@link Buffer} implementation that make a number of other buffers appear + * as one. A composite buffer behaves the same as a normal, non-composite buffer in every way, so you normally don't + * need to handle them specially. + *

    + * A composite buffer is constructed using one of the {@code compose} methods: + *

      + *
    • + * {@link #compose(BufferAllocator, Send[])} creates a composite buffer from the buffers that are sent to it via + * the passed in send objects. Since {@link Send#receive()} transfers ownership, the resulting composite buffer + * will have ownership, because it is guaranteed that there are no other references to its constituent buffers. + *
    • + *
    • + * {@link #compose(BufferAllocator)} creates an empty, zero capacity, composite buffer. Such empty buffers may + * change their {@linkplain #readOnly() read-only} states when they gain their first component. + *
    • + *
    + * Composite buffers can later be extended with internally allocated components, with {@link #ensureWritable(int)}, + * or with externally allocated buffers, using {@link #extendWith(Send)}. + * + *

    Constituent buffer requirements

    + * + * The buffers that are being composed to form the composite buffer need to live up to a number of requirements. + * Basically, if we imagine that the constituent buffers have their memory regions concatenated together, then the + * result needs to make sense. + *

    + * The read and write offsets of the constituent buffers must be arranged such that there are no "gaps" when viewed + * as a single connected chunk of memory. + * Specifically, there can be at most one buffer whose write offset is neither zero nor at capacity, + * and all buffers prior to it must have their write offsets at capacity, and all buffers after it must have a + * write-offset of zero. + * Likewise, there can be at most one buffer whose read offset is neither zero nor at capacity, + * and all buffers prior to it must have their read offsets at capacity, and all buffers after it must have a read + * offset of zero. + * Furthermore, the sum of the read offsets must be less than or equal to the sum of the write-offsets. + *

    + * Reads and writes to the composite buffer that modify the read or write offsets will also modify the relevant + * offsets in the constituent buffers. + *

    + * It is not a requirement that the buffers have the same size. + *

    + * It is not a requirement that the buffers are allocated by this allocator, but if + * {@link Buffer#ensureWritable(int)} is called on the composed buffer, and the composed buffer needs to be + * expanded, then this allocator instance will be used for allocating the extra memory. + * + *

    Ownership and Send

    + * + * {@linkplain Resource#send() Sending} a composite buffer implies sending all of its constituent buffers. + * For sending to be possible, both the composite buffer itself, and all of its constituent buffers, must be in a + * state that permits them being sent. This should be the case by default, as it shouldn't be possible to create + * composite buffers that can't be sent. + */ +public final class CompositeBuffer extends ResourceSupport implements Buffer { + /** + * The max array size is JVM implementation dependant, but most seem to settle on {@code Integer.MAX_VALUE - 8}. + * We set the max composite buffer capacity to the same, since it would otherwise be impossible to create a + * non-composite copy of the buffer. + */ + private static final int MAX_CAPACITY = Integer.MAX_VALUE - 8; + private static final Drop COMPOSITE_DROP = new Drop<>() { + @Override + public void drop(CompositeBuffer buf) { + buf.makeInaccessible(); + RuntimeException re = null; + for (Buffer b : buf.bufs) { + try { + b.close(); + } catch (RuntimeException e) { + if (re == null) { + re = e; + } else { + re.addSuppressed(e); + } + } + } + } + + @Override + public String toString() { + return "COMPOSITE_DROP"; + } + }; + private static final Buffer[] EMPTY_BUFFER_ARRAY = new Buffer[0]; + + private final BufferAllocator allocator; + private final TornBufferAccessor tornBufAccessors; + private Buffer[] bufs; + private int[] offsets; // The offset, for the composite buffer, where each constituent buffer starts. + private int capacity; + private int roff; + private int woff; + private int subOffset; // The next offset *within* a consituent buffer to read from or write to. + private boolean closed; + private boolean readOnly; + + /** + * Compose the given sequence of sends of buffers and present them as a single buffer. + *

    + * When a composite buffer is closed, all of its constituent component buffers are closed as well. + *

    + * See the class documentation for more information on what is required of the given buffers for composition to be + * allowed. + * + * @param allocator The allocator for the composite buffer. This allocator will be used e.g. to service + * {@link #ensureWritable(int)} calls. + * @param sends The sent buffers to compose into a single buffer view. + * @return A buffer composed of, and backed by, the given buffers. + * @throws IllegalStateException if one of the sends have already been received. The remaining buffers and sends + * will be closed and discarded, respectively. + */ + @SafeVarargs + public static CompositeBuffer compose(BufferAllocator allocator, Send... sends) { + Buffer[] bufs = new Buffer[sends.length]; + RuntimeException ise = null; + for (int i = 0; i < sends.length; i++) { + if (ise != null) { + try { + sends[i].close(); + } catch (Exception closeExc) { + ise.addSuppressed(closeExc); + } + } else { + try { + bufs[i] = sends[i].receive(); + } catch (RuntimeException e) { + // We catch RuntimeException instead of IllegalStateException to ensure cleanup always happens + // regardless of the exception thrown. + ise = e; + for (int j = 0; j < i; j++) { + try { + bufs[j].close(); + } catch (Exception closeExc) { + ise.addSuppressed(closeExc); + } + } + } + } + } + if (ise != null) { + throw ise; + } + return new CompositeBuffer(allocator, filterExternalBufs(Arrays.stream(bufs)), COMPOSITE_DROP); + } + + /** + * Create an empty composite buffer, that has no components. The buffer can be extended with components using either + * {@link #ensureWritable(int)} or {@link #extendWith(Send)}. + * + * @param allocator The allocator for the composite buffer. This allocator will be used e.g. to service + * {@link #ensureWritable(int)} calls. + * @return A composite buffer that has no components, and has a capacity of zero. 
+ */ + public static CompositeBuffer compose(BufferAllocator allocator) { + return new CompositeBuffer(allocator, EMPTY_BUFFER_ARRAY, COMPOSITE_DROP); + } + + /** + * Check if the given buffer is a {@linkplain #compose(BufferAllocator, Send...) composite} buffer or not. + * @param composite The buffer to check. + * @return {@code true} if the given buffer was created with {@link #compose(BufferAllocator, Send...)}, + * {@code false} otherwise. + */ + public static boolean isComposite(Buffer composite) { + return composite.getClass() == CompositeBuffer.class; + } + + private static Buffer[] filterExternalBufs(Stream refs) { + // We filter out all zero-capacity buffers because they wouldn't contribute to the composite buffer anyway, + // and also, by ensuring that all constituent buffers contribute to the size of the composite buffer, + // we make sure that the number of composite buffers will never become greater than the number of bytes in + // the composite buffer. + // This restriction guarantees that methods like countComponents, forEachReadable and forEachWritable, + // will never overflow their component counts. + // Allocating a new array unconditionally also prevents external modification of the array. + Buffer[] bufs = refs + .filter(CompositeBuffer::discardEmpty) + .flatMap(CompositeBuffer::flattenBuffer) + .toArray(Buffer[]::new); + // Make sure there are no duplicates among the buffers. 
+ Set duplicatesCheck = Collections.newSetFromMap(new IdentityHashMap<>()); + duplicatesCheck.addAll(Arrays.asList(bufs)); + if (duplicatesCheck.size() < bufs.length) { + IllegalArgumentException iae = new IllegalArgumentException( + "Cannot create composite buffer with duplicate constituent buffer components."); + for (Buffer buf : bufs) { + try { + buf.close(); + } catch (Exception closeExc) { + iae.addSuppressed(closeExc); + } + } + throw iae; + } + return bufs; + } + + private static boolean discardEmpty(Buffer buf) { + if (buf.capacity() > 0) { + return true; + } else { + // If we filter a buffer out, then we must make sure to close it since it's ownership was sent to us. + buf.close(); + return false; + } + } + + private static Stream flattenBuffer(Buffer buf) { + if (buf instanceof CompositeBuffer) { + // Extract components so composite buffers always have non-composite components. + var composite = (CompositeBuffer) buf; + return Stream.of(composite.bufs); + } + return Stream.of(buf); + } + + private CompositeBuffer(BufferAllocator allocator, Buffer[] bufs, Drop drop) { + super(drop); + try { + this.allocator = Objects.requireNonNull(allocator, "BufferAllocator cannot be null."); + if (bufs.length > 0) { + boolean targetReadOnly = bufs[0].readOnly(); + for (Buffer buf : bufs) { + if (buf.readOnly() != targetReadOnly) { + throw new IllegalArgumentException("Constituent buffers have inconsistent read-only state."); + } + } + readOnly = targetReadOnly; + } + this.bufs = bufs; + computeBufferOffsets(); + tornBufAccessors = new TornBufferAccessor(this); + } catch (Exception e) { + // Always close bufs on exception, since we've taken ownership of them at this point. 
+ for (Buffer buf : bufs) { + try { + buf.close(); + } catch (Exception closeExc) { + e.addSuppressed(closeExc); + } + } + throw e; + } + } + + private void computeBufferOffsets() { + if (bufs.length > 0) { + int woff = 0; + int roff = 0; + boolean woffMidpoint = false; + for (Buffer buf : bufs) { + if (buf.writableBytes() == 0) { + woff += buf.capacity(); + } else if (!woffMidpoint) { + woff += buf.writerOffset(); + woffMidpoint = true; + } else if (buf.writerOffset() != 0) { + throw new IllegalArgumentException( + "The given buffers cannot be composed because they leave an unwritten gap: " + + Arrays.toString(bufs) + '.'); + } + } + boolean roffMidpoint = false; + for (Buffer buf : bufs) { + if (buf.readableBytes() == 0 && buf.writableBytes() == 0) { + roff += buf.capacity(); + } else if (!roffMidpoint) { + roff += buf.readerOffset(); + roffMidpoint = true; + } else if (buf.readerOffset() != 0) { + throw new IllegalArgumentException( + "The given buffers cannot be composed because they leave an unread gap: " + + Arrays.toString(bufs) + '.'); + } + } + assert roff <= woff: + "The given buffers place the read offset ahead of the write offset: " + Arrays.toString(bufs) + '.'; + // Commit computed offsets. + this.woff = woff; + this.roff = roff; + } + + offsets = new int[bufs.length]; + long cap = 0; + for (int i = 0; i < bufs.length; i++) { + offsets[i] = (int) cap; + cap += bufs[i].capacity(); + } + if (cap > MAX_CAPACITY) { + throw new IllegalArgumentException( + "Combined size of the constituent buffers is too big. 
" + + "The maximum buffer capacity is " + MAX_CAPACITY + " (Integer.MAX_VALUE - 8), " + + "but the sum of the constituent buffer capacities was " + cap + '.'); + } + capacity = (int) cap; + } + + @Override + public String toString() { + return "Buffer[roff:" + roff + ", woff:" + woff + ", cap:" + capacity + ']'; + } + + @Override + protected RuntimeException createResourceClosedException() { + return bufferIsClosed(this); + } + + @Override + public int capacity() { + return capacity; + } + + @Override + public int readerOffset() { + return roff; + } + + @Override + public CompositeBuffer readerOffset(int index) { + prepRead(index, 0); + int indexLeft = index; + for (Buffer buf : bufs) { + buf.readerOffset(Math.min(indexLeft, buf.capacity())); + indexLeft = Math.max(0, indexLeft - buf.capacity()); + } + roff = index; + return this; + } + + @Override + public int writerOffset() { + return woff; + } + + @Override + public CompositeBuffer writerOffset(int index) { + checkWriteBounds(index, 0); + int indexLeft = index; + for (Buffer buf : bufs) { + buf.writerOffset(Math.min(indexLeft, buf.capacity())); + indexLeft = Math.max(0, indexLeft - buf.capacity()); + } + woff = index; + return this; + } + + @Override + public CompositeBuffer fill(byte value) { + for (Buffer buf : bufs) { + buf.fill(value); + } + return this; + } + + @Override + public long nativeAddress() { + return 0; + } + + @Override + public CompositeBuffer makeReadOnly() { + for (Buffer buf : bufs) { + buf.makeReadOnly(); + } + readOnly = true; + return this; + } + + @Override + public boolean readOnly() { + return readOnly; + } + + @Override + public CompositeBuffer copy(int offset, int length) { + checkWriteBounds(offset, length); + if (offset < 0 || length < 0) { + throw new IllegalArgumentException( + "Offset and length cannot be negative, but offset was " + + offset + ", and length was " + length + '.'); + } + Buffer choice = (Buffer) chooseBuffer(offset, 0); + Buffer[] copies; + + if (length > 0) { + 
copies = new Buffer[bufs.length]; + int off = subOffset; + int cap = length; + int i; + int j = 0; + for (i = searchOffsets(offset); cap > 0; i++) { + var buf = bufs[i]; + int avail = buf.capacity() - off; + copies[j++] = buf.copy(off, Math.min(cap, avail)); + cap -= avail; + off = 0; + } + copies = Arrays.copyOf(copies, j); + } else { + // Specialize for length == 0, since we must copy from at least one constituent buffer. + copies = new Buffer[] { choice.copy(subOffset, 0) }; + } + + return new CompositeBuffer(allocator, copies, COMPOSITE_DROP); + } + + @Override + public void copyInto(int srcPos, byte[] dest, int destPos, int length) { + copyInto(srcPos, (s, b, d, l) -> b.copyInto(s, dest, d, l), destPos, length); + } + + @Override + public void copyInto(int srcPos, ByteBuffer dest, int destPos, int length) { + if (dest.isReadOnly()) { + throw new ReadOnlyBufferException(); + } + copyInto(srcPos, (s, b, d, l) -> b.copyInto(s, dest, d, l), destPos, length); + } + + private void copyInto(int srcPos, CopyInto dest, int destPos, int length) { + if (length < 0) { + throw new IndexOutOfBoundsException("Length cannot be negative: " + length + '.'); + } + if (srcPos < 0) { + throw indexOutOfBounds(srcPos, false); + } + if (srcPos + length > capacity) { + throw indexOutOfBounds(srcPos + length, false); + } + while (length > 0) { + var buf = (Buffer) chooseBuffer(srcPos, 0); + int toCopy = Math.min(buf.capacity() - subOffset, length); + dest.copyInto(subOffset, buf, destPos, toCopy); + srcPos += toCopy; + destPos += toCopy; + length -= toCopy; + } + } + + @FunctionalInterface + private interface CopyInto { + void copyInto(int srcPos, Buffer src, int destPos, int length); + } + + @Override + public void copyInto(int srcPos, Buffer dest, int destPos, int length) { + if (length < 0) { + throw new IndexOutOfBoundsException("Length cannot be negative: " + length + '.'); + } + if (srcPos < 0) { + throw indexOutOfBounds(srcPos, false); + } + if (addExact(srcPos, length) > 
capacity) { + throw indexOutOfBounds(srcPos + length, false); + } + if (dest.readOnly()) { + throw bufferIsReadOnly(dest); + } + + // Iterate in reverse to account for src and dest buffer overlap. + // todo optimise by delegating to constituent buffers. + var cursor = openReverseCursor(srcPos + length - 1, length); + while (cursor.readByte()) { + dest.setByte(destPos + --length, cursor.getByte()); + } + } + + @Override + public ByteCursor openCursor() { + return openCursor(readerOffset(), readableBytes()); + } + + @Override + public ByteCursor openCursor(int fromOffset, int length) { + if (fromOffset < 0) { + throw new IllegalArgumentException("The fromOffset cannot be negative: " + fromOffset + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (capacity < addExact(fromOffset, length)) { + throw new IllegalArgumentException("The fromOffset+length is beyond the end of the buffer: " + + "fromOffset=" + fromOffset + ", length=" + length + '.'); + } + int startBufferIndex = searchOffsets(fromOffset); + int off = fromOffset - offsets[startBufferIndex]; + Buffer startBuf = bufs[startBufferIndex]; + ByteCursor startCursor = startBuf.openCursor(off, Math.min(startBuf.capacity() - off, length)); + return new ByteCursor() { + int index = fromOffset; + final int end = fromOffset + length; + int bufferIndex = startBufferIndex; + int initOffset = startCursor.currentOffset(); + ByteCursor cursor = startCursor; + byte byteValue = -1; + + @Override + public boolean readByte() { + if (cursor.readByte()) { + byteValue = cursor.getByte(); + return true; + } + if (bytesLeft() > 0) { + nextCursor(); + cursor.readByte(); + byteValue = cursor.getByte(); + return true; + } + return false; + } + + private void nextCursor() { + bufferIndex++; + Buffer nextBuf = bufs[bufferIndex]; + cursor = nextBuf.openCursor(0, Math.min(nextBuf.capacity(), bytesLeft())); + initOffset = 0; + } + + @Override + public byte getByte() 
{ + return byteValue; + } + + @Override + public int currentOffset() { + int currOff = cursor.currentOffset(); + index += currOff - initOffset; + initOffset = currOff; + return index; + } + + @Override + public int bytesLeft() { + return end - currentOffset(); + } + }; + } + + @Override + public ByteCursor openReverseCursor(int fromOffset, int length) { + if (fromOffset < 0) { + throw new IllegalArgumentException("The fromOffset cannot be negative: " + fromOffset + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (fromOffset - length < -1) { + throw new IllegalArgumentException("The fromOffset-length would underflow the buffer: " + + "fromOffset=" + fromOffset + ", length=" + length + '.'); + } + int startBufferIndex = searchOffsets(fromOffset); + int off = fromOffset - offsets[startBufferIndex]; + Buffer startBuf = bufs[startBufferIndex]; + ByteCursor startCursor = startBuf.openReverseCursor(off, Math.min(off + 1, length)); + return new ByteCursor() { + int index = fromOffset; + final int end = fromOffset - length; + int bufferIndex = startBufferIndex; + int initOffset = startCursor.currentOffset(); + ByteCursor cursor = startCursor; + byte byteValue = -1; + + @Override + public boolean readByte() { + if (cursor.readByte()) { + byteValue = cursor.getByte(); + return true; + } + if (bytesLeft() > 0) { + nextCursor(); + cursor.readByte(); + byteValue = cursor.getByte(); + return true; + } + return false; + } + + private void nextCursor() { + bufferIndex--; + Buffer nextBuf = bufs[bufferIndex]; + int length = Math.min(nextBuf.capacity(), bytesLeft()); + int offset = nextBuf.capacity() - 1; + cursor = nextBuf.openReverseCursor(offset, length); + initOffset = offset; + } + + @Override + public byte getByte() { + return byteValue; + } + + @Override + public int currentOffset() { + int currOff = cursor.currentOffset(); + index -= initOffset - currOff; + initOffset = currOff; + return index; + } 
+ + @Override + public int bytesLeft() { + return currentOffset() - end; + } + }; + } + + @Override + public CompositeBuffer ensureWritable(int size, int minimumGrowth, boolean allowCompaction) { + if (!isAccessible()) { + throw bufferIsClosed(this); + } + if (!isOwned()) { + throw new IllegalStateException("Buffer is not owned. Only owned buffers can call ensureWritable."); + } + if (size < 0) { + throw new IllegalArgumentException("Cannot ensure writable for a negative size: " + size + '.'); + } + if (minimumGrowth < 0) { + throw new IllegalArgumentException("The minimum growth cannot be negative: " + minimumGrowth + '.'); + } + if (readOnly) { + throw bufferIsReadOnly(this); + } + if (writableBytes() >= size) { + // We already have enough space. + return this; + } + + if (allowCompaction && size <= roff) { + // Let's see if we can solve some or all of the requested size with compaction. + // We always compact as much as is possible, regardless of size. This amortizes our work. + int compactableBuffers = 0; + for (Buffer buf : bufs) { + if (buf.capacity() != buf.readerOffset()) { + break; + } + compactableBuffers++; + } + if (compactableBuffers > 0) { + Buffer[] compactable; + if (compactableBuffers < bufs.length) { + compactable = new Buffer[compactableBuffers]; + System.arraycopy(bufs, 0, compactable, 0, compactable.length); + System.arraycopy(bufs, compactable.length, bufs, 0, bufs.length - compactable.length); + System.arraycopy(compactable, 0, bufs, bufs.length - compactable.length, compactable.length); + } else { + compactable = bufs; + } + for (Buffer buf : compactable) { + buf.resetOffsets(); + } + computeBufferOffsets(); + if (writableBytes() >= size) { + // Now we have enough space. + return this; + } + } else if (bufs.length == 1) { + // If we only have a single component buffer, then we can safely compact that in-place. + bufs[0].compact(); + computeBufferOffsets(); + if (writableBytes() >= size) { + // Now we have enough space. 
+ return this; + } + } + } + + int growth = Math.max(size - writableBytes(), minimumGrowth); + Statics.assertValidBufferSize(capacity() + (long) growth); + Buffer extension = allocator.allocate(growth); + unsafeExtendWith(extension); + return this; + } + + /** + * Extend this composite buffer with the given extension buffer. + * This works as if the extension had originally been included at the end of the list of constituent buffers when + * the composite buffer was created. + * The extension buffer is added to the end of this composite buffer, which is modified in-place. + * + * @see #compose(BufferAllocator, Send...) + * @param extension The buffer to extend the composite buffer with. + * @return This composite buffer instance. + */ + public CompositeBuffer extendWith(Send extension) { + Buffer buffer = Objects.requireNonNull(extension, "Extension buffer cannot be null.").receive(); + if (!isAccessible() || !isOwned()) { + buffer.close(); + if (!isAccessible()) { + throw bufferIsClosed(this); + } + throw new IllegalStateException("This buffer cannot be extended because it is not in an owned state."); + } + if (bufs.length > 0 && buffer.readOnly() != readOnly()) { + buffer.close(); + throw new IllegalArgumentException( + "This buffer is " + (readOnly? "read-only" : "writable") + ", " + + "and cannot be extended with a buffer that is " + + (buffer.readOnly()? "read-only." : "writable.")); + } + + long extensionCapacity = buffer.capacity(); + if (extensionCapacity == 0) { + // Extending by a zero-sized buffer makes no difference. Especially since it's not allowed to change the + // capacity of buffers that are constituents of composite buffers. + // This also ensures that methods like countComponents, and forEachReadable, do not have to worry about + // overflow in their component counters. 
+ buffer.close(); + return this; + } + + long newSize = capacity() + extensionCapacity; + Statics.assertValidBufferSize(newSize); + + Buffer[] restoreTemp = bufs; // We need this to restore our buffer array, in case offset computations fail. + try { + if (buffer instanceof CompositeBuffer) { + // If the extension is itself a composite buffer, then extend this one by all the constituent + // component buffers. + CompositeBuffer compositeExtension = (CompositeBuffer) buffer; + Buffer[] addedBuffers = compositeExtension.bufs; + Set duplicatesCheck = Collections.newSetFromMap(new IdentityHashMap<>()); + duplicatesCheck.addAll(Arrays.asList(bufs)); + duplicatesCheck.addAll(Arrays.asList(addedBuffers)); + if (duplicatesCheck.size() < bufs.length + addedBuffers.length) { + throw extensionDuplicatesException(); + } + int extendAtIndex = bufs.length; + bufs = Arrays.copyOf(bufs, extendAtIndex + addedBuffers.length); + System.arraycopy(addedBuffers, 0, bufs, extendAtIndex, addedBuffers.length); + computeBufferOffsets(); + } else { + for (Buffer buf : restoreTemp) { + if (buf == buffer) { + throw extensionDuplicatesException(); + } + } + unsafeExtendWith(buffer); + } + if (restoreTemp.length == 0) { + readOnly = buffer.readOnly(); + } + } catch (Exception e) { + bufs = restoreTemp; + throw e; + } + return this; + } + + private static IllegalArgumentException extensionDuplicatesException() { + return new IllegalArgumentException( + "The composite buffer cannot be extended with the given extension," + + " as it would cause the buffer to have duplicate constituent buffers."); + } + + private void unsafeExtendWith(Buffer extension) { + bufs = Arrays.copyOf(bufs, bufs.length + 1); + bufs[bufs.length - 1] = extension; + computeBufferOffsets(); + } + + private void checkSplit(int splitOffset) { + if (splitOffset < 0) { + throw new IllegalArgumentException("The split offset cannot be negative: " + splitOffset + '.'); + } + if (capacity() < splitOffset) { + throw new 
IllegalArgumentException("The split offset cannot be greater than the buffer capacity, " + + "but the split offset was " + splitOffset + ", and capacity is " + capacity() + '.'); + } + if (!isAccessible()) { + throw attachTrace(bufferIsClosed(this)); + } + if (!isOwned()) { + throw new IllegalStateException("Cannot split a buffer that is not owned."); + } + } + + @Override + public CompositeBuffer split(int splitOffset) { + checkSplit(splitOffset); + if (bufs.length == 0) { + // Splitting a zero-length buffer is trivial. + return new CompositeBuffer(allocator, bufs, unsafeGetDrop()); + } + + int i = searchOffsets(splitOffset); + int off = splitOffset - offsets[i]; + Buffer[] splits = Arrays.copyOf(bufs, off == 0? i : 1 + i); + bufs = Arrays.copyOfRange(bufs, off == bufs[i].capacity()? 1 + i : i, bufs.length); + if (off > 0 && splits.length > 0 && off < splits[splits.length - 1].capacity()) { + splits[splits.length - 1] = bufs[0].split(off); + } + computeBufferOffsets(); + return buildSplitBuffer(splits); + } + + private CompositeBuffer buildSplitBuffer(Buffer[] splits) { + // TODO do we need to preserve read-only state of empty buffer? + return new CompositeBuffer(allocator, splits, unsafeGetDrop()); + } + + /** + * Split this buffer at a component boundary that is less than or equal to the given offset. + *

    + * This method behaves the same as {@link #split(int)}, except no components are split. + * + * @param splitOffset The maximum split offset. The real split offset will be at a component boundary that is less + * than or equal to this offset. + * @return A new buffer with independent and exclusive ownership over the bytes from the beginning to a component + * boundary less than or equal to the given offset of this buffer. + */ + public CompositeBuffer splitComponentsFloor(int splitOffset) { + checkSplit(splitOffset); + if (bufs.length == 0) { + // Splitting a zero-length buffer is trivial. + return new CompositeBuffer(allocator, bufs, unsafeGetDrop()); + } + + int i = searchOffsets(splitOffset); + int off = splitOffset - offsets[i]; + if (off == bufs[i].capacity()) { + i++; + } + Buffer[] splits = Arrays.copyOf(bufs, i); + bufs = Arrays.copyOfRange(bufs, i, bufs.length); + computeBufferOffsets(); + return buildSplitBuffer(splits); + } + + /** + * Split this buffer at a component boundary that is greater than or equal to the given offset. + *

    + * This method behaves the same as {@link #split(int)}, except no components are split. + * + * @param splitOffset The minimum split offset. The real split offset will be at a component boundary that is + * greater than or equal to this offset. + * @return A new buffer with independent and exclusive ownership over the bytes from the beginning to a component + * boundary greater than or equal to the given offset of this buffer. + */ + public CompositeBuffer splitComponentsCeil(int splitOffset) { + checkSplit(splitOffset); + if (bufs.length == 0) { + // Splitting a zero-length buffer is trivial. + return new CompositeBuffer(allocator, bufs, unsafeGetDrop()); + } + + int i = searchOffsets(splitOffset); + int off = splitOffset - offsets[i]; + if (0 < off && off <= bufs[i].capacity()) { + i++; + } + Buffer[] splits = Arrays.copyOf(bufs, i); + bufs = Arrays.copyOfRange(bufs, i, bufs.length); + computeBufferOffsets(); + return buildSplitBuffer(splits); + } + + @Override + public CompositeBuffer compact() { + if (!isOwned()) { + throw new IllegalStateException("Buffer must be owned in order to compact."); + } + if (readOnly()) { + throw new BufferReadOnlyException("Buffer must be writable in order to compact, but was read-only."); + } + int distance = roff; + if (distance == 0) { + return this; + } + int pos = 0; + // TODO maybe we can delegate to a copyInto method, once it's more optimised + var cursor = openCursor(); + while (cursor.readByte()) { + setByte(pos, cursor.getByte()); + pos++; + } + readerOffset(0); + writerOffset(woff - distance); + return this; + } + + @Override + public int countComponents() { + int sum = 0; + for (Buffer buf : bufs) { + sum += buf.countComponents(); + } + return sum; + } + + @Override + public int countReadableComponents() { + int sum = 0; + for (Buffer buf : bufs) { + sum += buf.countReadableComponents(); + } + return sum; + } + + @Override + public int countWritableComponents() { + int sum = 0; + for (Buffer buf : bufs) { + sum += 
buf.countWritableComponents(); + } + return sum; + } + + @Override + public int forEachReadable(int initialIndex, ReadableComponentProcessor processor) + throws E { + checkReadBounds(readerOffset(), Math.max(1, readableBytes())); + int visited = 0; + for (Buffer buf : bufs) { + if (buf.readableBytes() > 0) { + int count = buf.forEachReadable(visited + initialIndex, processor); + if (count > 0) { + visited += count; + } else { + visited = -visited + count; + break; + } + } + } + return visited; + } + + @Override + public int forEachWritable(int initialIndex, WritableComponentProcessor processor) + throws E { + checkWriteBounds(writerOffset(), Math.max(1, writableBytes())); + int visited = 0; + for (Buffer buf : bufs) { + if (buf.writableBytes() > 0) { + int count = buf.forEachWritable(visited + initialIndex, processor); + if (count > 0) { + visited += count; + } else { + visited = -visited + count; + break; + } + } + } + return visited; + } + + // + @Override + public byte readByte() { + return prepRead(Byte.BYTES).readByte(); + } + + @Override + public byte getByte(int roff) { + return prepGet(roff, Byte.BYTES).getByte(subOffset); + } + + @Override + public int readUnsignedByte() { + return prepRead(Byte.BYTES).readUnsignedByte(); + } + + @Override + public int getUnsignedByte(int roff) { + return prepGet(roff, Byte.BYTES).getUnsignedByte(subOffset); + } + + @Override + public CompositeBuffer writeByte(byte value) { + prepWrite(Byte.BYTES).writeByte(value); + return this; + } + + @Override + public CompositeBuffer setByte(int woff, byte value) { + prepWrite(woff, Byte.BYTES).setByte(subOffset, value); + return this; + } + + @Override + public CompositeBuffer writeUnsignedByte(int value) { + prepWrite(Byte.BYTES).writeUnsignedByte(value); + return this; + } + + @Override + public CompositeBuffer setUnsignedByte(int woff, int value) { + prepWrite(woff, Byte.BYTES).setUnsignedByte(subOffset, value); + return this; + } + + @Override + public char readChar() { + return 
prepRead(2).readChar(); + } + + @Override + public char getChar(int roff) { + return prepGet(roff, 2).getChar(subOffset); + } + + @Override + public CompositeBuffer writeChar(char value) { + prepWrite(2).writeChar(value); + return this; + } + + @Override + public CompositeBuffer setChar(int woff, char value) { + prepWrite(woff, 2).setChar(subOffset, value); + return this; + } + + @Override + public short readShort() { + return prepRead(Short.BYTES).readShort(); + } + + @Override + public short getShort(int roff) { + return prepGet(roff, Short.BYTES).getShort(subOffset); + } + + @Override + public int readUnsignedShort() { + return prepRead(Short.BYTES).readShort(); + } + + @Override + public int getUnsignedShort(int roff) { + return prepGet(roff, Short.BYTES).getUnsignedShort(subOffset); + } + + @Override + public CompositeBuffer writeShort(short value) { + prepWrite(Short.BYTES).writeShort(value); + return this; + } + + @Override + public CompositeBuffer setShort(int woff, short value) { + prepWrite(woff, Short.BYTES).setShort(subOffset, value); + return this; + } + + @Override + public CompositeBuffer writeUnsignedShort(int value) { + prepWrite(Short.BYTES).writeUnsignedShort(value); + return this; + } + + @Override + public CompositeBuffer setUnsignedShort(int woff, int value) { + prepWrite(woff, Short.BYTES).setUnsignedShort(subOffset, value); + return this; + } + + @Override + public int readMedium() { + return prepRead(3).readMedium(); + } + + @Override + public int getMedium(int roff) { + return prepGet(roff, 3).getMedium(subOffset); + } + + @Override + public int readUnsignedMedium() { + return prepRead(3).readMedium(); + } + + @Override + public int getUnsignedMedium(int roff) { + return prepGet(roff, 3).getMedium(subOffset); + } + + @Override + public CompositeBuffer writeMedium(int value) { + prepWrite(3).writeMedium(value); + return this; + } + + @Override + public CompositeBuffer setMedium(int woff, int value) { + prepWrite(woff, 
3).setMedium(subOffset, value); + return this; + } + + @Override + public CompositeBuffer writeUnsignedMedium(int value) { + prepWrite(3).writeUnsignedMedium(value); + return this; + } + + @Override + public CompositeBuffer setUnsignedMedium(int woff, int value) { + prepWrite(woff, 3).setUnsignedMedium(subOffset, value); + return this; + } + + @Override + public int readInt() { + return prepRead(Integer.BYTES).readInt(); + } + + @Override + public int getInt(int roff) { + return prepGet(roff, Integer.BYTES).getInt(subOffset); + } + + @Override + public long readUnsignedInt() { + return prepRead(Integer.BYTES).readUnsignedInt(); + } + + @Override + public long getUnsignedInt(int roff) { + return prepGet(roff, Integer.BYTES).getUnsignedInt(subOffset); + } + + @Override + public CompositeBuffer writeInt(int value) { + prepWrite(Integer.BYTES).writeInt(value); + return this; + } + + @Override + public CompositeBuffer setInt(int woff, int value) { + prepWrite(woff, Integer.BYTES).setInt(subOffset, value); + return this; + } + + @Override + public CompositeBuffer writeUnsignedInt(long value) { + prepWrite(Integer.BYTES).writeUnsignedInt(value); + return this; + } + + @Override + public CompositeBuffer setUnsignedInt(int woff, long value) { + prepWrite(woff, Integer.BYTES).setUnsignedInt(subOffset, value); + return this; + } + + @Override + public float readFloat() { + return prepRead(Float.BYTES).readFloat(); + } + + @Override + public float getFloat(int roff) { + return prepGet(roff, Float.BYTES).getFloat(subOffset); + } + + @Override + public CompositeBuffer writeFloat(float value) { + prepWrite(Float.BYTES).writeFloat(value); + return this; + } + + @Override + public CompositeBuffer setFloat(int woff, float value) { + prepWrite(woff, Float.BYTES).setFloat(subOffset, value); + return this; + } + + @Override + public long readLong() { + return prepRead(Long.BYTES).readLong(); + } + + @Override + public long getLong(int roff) { + return prepGet(roff, 
Long.BYTES).getLong(subOffset); + } + + @Override + public CompositeBuffer writeLong(long value) { + prepWrite(Long.BYTES).writeLong(value); + return this; + } + + @Override + public CompositeBuffer setLong(int woff, long value) { + prepWrite(woff, Long.BYTES).setLong(subOffset, value); + return this; + } + + @Override + public double readDouble() { + return prepRead(Double.BYTES).readDouble(); + } + + @Override + public double getDouble(int roff) { + return prepGet(roff, Double.BYTES).getDouble(subOffset); + } + + @Override + public CompositeBuffer writeDouble(double value) { + prepWrite(Double.BYTES).writeDouble(value); + return this; + } + + @Override + public CompositeBuffer setDouble(int woff, double value) { + prepWrite(woff, Double.BYTES).setDouble(subOffset, value); + return this; + } + // + + @Override + protected Owned prepareSend() { + @SuppressWarnings("unchecked") + Send[] sends = new Send[bufs.length]; + try { + for (int i = 0; i < bufs.length; i++) { + sends[i] = bufs[i].send(); + } + } catch (Throwable throwable) { + // Repair our bufs array. 
+ for (int i = 0; i < sends.length; i++) { + if (sends[i] != null) { + try { + bufs[i] = sends[i].receive(); + } catch (Exception e) { + throwable.addSuppressed(e); + } + } + } + throw throwable; + } + boolean readOnly = this.readOnly; + makeInaccessible(); + return new Owned() { + @Override + public CompositeBuffer transferOwnership(Drop drop) { + Buffer[] received = new Buffer[sends.length]; + for (int i = 0; i < sends.length; i++) { + received[i] = sends[i].receive(); + } + var composite = new CompositeBuffer(allocator, received, drop); + composite.readOnly = readOnly; + drop.attach(composite); + return composite; + } + }; + } + + void makeInaccessible() { + capacity = 0; + roff = 0; + woff = 0; + readOnly = false; + closed = true; + } + + @Override + protected boolean isOwned() { + return super.isOwned() && allConstituentsAreOwned(); + } + + private boolean allConstituentsAreOwned() { + boolean result = true; + for (Buffer buf : bufs) { + result &= Statics.isOwned((ResourceSupport) buf); + } + return result; + } + + long readPassThrough() { + var buf = choosePassThroughBuffer(subOffset++); + assert buf != tornBufAccessors: "Recursive call to torn buffer."; + return buf.readUnsignedByte(); + } + + void writePassThrough(int value) { + var buf = choosePassThroughBuffer(subOffset++); + assert buf != tornBufAccessors: "Recursive call to torn buffer."; + buf.writeUnsignedByte(value); + } + + long getPassThrough(int roff) { + var buf = chooseBuffer(roff, 1); + assert buf != tornBufAccessors: "Recursive call to torn buffer."; + return buf.getUnsignedByte(subOffset); + } + + void setPassThrough(int woff, int value) { + var buf = chooseBuffer(woff, 1); + assert buf != tornBufAccessors: "Recursive call to torn buffer."; + buf.setUnsignedByte(subOffset, value); + } + + private BufferAccessor prepRead(int size) { + var buf = prepRead(roff, size); + roff += size; + return buf; + } + + private BufferAccessor prepRead(int index, int size) { + checkReadBounds(index, size); + 
return chooseBuffer(index, size); + } + + private void checkReadBounds(int index, int size) { + if (index < 0 || woff < index + size) { + throw indexOutOfBounds(index, false); + } + } + + private BufferAccessor prepGet(int index, int size) { + checkGetBounds(index, size); + return chooseBuffer(index, size); + } + + private void checkGetBounds(int index, int size) { + if (index < 0 || capacity < index + size) { + throw indexOutOfBounds(index, false); + } + } + + private BufferAccessor prepWrite(int size) { + var buf = prepWrite(woff, size); + woff += size; + return buf; + } + + private BufferAccessor prepWrite(int index, int size) { + checkWriteBounds(index, size); + return chooseBuffer(index, size); + } + + private void checkWriteBounds(int index, int size) { + if (index < 0 || capacity < index + size) { + throw indexOutOfBounds(index, true); + } + } + + private RuntimeException indexOutOfBounds(int index, boolean write) { + if (closed) { + return bufferIsClosed(this); + } + if (write && readOnly) { + return bufferIsReadOnly(this); + } + return new IndexOutOfBoundsException( + "Index " + index + " is out of bounds: [read 0 to " + woff + ", write 0 to " + + capacity + "]."); + } + + private BufferAccessor chooseBuffer(int index, int size) { + int i = searchOffsets(index); + if (i == bufs.length) { + // This happens when the read/write offsets are parked 1 byte beyond the end of the buffer. + // In that case it should not matter what buffer is returned, because it shouldn't be used anyway. + return null; + } + int off = index - offsets[i]; + Buffer candidate = bufs[i]; + if (off + size <= candidate.capacity()) { + subOffset = off; + return candidate; + } + subOffset = index; + return tornBufAccessors; + } + + private BufferAccessor choosePassThroughBuffer(int index) { + int i = searchOffsets(index); + return bufs[i]; + } + + private int searchOffsets(int index) { + int i = Arrays.binarySearch(offsets, index); + return i < 0? 
-(i + 2) : i; + } + + // + private static final class TornBufferAccessor implements BufferAccessor { + private final CompositeBuffer buf; + + private TornBufferAccessor(CompositeBuffer buf) { + this.buf = buf; + } + + @Override + public byte readByte() { + throw new AssertionError("Method should not be used."); + } + + @Override + public byte getByte(int roff) { + throw new AssertionError("Method should not be used."); + } + + @Override + public int readUnsignedByte() { + throw new AssertionError("Method should not be used."); + } + + @Override + public int getUnsignedByte(int roff) { + throw new AssertionError("Method should not be used."); + } + + @Override + public Buffer writeByte(byte value) { + throw new AssertionError("Method should not be used."); + } + + @Override + public Buffer setByte(int woff, byte value) { + throw new AssertionError("Method should not be used."); + } + + @Override + public Buffer writeUnsignedByte(int value) { + throw new AssertionError("Method should not be used."); + } + + @Override + public Buffer setUnsignedByte(int woff, int value) { + throw new AssertionError("Method should not be used."); + } + + @Override + public char readChar() { + return (char) (read() << 8 | read()); + } + + @Override + public char getChar(int roff) { + return (char) (read(roff) << 8 | read(roff + 1)); + } + + @Override + public Buffer writeChar(char value) { + write(value >>> 8); + write(value & 0xFF); + return buf; + } + + @Override + public Buffer setChar(int woff, char value) { + write(woff, value >>> 8); + write(woff + 1, value & 0xFF); + return buf; + } + + @Override + public short readShort() { + return (short) (read() << 8 | read()); + } + + @Override + public short getShort(int roff) { + return (short) (read(roff) << 8 | read(roff + 1)); + } + + @Override + public int readUnsignedShort() { + return (int) (read() << 8 | read()) & 0xFFFF; + } + + @Override + public int getUnsignedShort(int roff) { + return (int) (read(roff) << 8 | read(roff + 1)) & 
0xFFFF; + } + + @Override + public Buffer writeShort(short value) { + write(value >>> 8); + write(value & 0xFF); + return buf; + } + + @Override + public Buffer setShort(int woff, short value) { + write(woff, value >>> 8); + write(woff + 1, value & 0xFF); + return buf; + } + + @Override + public Buffer writeUnsignedShort(int value) { + write(value >>> 8); + write(value & 0xFF); + return buf; + } + + @Override + public Buffer setUnsignedShort(int woff, int value) { + write(woff, value >>> 8); + write(woff + 1, value & 0xFF); + return buf; + } + + @Override + public int readMedium() { + return (int) (read() << 16 | read() << 8 | read()); + } + + @Override + public int getMedium(int roff) { + return (int) (read(roff) << 16 | read(roff + 1) << 8 | read(roff + 2)); + } + + @Override + public int readUnsignedMedium() { + return (int) (read() << 16 | read() << 8 | read()) & 0xFFFFFF; + } + + @Override + public int getUnsignedMedium(int roff) { + return (int) (read(roff) << 16 | read(roff + 1) << 8 | read(roff + 2)) & 0xFFFFFF; + } + + @Override + public Buffer writeMedium(int value) { + write(value >>> 16); + write(value >>> 8 & 0xFF); + write(value & 0xFF); + return buf; + } + + @Override + public Buffer setMedium(int woff, int value) { + write(woff, value >>> 16); + write(woff + 1, value >>> 8 & 0xFF); + write(woff + 2, value & 0xFF); + return buf; + } + + @Override + public Buffer writeUnsignedMedium(int value) { + write(value >>> 16); + write(value >>> 8 & 0xFF); + write(value & 0xFF); + return buf; + } + + @Override + public Buffer setUnsignedMedium(int woff, int value) { + write(woff, value >>> 16); + write(woff + 1, value >>> 8 & 0xFF); + write(woff + 2, value & 0xFF); + return buf; + } + + @Override + public int readInt() { + return (int) (read() << 24 | read() << 16 | read() << 8 | read()); + } + + @Override + public int getInt(int roff) { + return (int) (read(roff) << 24 | read(roff + 1) << 16 | read(roff + 2) << 8 | read(roff + 3)); + } + + @Override + public 
long readUnsignedInt() { + return (read() << 24 | read() << 16 | read() << 8 | read()) & 0xFFFFFFFFL; + } + + @Override + public long getUnsignedInt(int roff) { + return (read(roff) << 24 | read(roff + 1) << 16 | read(roff + 2) << 8 | read(roff + 3)) & 0xFFFFFFFFL; + } + + @Override + public Buffer writeInt(int value) { + write(value >>> 24); + write(value >>> 16 & 0xFF); + write(value >>> 8 & 0xFF); + write(value & 0xFF); + return buf; + } + + @Override + public Buffer setInt(int woff, int value) { + write(woff, value >>> 24); + write(woff + 1, value >>> 16 & 0xFF); + write(woff + 2, value >>> 8 & 0xFF); + write(woff + 3, value & 0xFF); + return buf; + } + + @Override + public Buffer writeUnsignedInt(long value) { + write((int) (value >>> 24)); + write((int) (value >>> 16 & 0xFF)); + write((int) (value >>> 8 & 0xFF)); + write((int) (value & 0xFF)); + return buf; + } + + @Override + public Buffer setUnsignedInt(int woff, long value) { + write(woff, (int) (value >>> 24)); + write(woff + 1, (int) (value >>> 16 & 0xFF)); + write(woff + 2, (int) (value >>> 8 & 0xFF)); + write(woff + 3, (int) (value & 0xFF)); + return buf; + } + + @Override + public float readFloat() { + return Float.intBitsToFloat(readInt()); + } + + @Override + public float getFloat(int roff) { + return Float.intBitsToFloat(getInt(roff)); + } + + @Override + public Buffer writeFloat(float value) { + return writeUnsignedInt(Float.floatToRawIntBits(value)); + } + + @Override + public Buffer setFloat(int woff, float value) { + return setUnsignedInt(woff, Float.floatToRawIntBits(value)); + } + + @Override + public long readLong() { + return read() << 56 | read() << 48 | read() << 40 | read() << 32 | + read() << 24 | read() << 16 | read() << 8 | read(); + } + + @Override + public long getLong(int roff) { + return read(roff) << 56 | read(roff + 1) << 48 | read(roff + 2) << 40 | read(roff + 3) << 32 | + read(roff + 4) << 24 | read(roff + 5) << 16 | read(roff + 6) << 8 | read(roff + 7); + } + + @Override + 
public Buffer writeLong(long value) { + write((int) (value >>> 56)); + write((int) (value >>> 48 & 0xFF)); + write((int) (value >>> 40 & 0xFF)); + write((int) (value >>> 32 & 0xFF)); + write((int) (value >>> 24 & 0xFF)); + write((int) (value >>> 16 & 0xFF)); + write((int) (value >>> 8 & 0xFF)); + write((int) (value & 0xFF)); + return buf; + } + + @Override + public Buffer setLong(int woff, long value) { + write(woff, (int) (value >>> 56)); + write(woff + 1, (int) (value >>> 48 & 0xFF)); + write(woff + 2, (int) (value >>> 40 & 0xFF)); + write(woff + 3, (int) (value >>> 32 & 0xFF)); + write(woff + 4, (int) (value >>> 24 & 0xFF)); + write(woff + 5, (int) (value >>> 16 & 0xFF)); + write(woff + 6, (int) (value >>> 8 & 0xFF)); + write(woff + 7, (int) (value & 0xFF)); + return buf; + } + + @Override + public double readDouble() { + return Double.longBitsToDouble(readLong()); + } + + @Override + public double getDouble(int roff) { + return Double.longBitsToDouble(getLong(roff)); + } + + @Override + public Buffer writeDouble(double value) { + return writeLong(Double.doubleToRawLongBits(value)); + } + + @Override + public Buffer setDouble(int woff, double value) { + return setLong(woff, Double.doubleToRawLongBits(value)); + } + + private long read() { + return buf.readPassThrough(); + } + + private void write(int value) { + buf.writePassThrough(value); + } + + private long read(int roff) { + return buf.getPassThrough(roff); + } + + private void write(int woff, int value) { + buf.setPassThrough(woff, value); + } + } + // +} diff --git a/buffer/src/main/java/io/netty/buffer/api/Drop.java b/buffer/src/main/java/io/netty/buffer/api/Drop.java new file mode 100644 index 00000000000..95f61a3c733 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/Drop.java @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * 
with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * An interface used by {@link Resource} instances to implement their resource disposal mechanics. + * The {@link #drop(Object)} method will be called by the resource when they are closed. + * + * @param The type of resource that can be dropped. + */ +@FunctionalInterface +public interface Drop { + /** + * Dispose of the resources in the given {@link Resource} instance. + * + * @param obj The {@link Resource} instance being dropped. + */ + void drop(T obj); + + /** + * Called when the resource changes owner. + * + * @param obj The new {@link Resource} instance with the new owner. + */ + default void attach(T obj) { + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/ManagedBufferAllocator.java b/buffer/src/main/java/io/netty/buffer/api/ManagedBufferAllocator.java new file mode 100644 index 00000000000..d47e1574937 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/ManagedBufferAllocator.java @@ -0,0 +1,77 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import io.netty.buffer.api.internal.Statics; + +import java.util.function.Supplier; + +import static io.netty.buffer.api.internal.Statics.NO_OP_DROP; +import static io.netty.buffer.api.internal.Statics.allocatorClosedException; + +class ManagedBufferAllocator implements BufferAllocator, AllocatorControl { + private final MemoryManager manager; + private final AllocationType allocationType; + private volatile boolean closed; + + ManagedBufferAllocator(MemoryManager manager, boolean direct) { + this.manager = manager; + allocationType = direct? StandardAllocationTypes.OFF_HEAP : StandardAllocationTypes.ON_HEAP; + } + + @Override + public Buffer allocate(int size) { + if (closed) { + throw allocatorClosedException(); + } + Statics.assertValidBufferSize(size); + return manager.allocateShared(this, size, manager.drop(), Statics.CLEANER, allocationType); + } + + @Override + public Supplier constBufferSupplier(byte[] bytes) { + if (closed) { + throw allocatorClosedException(); + } + Buffer constantBuffer = manager.allocateShared( + this, bytes.length, manager.drop(), Statics.CLEANER, allocationType); + constantBuffer.writeBytes(bytes).makeReadOnly(); + return () -> manager.allocateConstChild(constantBuffer); + } + + @Override + public void close() { + closed = true; + } + + @SuppressWarnings("unchecked") + @Override + public UntetheredMemory allocateUntethered(Buffer originator, int size) { + Statics.assertValidBufferSize(size); + var buf = manager.allocateShared(this, size, NO_OP_DROP, Statics.CLEANER, allocationType); + return new UntetheredMemory() { + @Override + public Memory memory() { + return (Memory) manager.unwrapRecoverableMemory(buf); + } + + @Override + public Drop drop() { + return (Drop) manager.drop(); + } + }; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/MemoryManager.java 
b/buffer/src/main/java/io/netty/buffer/api/MemoryManager.java new file mode 100644 index 00000000000..060b0a38596 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/MemoryManager.java @@ -0,0 +1,175 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import io.netty.buffer.api.internal.MemoryManagerLoader; +import io.netty.buffer.api.internal.MemoryManagerOverride; +import io.netty.util.internal.UnstableApi; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.lang.ref.Cleaner; +import java.util.Optional; +import java.util.ServiceConfigurationError; +import java.util.ServiceLoader.Provider; +import java.util.function.Supplier; +import java.util.stream.Stream; + +/** + * The choice of {@code MemoryManager} implementation also determines the choice of {@link Buffer} implementation. + * It is the MemoryManager that implement memory allocation, and how to wrap the allocated memory in a {@link Buffer} + * interface. + * + * @apiNote This is a low-level, {@linkplain UnstableApi unstable}, API that is used for + * {@link BufferAllocator BufferAllocator} implementations to build upon. 
+ * The methods in this interface are unsafe, because they can be used to violate the safety guarantees of the + * {@link Buffer} API, and potentially also the safety guarantees of the JVM. + */ +@UnstableApi +public interface MemoryManager { + /** + * Get the default, or currently configured, memory managers instance. + * @return A MemoryManagers instance. + */ + static MemoryManager instance() { + return MemoryManagerOverride.configuredOrDefaultManager(); + } + + /** + * Temporarily override the default configured memory managers instance. + *
<p>
    + * Calls to {@link #instance()} from within the given supplier will get the given managers instance. + * + * @param managers Override the default configured managers instance with this instance. + * @param supplier The supplier function to be called while the override is in place. + * @param The result type from the supplier. + * @return The result from the supplier. + */ + static T using(MemoryManager managers, Supplier supplier) { + return MemoryManagerOverride.using(managers, supplier); + } + + /** + * Get a lazy-loading stream of all available memory managers. + * + * @return A stream of providers of memory managers instances. + */ + static Stream> availableManagers() { + return MemoryManagerLoader.stream(); + } + + /** + * Find a {@link MemoryManager} implementation by its {@linkplain #implementationName() implementation name}. + * + * @param implementationName The named implementation to look for. + * @return A {@link MemoryManager} implementation, if any was found. + */ + static Optional lookupImplementation(String implementationName) { + return availableManagers() + .flatMap(provider -> { + try { + return Stream.ofNullable(provider.get()); + } catch (ServiceConfigurationError | Exception e) { + InternalLogger logger = InternalLoggerFactory.getInstance(MemoryManager.class); + if (logger.isTraceEnabled()) { + logger.debug("Failed to load a MemoryManager implementation.", e); + } else { + logger.debug("Failed to load a MemoryManager implementation: " + e.getMessage()); + } + return Stream.empty(); + } + }) + .filter(impl -> implementationName.equals(impl.implementationName())) + .findFirst(); + } + + /** + * Allocates a shared buffer. "Shared" is the normal type of buffer, and means the buffer permit concurrent access + * from multiple threads, within the limited thread-safety guarantees of the {@link Buffer} interface. 
+ * + * @param allocatorControl Call-back interface for controlling the {@linkplain BufferAllocator allocator} that + * requested the allocation of this buffer. + * @param size The size of the buffer to allocate. This size is assumed to be valid for the implementation. + * @param drop The {@link Drop} instance to use when the buffer is {@linkplain Resource#close() closed}. + * @param cleaner The {@link Cleaner} that the underlying memory should be attached to. Can be {@code null}. + * @param allocationType The type of allocation to perform. + * Typically, one of the {@linkplain StandardAllocationTypes}. + * @return A {@link Buffer} instance with the given configuration. + * @throws IllegalArgumentException For unknown {@link AllocationType}s. + */ + Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop drop, Cleaner cleaner, + AllocationType allocationType); + + /** + * Allocates a constant buffer based on the given parent. A "constant" buffer is conceptually similar to a read-only + * buffer, but the implementation may share the underlying memory across multiple buffer instance - something that + * is normally not allowed by the API. This allows efficient implementation of the + * {@link BufferAllocator#constBufferSupplier(byte[])} method. + *
<p>
    + * Note: the const-parent buffer must be allocated by this memory manager. + * + * @param readOnlyConstParent The read-only parent buffer for which a const buffer should be created. The parent + * buffer is allocated in the usual way, with + * {@link #allocateShared(AllocatorControl, long, Drop, Cleaner, AllocationType)}, + * initialised with contents, and then made {@linkplain Buffer#makeReadOnly() read-only}. + * @return A const buffer with the same size, contents, and read-only state of the given parent buffer. + */ + Buffer allocateConstChild(Buffer readOnlyConstParent); + + /** + * The buffer implementation-specific {@link Drop} implementation that will release the underlying memory. + * + * @return A new drop instance. + */ + Drop drop(); + + /** + * Create an object that represents the internal memory of the given buffer. + * + * @param buf The buffer to unwrap. + * @return The internal memory of the given buffer, as an opaque object. + */ + Object unwrapRecoverableMemory(Buffer buf); + + /** + * Recover the memory from a prior {@link #unwrapRecoverableMemory(Buffer)} call, and wrap it in a {@link Buffer} + * instance. + * + * @param allocatorControl The allocator control to attach to the buffer. + * @param recoverableMemory The opaque memory to use for the buffer. + * @param drop The {@link Drop} instance to use when the buffer is {@linkplain Resource#close() closed}. + * @return A {@link Buffer} instance backed by the given recovered memory. + */ + Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop drop); + + /** + * Produces a slice of the given internal memory representation object. + * + * @param memory The opaque memory to slice. + * @param offset The offset into the memory to slice from. + * @param length The length of the slice. + * @return A new opaque memory instance that represents the given slice of the original. 
+ */ + Object sliceMemory(Object memory, int offset, int length); + + /** + * Get the name for this implementation, which can be used for finding this particular implementation via the + * {@link #lookupImplementation(String)} method. + * + * @return The name of this memory managers implementation. + */ + String implementationName(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/Owned.java b/buffer/src/main/java/io/netty/buffer/api/Owned.java new file mode 100644 index 00000000000..c7d8548c8a7 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/Owned.java @@ -0,0 +1,38 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * This interface encapsulates the ownership of a {@link Resource}, and exposes a method that may be used to transfer + * this ownership to the specified recipient thread. + * + * @param The concrete type of {@link Resource} that is owned. + */ +@SuppressWarnings("InterfaceMayBeAnnotatedFunctional") +public interface Owned { + /** + * Transfer the ownership of the resource, to the calling thread. The resource instance is invalidated but without + * disposing of its internal state. Then a new resource instance with the given owner is produced in its stead. + *
<p>
    + * This method is called by {@link Send} implementations. These implementations will ensure that the transfer of + * ownership (the calling of this method) happens-before the new owner begins accessing the new object. This ensures + * that the new resource instance is safely published to the new owners. + * + * @param drop The drop object that knows how to dispose of the state represented by this {@link Resource}. + * @return A new resource instance that is exactly the same as this resource. + */ + T transferOwnership(Drop drop); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/ReadableComponent.java b/buffer/src/main/java/io/netty/buffer/api/ReadableComponent.java new file mode 100644 index 00000000000..67d5b6d1f92 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/ReadableComponent.java @@ -0,0 +1,100 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import java.nio.ByteBuffer; + +/** + * A view onto the buffer component being processed in a given iteration of + * {@link Buffer#forEachReadable(int, ReadableComponentProcessor)}. + */ +public interface ReadableComponent { + + /** + * Check if this component is backed by a cached byte array that can be accessed cheaply. + *
<p>
    + * Note that regardless of what this method returns, the array should not be used to modify the + * contents of this buffer component. + * + * @return {@code true} if {@link #readableArray()} is a cheap operation, otherwise {@code false}. + */ + boolean hasReadableArray(); + + /** + * Get a byte array of the contents of this component. + *
<p>
    + * Note that the array is meant to be read-only. It may either be a direct reference to the + * concrete array instance that is backing this component, or it is a fresh copy. Writing to the array may produce + * undefined behaviour. + * + * @return A byte array of the contents of this component. + * @throws UnsupportedOperationException if {@link #hasReadableArray()} returns {@code false}. + * @see #readableArrayOffset() + * @see #readableArrayLength() + */ + byte[] readableArray(); + + /** + * An offset into the {@link #readableArray()} where this component starts. + * + * @return An offset into {@link #readableArray()}. + * @throws UnsupportedOperationException if {@link #hasReadableArray()} returns {@code false}. + */ + int readableArrayOffset(); + + /** + * The number of bytes in the {@link #readableArray()} that belong to this component. + * + * @return The number of bytes, from the {@link #readableArrayOffset()} into the {@link #readableArray()}, + * that belong to this component. + * @throws UnsupportedOperationException if {@link #hasReadableArray()} returns {@code false}. + */ + int readableArrayLength(); + + /** + * Give the native memory address backing this buffer, or return 0 if this buffer has no native memory address. + *
<p>
    + * Note that the address should not be used for writing to the buffer memory, and doing so may + * produce undefined behaviour. + * + * @return The native memory address, if any, otherwise 0. + */ + long readableNativeAddress(); + + /** + * Get a {@link ByteBuffer} instance for this memory component. + *
<p>
    + * Note that the {@link ByteBuffer} is read-only, to prevent write accesses to the memory, + * when the buffer component is obtained through {@link Buffer#forEachReadable(int, ReadableComponentProcessor)}. + * + * @return A new {@link ByteBuffer}, with its own position and limit, for this memory component. + */ + ByteBuffer readableBuffer(); + + /** + * Open a cursor to iterate the readable bytes of this component. + * Any offsets internal to the component are not modified by the cursor. + *
<p>
    + * Care should be taken to ensure that the buffers lifetime extends beyond the cursor and the iteration, and that + * the internal offsets of the component (such as {@link Buffer#readerOffset()} and {@link Buffer#writerOffset()}) + * are not modified while the iteration takes place. Otherwise unpredictable behaviour might result. + * + * @return A {@link ByteCursor} for iterating the readable bytes of this buffer. + * @see Buffer#openCursor() + */ + ByteCursor openCursor(); + // todo for Unsafe-based impl, DBB.attachment needs to keep underlying memory alive +} diff --git a/buffer/src/main/java/io/netty/buffer/api/ReadableComponentProcessor.java b/buffer/src/main/java/io/netty/buffer/api/ReadableComponentProcessor.java new file mode 100644 index 00000000000..50b6e756b9a --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/ReadableComponentProcessor.java @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import java.nio.ByteBuffer; + +/** + * A processor of {@linkplain ReadableComponent readable components}. + */ +@FunctionalInterface +public interface ReadableComponentProcessor { + /** + * Process the given component at the given index in the + * {@link Buffer#forEachReadable(int, ReadableComponentProcessor) iteration}. + *
<p>
    + * The component object itself is only valid during this call, but the {@link ByteBuffer byte buffers}, arrays, and + * native address pointers obtained from it, will be valid until any operation is performed on the buffer, which + * changes the internal memory. + * + * @param index The current index of the given buffer component, based on the initial index passed to the + * {@link Buffer#forEachReadable(int, ReadableComponentProcessor)} method. + * @param component The current buffer component being processed. + * @return {@code true} if the iteration should continue and more components should be processed, otherwise + * {@code false} to stop the iteration early. + */ + boolean process(int index, ReadableComponent component) throws E; +} diff --git a/buffer/src/main/java/io/netty/buffer/api/Resource.java b/buffer/src/main/java/io/netty/buffer/api/Resource.java new file mode 100644 index 00000000000..49b4931ef6f --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/Resource.java @@ -0,0 +1,56 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * A resource that has a life-time, and can be {@linkplain #close() closed}. + * Resources are initially {@linkplain #isAccessible() accessible}, but closing them makes them inaccessible. 
+ */ +public interface Resource> extends AutoCloseable { + /** + * Send this object instance to another Thread, transferring the ownership to the recipient. + *
<p>
    + * The object must be in a state where it can be sent, which includes at least being + * {@linkplain #isAccessible() accessible}. + *
<p>
    + * When sent, this instance will immediately become inaccessible, as if by {@linkplain #close() closing} it. + * All attempts at accessing an object that has been sent, even if that object has not yet been received, should + * cause an exception to be thrown. + *
<p>
    + * Calling {@link #close()} on an object that has been sent will have no effect, so this method is safe to call + * within a try-with-resources statement. + */ + Send send(); + + /** + * Close the resource, making it inaccessible. + *
<p>
    + * Note, this method is not thread-safe unless otherwise specified. + * + * @throws IllegalStateException If this {@code Resource} has already been closed. + */ + @Override + void close(); + + /** + * Check if this object is accessible. + * + * @return {@code true} if this object is still valid and can be accessed, + * otherwise {@code false} if, for instance, this object has been dropped/deallocated, + * or been {@linkplain #send() sent} elsewhere. + */ + boolean isAccessible(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/Send.java b/buffer/src/main/java/io/netty/buffer/api/Send.java new file mode 100644 index 00000000000..1ef6977e197 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/Send.java @@ -0,0 +1,103 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import io.netty.buffer.api.internal.SendFromSupplier; + +import java.util.function.Function; +import java.util.function.Supplier; + +/** + * A temporary holder of a {@link Resource}, used for transferring the ownership of the + * resource from one thread to another. + *
<p>
    + * Prior to the {@code Send} being created, the originating resource is invalidated, to prevent access while it is being + * sent. This means it cannot be accessed, closed, or disposed of, while it is in-flight. Once the resource is + * {@linkplain #receive() received}, the new ownership is established. + *
<p>
    + * Care must be taken to ensure that the resource is always received by some thread. + * Failure to do so can result in a resource leak. + * + * @param + */ +public interface Send> extends AutoCloseable { + /** + * Construct a {@link Send} based on the given {@link Supplier}. The supplier will be called only once, in the + * receiving thread. + * + * @param concreteObjectType The concrete type of the object being sent. Specifically, the object returned from the + * {@link Supplier#get()} method must be an instance of this class. + * @param supplier The supplier of the object being sent, which will be called when the object is ready to + * be received. + * @param The type of object being sent. + * @return A {@link Send} which will deliver an object of the given type, from the supplier. + */ + static > Send sending(Class concreteObjectType, Supplier supplier) { + return new SendFromSupplier<>(concreteObjectType, supplier); + } + + /** + * Determine if the given candidate object is an instance of a {@link Send} from which an object of the given type + * can be received. + * + * @param type The type of object we wish to receive. + * @param candidate The candidate object that might be a {@link Send} of an object of the given type. + * @return {@code true} if the candidate object is a {@link Send} that would deliver an object of the given type, + * otherwise {@code false}. + */ + static boolean isSendOf(Class type, Object candidate) { + return candidate instanceof Send && ((Send) candidate).referentIsInstanceOf(type); + } + + /** + * Receive the {@link Resource} instance being sent, and bind its ownership to the calling thread. + * The invalidation of the sent resource in the sending thread happens-before the return of this method. + *
<p>
    + * This method can only be called once, and will throw otherwise. + * + * @return The sent resource instance, never {@code null}. + * @throws IllegalStateException If this method is called more than once. + */ + T receive(); + + /** + * Apply a mapping function to the object being sent. The mapping will occur when the object is received. + * + * @param type The result type of the mapping function. + * @param mapper The mapping function to apply to the object being sent. + * @param The result type of the mapping function. + * @return A new {@link Send} instance that will deliver an object that is the result of the mapping. + */ + default > Send map(Class type, Function mapper) { + return sending(type, () -> mapper.apply(receive())); + } + + /** + * Discard this {@link Send} and the object it contains. + * This has no effect if the send-object has already been received. + */ + @Override + void close(); + + /** + * Determine if the object received from this {@code Send} is an instance of the given class. + * + * @param cls The type to check. + * @return {@code true} if the object received from this {@code Send} can be assigned fields or variables of the + * given type, otherwise false. + */ + boolean referentIsInstanceOf(Class cls); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/StandardAllocationTypes.java b/buffer/src/main/java/io/netty/buffer/api/StandardAllocationTypes.java new file mode 100644 index 00000000000..e3ce0103fc5 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/StandardAllocationTypes.java @@ -0,0 +1,31 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +/** + * Standard implementations of {@link AllocationType} that all {@linkplain MemoryManager memory managers} should + * support. + */ +public enum StandardAllocationTypes implements AllocationType { + /** + * The allocation should use Java heap memory. + */ + ON_HEAP, + /** + * The allocation should use native (non-heap) memory. + */ + OFF_HEAP +} diff --git a/buffer/src/main/java/io/netty/buffer/api/WritableComponent.java b/buffer/src/main/java/io/netty/buffer/api/WritableComponent.java new file mode 100644 index 00000000000..d4a1fa17f26 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/WritableComponent.java @@ -0,0 +1,74 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import java.nio.ByteBuffer; + +/** + * A view onto the buffer component being processed in a given iteration of + * {@link Buffer#forEachWritable(int, WritableComponentProcessor)}. 
+ */ +public interface WritableComponent { + + /** + * Check if this component is backed by a cached byte array that can be accessed cheaply. + * + * @return {@code true} if {@link #writableArray()} is a cheap operation, otherwise {@code false}. + */ + boolean hasWritableArray(); + + /** + * Get a byte array of the contents of this component. + * + * @return A byte array of the contents of this component. + * @throws UnsupportedOperationException if {@link #hasWritableArray()} returns {@code false}. + * @see #writableArrayOffset() + * @see #writableArrayLength() + */ + byte[] writableArray(); + + /** + * An offset into the {@link #writableArray()} where this component starts. + * + * @return An offset into {@link #writableArray()}. + * @throws UnsupportedOperationException if {@link #hasWritableArray()} returns {@code false}. + */ + int writableArrayOffset(); + + /** + * The number of bytes in the {@link #writableArray()} that belong to this component. + * + * @return The number of bytes, from the {@link #writableArrayOffset()} into the {@link #writableArray()}, + * that belong to this component. + * @throws UnsupportedOperationException if {@link #hasWritableArray()} returns {@code false}. + */ + int writableArrayLength(); + + /** + * Give the native memory address backing this buffer, or return 0 if this buffer has no native memory address. + * + * @return The native memory address, if any, otherwise 0. + */ + long writableNativeAddress(); + + /** + * Get a {@link ByteBuffer} instance for this memory component, which can be used for modifying the buffer + * contents. + * + * @return A new {@link ByteBuffer}, with its own position and limit, for this memory component. 
+ */ +    ByteBuffer writableBuffer(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/WritableComponentProcessor.java b/buffer/src/main/java/io/netty/buffer/api/WritableComponentProcessor.java new file mode 100644 index 00000000000..a10c67f61bc --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/WritableComponentProcessor.java @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api; + +import java.nio.ByteBuffer; + +/** + * A processor of {@linkplain WritableComponent writable components}. + */ +@FunctionalInterface +public interface WritableComponentProcessor<E extends Exception> { + /** + * Process the given component at the given index in the + * {@link Buffer#forEachWritable(int, WritableComponentProcessor)} iteration. + * <p>

    + * The component object itself is only valid during this call, but the {@link ByteBuffer byte buffers}, arrays, and + * native address pointers obtained from it, will be valid until any {@link Buffer#isOwned() ownership} requiring + * operation is performed on the buffer. + * + * @param index The current index of the given buffer component, based on the initial index passed to the + * {@link Buffer#forEachWritable(int, WritableComponentProcessor)} method. + * @param component The current buffer component being processed. + * @return {@code true} if the iteration should continue and more components should be processed, otherwise + * {@code false} to stop the iteration early. + */ + boolean process(int index, WritableComponent component) throws E; +} diff --git a/buffer/src/main/java/io/netty/buffer/api/adaptor/BufferIntegratable.java b/buffer/src/main/java/io/netty/buffer/api/adaptor/BufferIntegratable.java new file mode 100644 index 00000000000..fe9a73d6755 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/adaptor/BufferIntegratable.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.adaptor; + +import io.netty.buffer.ByteBufConvertible; +import io.netty.util.ReferenceCounted; + +/** + * Interfaces that are required for an object to stand-in for a {@link io.netty.buffer.ByteBuf} in Netty. 
+ */ +public interface BufferIntegratable extends ByteBufConvertible, ReferenceCounted { +} diff --git a/buffer/src/main/java/io/netty/buffer/api/adaptor/ByteBufAdaptor.java b/buffer/src/main/java/io/netty/buffer/api/adaptor/ByteBufAdaptor.java new file mode 100644 index 00000000000..9df2f24e538 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/adaptor/ByteBufAdaptor.java @@ -0,0 +1,1674 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.adaptor; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.ByteBufConvertible; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.DuplicatedByteBuf; +import io.netty.buffer.SlicedByteBuf; +import io.netty.buffer.SwappedByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.internal.ResourceSupport; +import io.netty.buffer.api.internal.Statics; +import io.netty.util.ByteProcessor; +import io.netty.util.IllegalReferenceCountException; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.channels.FileChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.nio.charset.Charset; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicReference; + +import static io.netty.buffer.api.internal.Statics.acquire; +import static io.netty.buffer.api.internal.Statics.isOwned; + +public final class ByteBufAdaptor extends ByteBuf { + private final ByteBufAllocatorAdaptor alloc; + private final Buffer buffer; + private final boolean hasMemoryAddress; + private final int maxCapacity; + + public ByteBufAdaptor(ByteBufAllocatorAdaptor alloc, Buffer buffer, int maxCapacity) { + if (buffer.capacity() > maxCapacity) { + throw new IllegalArgumentException( + "Max capacity (" + maxCapacity + ") cannot be less than buffer capacity: " + buffer.capacity()); + } + this.alloc = Objects.requireNonNull(alloc, "The ByteBuf allocator adaptor cannot be null."); + this.buffer = Objects.requireNonNull(buffer, "The buffer being adapted cannot be null."); + hasMemoryAddress = buffer.nativeAddress() != 0; + this.maxCapacity = maxCapacity; + } + + /** + * Extracts the underlying {@link Buffer} instance that is backing this {@link 
ByteBuf}, if any. + * This is similar to {@link #unwrap()} except the return type is a {@link Buffer}. + * If this {@link ByteBuf} does not wrap a {@link Buffer}, then {@code null} is returned. + * + * @param byteBuf The {@link ByteBuf} to extract the {@link Buffer} from. + * @return The {@link Buffer} instance that is backing the given {@link ByteBuf}, or {@code null} if the given + * {@link ByteBuf} is not backed by a {@link Buffer}. + */ + public static Buffer extract(ByteBuf byteBuf) { + if (byteBuf instanceof ByteBufAdaptor) { + ByteBufAdaptor bba = (ByteBufAdaptor) byteBuf; + return bba.buffer; + } + return null; + } + + /** + * Extracts the underlying {@link Buffer} instance that is backing this {@link ByteBuf}, if any. + * This is similar to {@link #unwrap()} except the return type is a {@link Buffer}. + * If this {@link ByteBuf} does not wrap a {@link Buffer}, then a new {@code Buffer} instance is returned with the + * contents copied from the given {@link ByteBuf}. + * + * @param byteBuf The {@link ByteBuf} to extract the {@link Buffer} from. + * @return The {@link Buffer} instance that is backing the given {@link ByteBuf}, or a new {@code Buffer} + * containing the contents copied from the given {@link ByteBuf}. If the data is copied, the passed {@link ByteBuf} + * will be {@link ByteBuf#release() released}. 
+ */ + public static Buffer extractOrCopy(BufferAllocator allocator, ByteBuf byteBuf) { + final Buffer extracted = extract(byteBuf); + if (extracted != null) { + return extracted; + } + try { + return allocator.allocate(byteBuf.capacity()).writeBytes(ByteBufUtil.getBytes(byteBuf)); + } finally { + byteBuf.release(); + } + } + + @Override + public int capacity() { + return buffer.capacity(); + } + + @Override + public ByteBuf capacity(int newCapacity) { + int diff = newCapacity - capacity() - buffer.writableBytes(); + if (diff > 0) { + try { + buffer.ensureWritable(diff); + } catch (IllegalStateException e) { + if (!isOwned((ResourceSupport) buffer)) { + throw new UnsupportedOperationException(e); + } + throw e; + } + } + return this; + } + + @Override + public int maxCapacity() { + return capacity(); + } + + @Override + public ByteBufAllocator alloc() { + return alloc; + } + + @Override + public ByteOrder order() { + return ByteOrder.BIG_ENDIAN; + } + + @SuppressWarnings("deprecation") + @Override + public ByteBuf order(ByteOrder endianness) { + if (endianness == ByteOrder.LITTLE_ENDIAN) { + return new SwappedByteBuf(this); + } + return this; + } + + @Override + public ByteBuf unwrap() { + return null; + } + + @Override + public boolean isDirect() { + return hasMemoryAddress; + } + + @Override + public boolean isReadOnly() { + return buffer.readOnly(); + } + + @Override + public ByteBuf asReadOnly() { + return Unpooled.unreleasableBuffer(this); + } + + @Override + public int readerIndex() { + return buffer.readerOffset(); + } + + @Override + public ByteBuf readerIndex(int readerIndex) { + buffer.readerOffset(readerIndex); + return this; + } + + @Override + public int writerIndex() { + return buffer.writerOffset(); + } + + @Override + public ByteBuf writerIndex(int writerIndex) { + buffer.writerOffset(writerIndex); + return this; + } + + @Override + public ByteBuf setIndex(int readerIndex, int writerIndex) { + 
buffer.resetOffsets().writerOffset(writerIndex).readerOffset(readerIndex); + return this; + } + + @Override + public int readableBytes() { + return buffer.readableBytes(); + } + + @Override + public int writableBytes() { + return buffer.writableBytes(); + } + + @Override + public int maxWritableBytes() { + return writableBytes(); + } + + @Override + public boolean isReadable() { + return readableBytes() > 0; + } + + @Override + public boolean isReadable(int size) { + return readableBytes() >= size; + } + + @Override + public boolean isWritable() { + return writableBytes() > 0; + } + + @Override + public boolean isWritable(int size) { + return writableBytes() >= size; + } + + @Override + public ByteBuf clear() { + return setIndex(0, 0); + } + + @Override + public ByteBuf discardReadBytes() { + checkAccess(); + buffer.compact(); + return this; + } + + @Override + public ByteBuf discardSomeReadBytes() { + return discardReadBytes(); + } + + @Override + public ByteBuf ensureWritable(int minWritableBytes) { + ensureWritable(minWritableBytes, true); + return this; + } + + @Override + public int ensureWritable(int minWritableBytes, boolean force) { + checkAccess(); + if (writableBytes() < minWritableBytes) { + if (maxCapacity < capacity() + minWritableBytes - writableBytes()) { + return 1; + } + try { + if (isOwned((ResourceSupport) buffer)) { + // Good place. + buffer.ensureWritable(minWritableBytes); + } else { + // Highly questionable place, but ByteBuf technically allows this, so we have to emulate. 
+ int borrows = countBorrows(); + release(borrows); + try { + buffer.ensureWritable(minWritableBytes); + } finally { + retain(borrows); + } + } + return 2; + } catch (IllegalArgumentException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } + } + return 0; + } + + @Override + public boolean getBoolean(int index) { + return getByte(index) != 0; + } + + @Override + public byte getByte(int index) { + try { + return buffer.getByte(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public short getUnsignedByte(int index) { + try { + return (short) buffer.getUnsignedByte(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public short getShort(int index) { + try { + return buffer.getShort(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public short getShortLE(int index) { + try { + return Short.reverseBytes(buffer.getShort(index)); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int getUnsignedShort(int index) { + try { + return buffer.getUnsignedShort(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int getUnsignedShortLE(int index) { + try { + return Integer.reverseBytes(buffer.getUnsignedShort(index)) >>> Short.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int getMedium(int index) { + try { + return buffer.getMedium(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int getMediumLE(int index) { + try { + return Integer.reverseBytes(buffer.getMedium(index)) >> Byte.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + 
@Override + public int getUnsignedMedium(int index) { + try { + return buffer.getUnsignedMedium(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int getUnsignedMediumLE(int index) { + try { + return Integer.reverseBytes(buffer.getUnsignedMedium(index)) >>> Byte.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int getInt(int index) { + try { + return buffer.getInt(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int getIntLE(int index) { + try { + return Integer.reverseBytes(buffer.getInt(index)); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public long getUnsignedInt(int index) { + try { + return buffer.getUnsignedInt(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public long getUnsignedIntLE(int index) { + try { + return Long.reverseBytes(buffer.getUnsignedInt(index)) >>> Integer.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public long getLong(int index) { + try { + return buffer.getLong(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public long getLongLE(int index) { + try { + return Long.reverseBytes(buffer.getLong(index)); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public char getChar(int index) { + try { + return buffer.getChar(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public float getFloat(int index) { + try { + return buffer.getFloat(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } 
+ + @Override + public double getDouble(int index) { + try { + return buffer.getDouble(index); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst) { + while (dst.isWritable()) { + dst.writeByte(getByte(index++)); + } + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst, int length) { + for (int i = 0; i < length; i++) { + dst.writeByte(getByte(index + i)); + } + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuf dst, int dstIndex, int length) { + for (int i = 0; i < length; i++) { + dst.setByte(dstIndex + i, getByte(index + i)); + } + return this; + } + + @Override + public ByteBuf getBytes(int index, byte[] dst) { + return getBytes(index, dst, 0, dst.length); + } + + @Override + public ByteBuf getBytes(int index, byte[] dst, int dstIndex, int length) { + checkAccess(); + if (index < 0 || capacity() < index + length || dst.length < dstIndex + length) { + throw new IndexOutOfBoundsException(); + } + for (int i = 0; i < length; i++) { + dst[dstIndex + i] = getByte(index + i); + } + return this; + } + + @Override + public ByteBuf getBytes(int index, ByteBuffer dst) { + checkAccess(); + if (index < 0 || capacity() < index + dst.remaining()) { + throw new IndexOutOfBoundsException(); + } + while (dst.hasRemaining()) { + dst.put(getByte(index)); + index++; + } + return this; + } + + @Override + public ByteBuf getBytes(int index, OutputStream out, int length) throws IOException { + for (int i = 0; i < length; i++) { + out.write(getByte(index + i)); + } + return this; + } + + @Override + public int getBytes(int index, GatheringByteChannel out, int length) throws IOException { + checkAccess(); + ByteBuffer transfer = ByteBuffer.allocate(length); + buffer.copyInto(index, transfer, 0, length); + return out.write(transfer); + } + + @Override + public int getBytes(int index, FileChannel out, long position, int length) 
throws IOException { + checkAccess(); + ByteBuffer transfer = ByteBuffer.allocate(length); + buffer.copyInto(index, transfer, 0, length); + return out.write(transfer, position); + } + + @Override + public CharSequence getCharSequence(int index, int length, Charset charset) { + byte[] bytes = new byte[length]; + getBytes(index, bytes); + return new String(bytes, charset); + } + + @Override + public ByteBuf setBoolean(int index, boolean value) { + return setByte(index, value? 1 : 0); + } + + @Override + public ByteBuf setByte(int index, int value) { + try { + buffer.setByte(index, (byte) value); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + return this; + } + + @Override + public ByteBuf setShort(int index, int value) { + try { + buffer.setShort(index, (short) value); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setShortLE(int index, int value) { + try { + buffer.setShort(index, Short.reverseBytes((short) value)); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setMedium(int index, int value) { + try { + buffer.setMedium(index, value); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setMediumLE(int index, int value) { + try { + buffer.setMedium(index, Integer.reverseBytes(value) >>> Byte.SIZE); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setInt(int index, int value) { + try { + buffer.setInt(index, value); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setIntLE(int index, int value) { + try { + buffer.setInt(index, Integer.reverseBytes(value)); + return this; + } catch 
(IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setLong(int index, long value) { + try { + buffer.setLong(index, value); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setLongLE(int index, long value) { + try { + buffer.setLong(index, Long.reverseBytes(value)); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setChar(int index, int value) { + try { + buffer.setChar(index, (char) value); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setFloat(int index, float value) { + try { + buffer.setFloat(index, value); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setDouble(int index, double value) { + try { + buffer.setDouble(index, value); + return this; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src) { + checkAccess(); + while (src.isReadable() && index < capacity()) { + setByte(index++, src.readByte()); + } + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src, int length) { + checkAccess(); + for (int i = 0; i < length; i++) { + setByte(index + i, src.readByte()); + } + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuf src, int srcIndex, int length) { + for (int i = 0; i < length; i++) { + setByte(index + i, src.getByte(srcIndex + i)); + } + return this; + } + + @Override + public ByteBuf setBytes(int index, byte[] src) { + return setBytes(index, src, 0, src.length); + } + + @Override + public ByteBuf setBytes(int index, byte[] src, int srcIndex, int length) { + for (int i = 0; i < 
length; i++) { + setByte(index + i, src[srcIndex + i]); + } + return this; + } + + @Override + public ByteBuf setBytes(int index, ByteBuffer src) { + while (src.hasRemaining()) { + setByte(index, src.get()); + index++; + } + return this; + } + + @Override + public int setBytes(int index, InputStream in, int length) throws IOException { + checkAccess(); + byte[] bytes = in.readNBytes(length); + setBytes(index, bytes, 0, length); + return bytes.length; + } + + @Override + public int setBytes(int index, ScatteringByteChannel in, int length) throws IOException { + checkAccess(); + ByteBuffer transfer = ByteBuffer.allocate(length); + int bytes = in.read(transfer); + transfer.flip(); + setBytes(index, transfer); + return bytes; + } + + @Override + public int setBytes(int index, FileChannel in, long position, int length) throws IOException { + checkAccess(); + ByteBuffer transfer = ByteBuffer.allocate(length); + int bytes = in.read(transfer, position); + transfer.flip(); + setBytes(index, transfer); + return bytes; + } + + @Override + public ByteBuf setZero(int index, int length) { + for (int i = 0; i < length; i++) { + setByte(index + i, 0); + } + return this; + } + + @Override + public int setCharSequence(int index, CharSequence sequence, Charset charset) { + byte[] bytes = sequence.toString().getBytes(charset); + for (int i = 0; i < bytes.length; i++) { + setByte(index + i, bytes[i]); + } + return bytes.length; + } + + @Override + public boolean readBoolean() { + return readByte() != 0; + } + + @Override + public byte readByte() { + try { + return buffer.readByte(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public short readUnsignedByte() { + try { + return (short) buffer.readUnsignedByte(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public short readShort() { + try { + return buffer.readShort(); + } catch (IllegalStateException e) { + 
throw new IllegalReferenceCountException(e); + } + } + + @Override + public short readShortLE() { + try { + return Short.reverseBytes(buffer.readShort()); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readUnsignedShort() { + try { + return buffer.readUnsignedShort(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readUnsignedShortLE() { + try { + return Integer.reverseBytes(buffer.readUnsignedShort()) >>> Short.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readMedium() { + try { + return buffer.readMedium(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readMediumLE() { + try { + return Integer.reverseBytes(buffer.readMedium()) >> Byte.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readUnsignedMedium() { + try { + return buffer.readUnsignedMedium(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readUnsignedMediumLE() { + try { + return Integer.reverseBytes(buffer.readUnsignedMedium()) >>> Byte.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readInt() { + try { + return buffer.readInt(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public int readIntLE() { + try { + return Integer.reverseBytes(buffer.readInt()); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public long readUnsignedInt() { + try { + return buffer.readUnsignedInt(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + 
} + + @Override + public long readUnsignedIntLE() { + try { + return Long.reverseBytes(buffer.readUnsignedInt()) >>> Integer.SIZE; + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public long readLong() { + try { + return buffer.readLong(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public long readLongLE() { + try { + return Long.reverseBytes(buffer.readLong()); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public char readChar() { + try { + return buffer.readChar(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public float readFloat() { + try { + return buffer.readFloat(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public double readDouble() { + try { + return buffer.readDouble(); + } catch (IllegalStateException e) { + throw new IllegalReferenceCountException(e); + } + } + + @Override + public ByteBuf readBytes(int length) { + checkAccess(); + Buffer copy = preferredBufferAllocator().allocate(length); + buffer.copyInto(readerIndex(), copy, 0, length); + readerIndex(readerIndex() + length); + return wrap(copy).writerIndex(length); + } + + @Override + public ByteBuf readSlice(int length) { + ByteBuf slice = slice(readerIndex(), length); + buffer.readerOffset(buffer.readerOffset() + length); + return slice; + } + + @Override + public ByteBuf readRetainedSlice(int length) { + ByteBuf slice = retainedSlice(readerIndex(), length); + buffer.readerOffset(buffer.readerOffset() + length); + return slice; + } + + @Override + public ByteBuf readBytes(ByteBuf dst) { + while (dst.isWritable()) { + dst.writeByte(readByte()); + } + return this; + } + + @Override + public ByteBuf readBytes(ByteBuf dst, int length) { + for (int i = 0; i < length; i++) { + 
dst.writeByte(readByte()); + } + return this; + } + + @Override + public ByteBuf readBytes(ByteBuf dst, int dstIndex, int length) { + for (int i = 0; i < length; i++) { + dst.setByte(dstIndex + i, readByte()); + } + return this; + } + + @Override + public ByteBuf readBytes(byte[] dst) { + return readBytes(dst, 0, dst.length); + } + + @Override + public ByteBuf readBytes(byte[] dst, int dstIndex, int length) { + for (int i = 0; i < length; i++) { + dst[dstIndex + i] = readByte(); + } + return this; + } + + @Override + public ByteBuf readBytes(ByteBuffer dst) { + while (dst.hasRemaining()) { + dst.put(readByte()); + } + return this; + } + + @Override + public ByteBuf readBytes(OutputStream out, int length) throws IOException { + for (int i = 0; i < length; i++) { + out.write(readByte()); + } + return this; + } + + @Override + public int readBytes(GatheringByteChannel out, int length) throws IOException { + checkAccess(); + ByteBuffer[] components = new ByteBuffer[buffer.countReadableComponents()]; + buffer.forEachReadable(0, (i, component) -> { + components[i] = component.readableBuffer(); + return true; + }); + int written = (int) out.write(components); + skipBytes(written); + return written; + } + + @Override + public CharSequence readCharSequence(int length, Charset charset) { + byte[] bytes = new byte[length]; + readBytes(bytes); + return new String(bytes, charset); + } + + @Override + public int readBytes(FileChannel out, long position, int length) throws IOException { + ByteBuffer[] components = new ByteBuffer[buffer.countReadableComponents()]; + buffer.forEachReadable(0, (i, component) -> { + components[i] = component.readableBuffer(); + return true; + }); + int written = 0; + for (ByteBuffer component : components) { + written += out.write(component, position + written); + if (component.hasRemaining()) { + break; + } + } + skipBytes(written); + return written; + } + + @Override + public ByteBuf skipBytes(int length) { + buffer.readerOffset(length + 
buffer.readerOffset()); + return this; + } + + @Override + public ByteBuf writeBoolean(boolean value) { + return writeByte(value? 1 : 0); + } + + @Override + public ByteBuf writeByte(int value) { + ensureWritable(1); + buffer.writeByte((byte) value); + return this; + } + + @Override + public ByteBuf writeShort(int value) { + ensureWritable(2); + buffer.writeShort((short) value); + return this; + } + + @Override + public ByteBuf writeShortLE(int value) { + ensureWritable(2); + buffer.writeShort((short) (Integer.reverseBytes(value) >>> Short.SIZE)); + return this; + } + + @Override + public ByteBuf writeMedium(int value) { + ensureWritable(3); + buffer.writeMedium(value); + return this; + } + + @Override + public ByteBuf writeMediumLE(int value) { + ensureWritable(3); + buffer.writeMedium(Integer.reverseBytes(value) >> Byte.SIZE); + return this; + } + + @Override + public ByteBuf writeInt(int value) { + ensureWritable(4); + buffer.writeInt(value); + return this; + } + + @Override + public ByteBuf writeIntLE(int value) { + ensureWritable(4); + buffer.writeInt(Integer.reverseBytes(value)); + return this; + } + + @Override + public ByteBuf writeLong(long value) { + ensureWritable(8); + buffer.writeLong(value); + return this; + } + + @Override + public ByteBuf writeLongLE(long value) { + ensureWritable(8); + buffer.writeLong(Long.reverseBytes(value)); + return this; + } + + @Override + public ByteBuf writeChar(int value) { + ensureWritable(2); + buffer.writeChar((char) value); + return this; + } + + @Override + public ByteBuf writeFloat(float value) { + ensureWritable(4); + buffer.writeFloat(value); + return this; + } + + @Override + public ByteBuf writeDouble(double value) { + ensureWritable(8); + buffer.writeDouble(value); + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src) { + return writeBytes(src, src.readableBytes()); + } + + @Override + public ByteBuf writeBytes(ByteBuf src, int length) { + ensureWritable(length); + for (int i = 0; i < 
length; i++) { + writeByte(src.readByte()); + } + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuf src, int srcIndex, int length) { + ensureWritable(length); + for (int i = 0; i < length; i++) { + writeByte(src.getByte(srcIndex + i)); + } + return this; + } + + @Override + public ByteBuf writeBytes(byte[] src) { + ensureWritable(src.length); + for (byte b : src) { + writeByte(b); + } + return this; + } + + @Override + public ByteBuf writeBytes(byte[] src, int srcIndex, int length) { + ensureWritable(length); + for (int i = 0; i < length; i++) { + writeByte(src[srcIndex + i]); + } + return this; + } + + @Override + public ByteBuf writeBytes(ByteBuffer src) { + ensureWritable(src.remaining()); + while (src.hasRemaining()) { + writeByte(src.get()); + } + return this; + } + + @Override + public int writeBytes(InputStream in, int length) throws IOException { + byte[] bytes = in.readNBytes(length); + writeBytes(bytes); + return bytes.length; + } + + @Override + public int writeBytes(ScatteringByteChannel in, int length) throws IOException { + ensureWritable(length); + ByteBuffer[] components = new ByteBuffer[buffer.countWritableComponents()]; + buffer.forEachWritable(0, (i, component) -> { + components[i] = component.writableBuffer(); + return true; + }); + + int read = (int) in.read(components); + + if (read > 0) { + writerIndex(read + writerIndex()); + } + return read; + } + + @Override + public int writeBytes(FileChannel in, long position, int length) throws IOException { + ensureWritable(length); + ByteBuffer[] components = new ByteBuffer[buffer.countWritableComponents()]; + buffer.forEachWritable(0, (i, component) -> { + components[i] = component.writableBuffer(); + return true; + }); + int read = 0; + for (ByteBuffer component : components) { + int r = in.read(component, position + read); + if (r > 0) { + read += r; + } + if (component.hasRemaining()) { + break; + } + } + writerIndex(read + writerIndex()); + return read; + } + + @Override + public 
ByteBuf writeZero(int length) { + if (length < 0) { + throw new IllegalArgumentException(); + } + ensureWritable(length); + for (int i = 0; i < length; i++) { + writeByte(0); + } + return this; + } + + @Override + public int writeCharSequence(CharSequence sequence, Charset charset) { + byte[] bytes = sequence.toString().getBytes(charset); + writeBytes(bytes); + return bytes.length; + } + + @Override + public int indexOf(int fromIndex, int toIndex, byte value) { + if (!buffer.isAccessible()) { + return -1; + } + if (fromIndex <= toIndex) { + if (fromIndex < 0) { + fromIndex = 0; // Required to pass regression tests. + } + if (capacity() < toIndex) { + throw new IndexOutOfBoundsException(); + } + for (; fromIndex < toIndex; fromIndex++) { + if (getByte(fromIndex) == value) { + return fromIndex; + } + } + } else { + if (capacity() < fromIndex) { + fromIndex = capacity(); // Required to pass regression tests. + } + fromIndex--; + if (toIndex < 0) { + throw new IndexOutOfBoundsException(); + } + for (; fromIndex > toIndex; fromIndex--) { + if (getByte(fromIndex) == value) { + return fromIndex; + } + } + } + return -1; + } + + @Override + public int bytesBefore(byte value) { + return bytesBefore(readerIndex(), writerIndex(), value); + } + + @Override + public int bytesBefore(int length, byte value) { + return bytesBefore(readerIndex(), readerIndex() + length, value); + } + + @Override + public int bytesBefore(int index, int length, byte value) { + int i = indexOf(index, index + length, value); + if (i != -1) { + i -= index; + } + return i; + } + + @Override + public int forEachByte(ByteProcessor processor) { + checkAccess(); + int index = readerIndex(); + int bytes = buffer.openCursor().process(processor); + return bytes == -1 ? -1 : index + bytes; + } + + @Override + public int forEachByte(int index, int length, ByteProcessor processor) { + checkAccess(); + int bytes = buffer.openCursor(index, length).process(processor); + return bytes == -1 ? 
-1 : index + bytes; + } + + @Override + public int forEachByteDesc(ByteProcessor processor) { + checkAccess(); + int index = readerIndex(); + int bytes = buffer.openReverseCursor().process(processor); + return bytes == -1 ? -1 : index - bytes; + } + + @Override + public int forEachByteDesc(int index, int length, ByteProcessor processor) { + checkAccess(); + int bytes = buffer.openReverseCursor(index + length - 1, length).process(processor); + return bytes == -1 ? -1 : index - bytes; + } + + @Override + public ByteBuf copy() { + return copy(readerIndex(), readableBytes()); + } + + @Override + public ByteBuf copy(int index, int length) { + checkAccess(); + try { + BufferAllocator allocator = preferredBufferAllocator(); + Buffer copy = allocator.allocate(length); + buffer.copyInto(index, copy, 0, length); + copy.writerOffset(length); + return wrap(copy); + } catch (IllegalArgumentException e) { + throw new IndexOutOfBoundsException(e.getMessage()); + } + } + + @Override + public ByteBuf slice() { + return slice(readerIndex(), readableBytes()); + } + + @Override + public ByteBuf retainedSlice() { + return retainedSlice(readerIndex(), readableBytes()); + } + + @Override + public ByteBuf slice(int index, int length) { + checkAccess(); + return new Slice(this, index, length); + } + + @Override + public ByteBuf retainedSlice(int index, int length) { + checkAccess(); + Slice slice = new Slice(this, index, length); + retain(); + return slice; + } + + @SuppressWarnings("deprecation") + private static final class Slice extends SlicedByteBuf { + private final int indexAdjustment; + private final int lengthAdjustment; + + Slice(ByteBuf buffer, int index, int length) { + super(buffer, index, length); + indexAdjustment = index; + lengthAdjustment = length; + } + + @Override + public ByteBuf retainedDuplicate() { + return new Slice(unwrap().retainedDuplicate(), indexAdjustment, lengthAdjustment) + .setIndex(readerIndex(), writerIndex()); + } + + @Override + public ByteBuf 
retainedSlice(int index, int length) { + checkIndex(index, length); + return unwrap().retainedSlice(indexAdjustment + index, length); + } + } + + @SuppressWarnings("deprecation") + private static final class Duplicate extends DuplicatedByteBuf { + Duplicate(ByteBufAdaptor byteBuf) { + super(byteBuf); + } + + @Override + public ByteBuf duplicate() { + ((ByteBufAdaptor) unwrap()).checkAccess(); + return new Duplicate((ByteBufAdaptor) unwrap()) + .setIndex(readerIndex(), writerIndex()); + } + + @Override + public ByteBuf retainedDuplicate() { + return unwrap().retainedDuplicate(); + } + + @Override + public ByteBuf retainedSlice(int index, int length) { + return unwrap().retainedSlice(index, length); + } + } + + @Override + public ByteBuf duplicate() { + checkAccess(); + Duplicate duplicatedByteBuf = new Duplicate(this); + return duplicatedByteBuf.setIndex(readerIndex(), writerIndex()); + } + + @Override + public ByteBuf retainedDuplicate() { + checkAccess(); + retain(); + Duplicate duplicatedByteBuf = new Duplicate(this); + return duplicatedByteBuf.setIndex(readerIndex(), writerIndex()); + } + + @Override + public int nioBufferCount() { + return 1; + } + + @Override + public ByteBuffer nioBuffer() { + return nioBuffer(readerIndex(), readableBytes()); + } + + @Override + public ByteBuffer nioBuffer(int index, int length) { + checkAccess(); + ByteBuffer copy = isDirect() ? ByteBuffer.allocateDirect(length) : ByteBuffer.allocate(length); + int endB = index + length; + int endL = endB - Long.BYTES; + while (index < endL) { + copy.putLong(buffer.getLong(index)); + index += Long.BYTES; + } + while (index < endB) { + copy.put(buffer.getByte(index)); + index++; + } + return copy.flip(); + } + + @Override + public ByteBuffer internalNioBuffer(int index, int length) { + checkAccess(); + if (readerIndex() <= index && index < writerIndex() && length <= readableBytes()) { + // We wish to read from the internal buffer. 
+ if (buffer.countReadableComponents() != 1) { + throw new UnsupportedOperationException( + "Unsupported number of readable components: " + buffer.countReadableComponents() + '.'); + } + AtomicReference bufRef = new AtomicReference<>(); + buffer.forEachReadable(0, (i, component) -> { + bufRef.set(component.readableBuffer()); + return false; + }); + ByteBuffer buffer = bufRef.get(); + if (index != readerIndex() || length != readableBytes()) { + buffer = Statics.bbslice(buffer, index - readerIndex(), length); + } + return buffer; + } else if (writerIndex() <= index && length <= writableBytes()) { + // We wish to write to the internal buffer. + if (buffer.countWritableComponents() != 1) { + throw new UnsupportedOperationException( + "Unsupported number of writable components: " + buffer.countWritableComponents() + '.'); + } + AtomicReference bufRef = new AtomicReference<>(); + buffer.forEachWritable(0, (i, component) -> { + bufRef.set(component.writableBuffer()); + return false; + }); + ByteBuffer buffer = bufRef.get(); + if (index != writerIndex() || length != writableBytes()) { + buffer = Statics.bbslice(buffer, index - writerIndex(), length); + } + return buffer; + } else { + String message = "Cannot provide internal NIO buffer for range from " + index + " for length " + length + + ", when writerIndex() is " + writerIndex() + " and writable bytes are " + writableBytes() + + ", and readerIndex() is " + readerIndex() + " and readable bytes are " + readableBytes() + + ". The requested range must fall within EITHER the readable area OR the writable area. 
" + + "Straddling the two areas, or reaching outside of their bounds, is not allowed."; + throw new UnsupportedOperationException(message); + } + } + + @Override + public ByteBuffer[] nioBuffers() { + return new ByteBuffer[] { nioBuffer() }; + } + + @Override + public ByteBuffer[] nioBuffers(int index, int length) { + return new ByteBuffer[] { nioBuffer(index, length) }; + } + + @Override + public boolean hasArray() { + return false; + } + + @Override + public byte[] array() { + throw new UnsupportedOperationException("This buffer has no array."); + } + + @Override + public int arrayOffset() { + throw new UnsupportedOperationException("This buffer has no array."); + } + + @Override + public boolean hasMemoryAddress() { + return hasMemoryAddress; + } + + @Override + public long memoryAddress() { + if (!hasMemoryAddress()) { + throw new UnsupportedOperationException("No memory address associated with this buffer."); + } + return buffer.nativeAddress(); + } + + @Override + public String toString(Charset charset) { + return toString(readerIndex(), readableBytes(), charset); + } + + @Override + public String toString(int index, int length, Charset charset) { + byte[] bytes = new byte[length]; + getBytes(index, bytes); + return new String(bytes, charset); + } + + @Override + public int hashCode() { + return ByteBufUtil.hashCode(this); + } + + @Override + public boolean equals(Object obj) { + if (obj instanceof ByteBufConvertible) { + ByteBuf other = ((ByteBufConvertible) obj).asByteBuf(); + return this == other || ByteBufUtil.equals(this, other); + } + return false; + } + + @SuppressWarnings("deprecation") + @Override + public int compareTo(ByteBuf buffer) { + // Little-ending implementation of the compare seems to be broken. 
+ return ByteBufUtil.compare(order(ByteOrder.BIG_ENDIAN), buffer.order(ByteOrder.BIG_ENDIAN)); + } + + @Override + public String toString() { + return "ByteBuf(" + readerIndex() + ", " + writerIndex() + ", " + capacity() + ')'; + } + + @Override + public ByteBuf retain(int increment) { + for (int i = 0; i < increment; i++) { + acquire((ResourceSupport) buffer); + } + return this; + } + + @Override + public int refCnt() { + return 1 + countBorrows(); + } + + private int countBorrows() { + if (!buffer.isAccessible()) { + return -1; + } + if (buffer instanceof ResourceSupport) { + var rc = (ResourceSupport) buffer; + return Statics.countBorrows(rc); + } + return isOwned((ResourceSupport) buffer)? 0 : 1; + } + + @Override + public ByteBuf retain() { + return retain(1); + } + + @Override + public ByteBuf touch() { + return this; + } + + @Override + public ByteBuf touch(Object hint) { + return this; + } + + @Override + public boolean release() { + return release(1); + } + + @Override + public boolean release(int decrement) { + int refCount = 1 + Statics.countBorrows((ResourceSupport) buffer); + if (!buffer.isAccessible() || decrement > refCount) { + throw new IllegalReferenceCountException(refCount, -decrement); + } + for (int i = 0; i < decrement; i++) { + try { + buffer.close(); + } catch (RuntimeException e) { + throw new IllegalReferenceCountException(e); + } + } + return !buffer.isAccessible(); + } + + private void checkAccess() { + if (!buffer.isAccessible()) { + throw new IllegalReferenceCountException(); + } + } + + private ByteBufAdaptor wrap(Buffer copy) { + return new ByteBufAdaptor(alloc, copy, maxCapacity); + } + + private BufferAllocator preferredBufferAllocator() { + return isDirect()? 
alloc.getOffHeap() : alloc.getOnHeap(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/adaptor/ByteBufAllocatorAdaptor.java b/buffer/src/main/java/io/netty/buffer/api/adaptor/ByteBufAllocatorAdaptor.java new file mode 100644 index 00000000000..26bc4c0191b --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/adaptor/ByteBufAllocatorAdaptor.java @@ -0,0 +1,176 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.adaptor; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.CompositeByteBuf; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.internal.AdaptableBuffer; +import io.netty.util.internal.PlatformDependent; + +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import java.util.Objects; + +public class ByteBufAllocatorAdaptor implements ByteBufAllocator, AutoCloseable { + private static final int DEFAULT_MAX_CAPACITY = Integer.MAX_VALUE; + private final BufferAllocator onheap; + private final BufferAllocator offheap; + private boolean closed; + + public ByteBufAllocatorAdaptor() { + this(BufferAllocator.onHeapPooled(), BufferAllocator.offHeapPooled()); + } + + public ByteBufAllocatorAdaptor(BufferAllocator onheap, BufferAllocator offheap) { + this.onheap = Objects.requireNonNull(onheap, "The on-heap allocator cannot be null."); + this.offheap = Objects.requireNonNull(offheap, "The off-heap allocator cannot be null."); + } + + @Override + public ByteBuf buffer() { + return buffer(256); + } + + public BufferAllocator getOnHeap() { + return onheap; + } + + public BufferAllocator getOffHeap() { + return offheap; + } + + public boolean isClosed() { + return closed; + } + + @Override + public ByteBuf buffer(int initialCapacity) { + return buffer(initialCapacity, DEFAULT_MAX_CAPACITY); + } + + @Override + public ByteBuf buffer(int initialCapacity, int maxCapacity) { + return initialise(onheap.allocate(initialCapacity), maxCapacity); + } + + @Override + public ByteBuf ioBuffer() { + return directBuffer(); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity) { + return directBuffer(initialCapacity); + } + + @Override + public ByteBuf ioBuffer(int initialCapacity, int maxCapacity) { + return directBuffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf heapBuffer() { + return buffer(); + } + 
+ @Override + public ByteBuf heapBuffer(int initialCapacity) { + return buffer(initialCapacity); + } + + @Override + public ByteBuf heapBuffer(int initialCapacity, int maxCapacity) { + return buffer(initialCapacity, maxCapacity); + } + + @Override + public ByteBuf directBuffer() { + return directBuffer(256); + } + + @Override + public ByteBuf directBuffer(int initialCapacity) { + return directBuffer(initialCapacity, DEFAULT_MAX_CAPACITY); + } + + @Override + public ByteBuf directBuffer(int initialCapacity, int maxCapacity) { + return initialise(offheap.allocate(initialCapacity), maxCapacity); + } + + private ByteBuf initialise(Buffer buffer, int maxCapacity) { + AdaptableBuffer adaptableBuffer = (AdaptableBuffer) buffer; + return adaptableBuffer.initialise(this, maxCapacity); + } + + @Override + public CompositeByteBuf compositeBuffer() { + return compositeHeapBuffer(); + } + + @Override + public CompositeByteBuf compositeBuffer(int maxNumComponents) { + return compositeHeapBuffer(maxNumComponents); + } + + @Override + public CompositeByteBuf compositeHeapBuffer() { + return compositeHeapBuffer(1024); + } + + @Override + public CompositeByteBuf compositeHeapBuffer(int maxNumComponents) { + return new CompositeByteBuf(this, false, maxNumComponents, heapBuffer()); + } + + @Override + public CompositeByteBuf compositeDirectBuffer() { + return compositeDirectBuffer(1024); + } + + @Override + public CompositeByteBuf compositeDirectBuffer(int maxNumComponents) { + return new CompositeByteBuf(this, true, maxNumComponents, directBuffer()); + } + + @Override + public boolean isDirectBufferPooled() { + return true; + } + + @Override + public int calculateNewCapacity(int minNewCapacity, int maxCapacity) { + checkPositiveOrZero(minNewCapacity, "minNewCapacity"); + if (minNewCapacity > maxCapacity) { + throw new IllegalArgumentException(String.format( + "minNewCapacity: %d (expected: not greater than maxCapacity(%d)", + minNewCapacity, maxCapacity)); + } + int newCapacity = 
PlatformDependent.roundToPowerOfTwo(minNewCapacity); + return Math.min(maxCapacity, newCapacity); + } + + @Override + public void close() throws Exception { + try (onheap) { + try (offheap) { + closed = true; + } + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/adaptor/package-info.java b/buffer/src/main/java/io/netty/buffer/api/adaptor/package-info.java new file mode 100644 index 00000000000..12964152cb9 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/adaptor/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * Helpers for integrating with the existing {@link io.netty.buffer.ByteBuf} API. + */ +package io.netty.buffer.api.adaptor; diff --git a/buffer/src/main/java/io/netty/buffer/api/bytebuffer/ByteBufferMemoryManager.java b/buffer/src/main/java/io/netty/buffer/api/bytebuffer/ByteBufferMemoryManager.java new file mode 100644 index 00000000000..20a061a5b4f --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/bytebuffer/ByteBufferMemoryManager.java @@ -0,0 +1,81 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.bytebuffer; + +import io.netty.buffer.api.AllocationType; +import io.netty.buffer.api.AllocatorControl; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.StandardAllocationTypes; +import io.netty.buffer.api.internal.Statics; + +import java.lang.ref.Cleaner; +import java.nio.ByteBuffer; + +import static io.netty.buffer.api.internal.Statics.bbslice; +import static io.netty.buffer.api.internal.Statics.convert; + +public class ByteBufferMemoryManager implements MemoryManager { + @Override + public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop drop, Cleaner cleaner, + AllocationType allocationType) { + int capacity = Math.toIntExact(size); + final ByteBuffer buffer; + if (allocationType == StandardAllocationTypes.OFF_HEAP) { + buffer = ByteBuffer.allocateDirect(capacity); + } else if (allocationType == StandardAllocationTypes.ON_HEAP) { + buffer = ByteBuffer.allocate(capacity); + } else { + throw new IllegalArgumentException("Unknown allocation type: " + allocationType); + } + return new NioBuffer(buffer, buffer, allocatorControl, convert(drop)); + } + + @Override + public Buffer allocateConstChild(Buffer readOnlyConstParent) { + assert readOnlyConstParent.readOnly(); + NioBuffer buf = (NioBuffer) readOnlyConstParent; + return new NioBuffer(buf); + } + + @Override + public Drop drop() { + return Statics.NO_OP_DROP; + } + + @Override + public Object unwrapRecoverableMemory(Buffer buf) { + return 
((NioBuffer) buf).recoverable(); + } + + @Override + public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop drop) { + ByteBuffer memory = (ByteBuffer) recoverableMemory; + return new NioBuffer(memory, memory, allocatorControl, convert(drop)); + } + + @Override + public Object sliceMemory(Object memory, int offset, int length) { + var buffer = (ByteBuffer) memory; + return bbslice(buffer, offset, length); + } + + @Override + public String implementationName() { + return "ByteBuffer"; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/bytebuffer/NioBuffer.java b/buffer/src/main/java/io/netty/buffer/api/bytebuffer/NioBuffer.java new file mode 100644 index 00000000000..8f76691add4 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/bytebuffer/NioBuffer.java @@ -0,0 +1,1107 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.bytebuffer; + +import io.netty.buffer.api.AllocatorControl; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferReadOnlyException; +import io.netty.buffer.api.ByteCursor; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.Owned; +import io.netty.buffer.api.ReadableComponent; +import io.netty.buffer.api.ReadableComponentProcessor; +import io.netty.buffer.api.WritableComponent; +import io.netty.buffer.api.WritableComponentProcessor; +import io.netty.buffer.api.internal.AdaptableBuffer; +import io.netty.buffer.api.internal.ArcDrop; +import io.netty.buffer.api.internal.Statics; +import io.netty.util.internal.PlatformDependent; + +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.ReadOnlyBufferException; + +import static io.netty.buffer.api.internal.Statics.bbput; +import static io.netty.buffer.api.internal.Statics.bbslice; +import static io.netty.buffer.api.internal.Statics.bufferIsClosed; +import static io.netty.buffer.api.internal.Statics.bufferIsReadOnly; + +class NioBuffer extends AdaptableBuffer implements ReadableComponent, WritableComponent { + private static final ByteBuffer CLOSED_BUFFER = ByteBuffer.allocate(0); + + private final AllocatorControl control; + private ByteBuffer base; + private ByteBuffer rmem; // For reading. + private ByteBuffer wmem; // For writing. + + private int roff; + private int woff; + private boolean constBuffer; + + NioBuffer(ByteBuffer base, ByteBuffer memory, AllocatorControl control, Drop drop) { + super(new MakeInaccessibleOnDrop(ArcDrop.wrap(drop))); + this.base = base; + rmem = memory; + wmem = memory; + this.control = control; + } + + /** + * Constructor for {@linkplain BufferAllocator#constBufferSupplier(byte[]) const buffers}. 
+ */ + NioBuffer(NioBuffer parent) { + super(new MakeInaccessibleOnDrop(new ArcDrop<>(ArcDrop.acquire(parent.unsafeGetDrop())))); + control = parent.control; + base = parent.base; + rmem = bbslice(parent.rmem, 0, parent.rmem.capacity()); // Need to slice to get independent byte orders. + assert parent.wmem == CLOSED_BUFFER; + wmem = CLOSED_BUFFER; + roff = parent.roff; + woff = parent.woff; + constBuffer = true; + } + + private static final class MakeInaccessibleOnDrop implements Drop { + final Drop delegate; + + private MakeInaccessibleOnDrop(Drop delegate) { + this.delegate = delegate; + } + + @Override + public void drop(NioBuffer buf) { + try { + delegate.drop(buf); + } finally { + buf.makeInaccessible(); + } + } + + @Override + public void attach(NioBuffer buf) { + delegate.attach(buf); + } + + @Override + public String toString() { + return "MakeInaccessibleOnDrop(" + delegate + ')'; + } + } + + @Override + protected Drop unsafeGetDrop() { + MakeInaccessibleOnDrop drop = (MakeInaccessibleOnDrop) super.unsafeGetDrop(); + return drop.delegate; + } + + @Override + protected void unsafeSetDrop(Drop replacement) { + super.unsafeSetDrop(new MakeInaccessibleOnDrop(replacement)); + } + + @Override + public String toString() { + return "Buffer[roff:" + roff + ", woff:" + woff + ", cap:" + rmem.capacity() + ']'; + } + + @Override + protected RuntimeException createResourceClosedException() { + return bufferIsClosed(this); + } + + @Override + public int capacity() { + return rmem.capacity(); + } + + @Override + public int readerOffset() { + return roff; + } + + @Override + public Buffer readerOffset(int offset) { + checkRead(offset, 0); + roff = offset; + return this; + } + + @Override + public int writerOffset() { + return woff; + } + + @Override + public Buffer writerOffset(int offset) { + checkWrite(offset, 0); + woff = offset; + return this; + } + + @Override + public Buffer fill(byte value) { + int capacity = capacity(); + checkSet(0, capacity); + if (rmem == 
CLOSED_BUFFER) { + throw bufferIsClosed(this); + } + for (int i = 0; i < capacity; i++) { + wmem.put(i, value); + } + return this; + } + + @Override + public long nativeAddress() { + return rmem.isDirect() && PlatformDependent.hasUnsafe()? PlatformDependent.directBufferAddress(rmem) : 0; + } + + @Override + public Buffer makeReadOnly() { + wmem = CLOSED_BUFFER; + return this; + } + + @Override + public boolean readOnly() { + return wmem == CLOSED_BUFFER && rmem != CLOSED_BUFFER; + } + + @Override + public Buffer copy(int offset, int length) { + checkGet(offset, length); + int allocSize = Math.max(length, 1); // Allocators don't support allocating zero-sized buffers. + AllocatorControl.UntetheredMemory memory = control.allocateUntethered(this, allocSize); + ByteBuffer base = memory.memory(); + ByteBuffer buffer = length == 0? bbslice(base, 0, 0) : base; + Buffer copy = new NioBuffer(base, buffer, control, memory.drop()); + copyInto(offset, copy, 0, length); + copy.writerOffset(length); + return copy; + } + + @Override + public void copyInto(int srcPos, byte[] dest, int destPos, int length) { + copyInto(srcPos, ByteBuffer.wrap(dest), destPos, length); + } + + @Override + public void copyInto(int srcPos, ByteBuffer dest, int destPos, int length) { + if (rmem == CLOSED_BUFFER) { + throw bufferIsClosed(this); + } + if (srcPos < 0) { + throw new IllegalArgumentException("The srcPos cannot be negative: " + srcPos + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (capacity() < srcPos + length) { + throw new IllegalArgumentException("The srcPos + length is beyond the end of the buffer: " + + "srcPos = " + srcPos + ", length = " + length + '.'); + } + dest = dest.duplicate().clear(); + bbput(dest, destPos, rmem, srcPos, length); + } + + @Override + public void copyInto(int srcPos, Buffer dest, int destPos, int length) { + if (dest.readOnly()) { + throw bufferIsReadOnly(dest); + } + if (dest 
instanceof NioBuffer) { + var nb = (NioBuffer) dest; + nb.checkSet(destPos, length); + copyInto(srcPos, nb.wmem, destPos, length); + return; + } + + Statics.copyToViaReverseLoop(this, srcPos, dest, destPos, length); + } + + @Override + public ByteCursor openCursor() { + return openCursor(readerOffset(), readableBytes()); + } + + @Override + public ByteCursor openCursor(int fromOffset, int length) { + if (rmem == CLOSED_BUFFER) { + throw bufferIsClosed(this); + } + if (fromOffset < 0) { + throw new IllegalArgumentException("The fromOffset cannot be negative: " + fromOffset + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (capacity() < fromOffset + length) { + throw new IllegalArgumentException("The fromOffset + length is beyond the end of the buffer: " + + "fromOffset = " + fromOffset + ", length = " + length + '.'); + } + return new ByteCursor() { + // Duplicate source buffer to keep our own byte order state. 
+ final ByteBuffer buffer = rmem.duplicate().order(ByteOrder.BIG_ENDIAN); + int index = fromOffset; + final int end = index + length; + byte byteValue = -1; + + @Override + public boolean readByte() { + if (index < end) { + byteValue = buffer.get(index); + index++; + return true; + } + return false; + } + + @Override + public byte getByte() { + return byteValue; + } + + @Override + public int currentOffset() { + return index; + } + + @Override + public int bytesLeft() { + return end - index; + } + }; + } + + @Override + public ByteCursor openReverseCursor(int fromOffset, int length) { + if (rmem == CLOSED_BUFFER) { + throw bufferIsClosed(this); + } + if (fromOffset < 0) { + throw new IllegalArgumentException("The fromOffset cannot be negative: " + fromOffset + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (capacity() <= fromOffset) { + throw new IllegalArgumentException("The fromOffset is beyond the end of the buffer: " + fromOffset + '.'); + } + if (fromOffset - length < -1) { + throw new IllegalArgumentException("The fromOffset - length would underflow the buffer: " + + "fromOffset = " + fromOffset + ", length = " + length + '.'); + } + return new ByteCursor() { + final ByteBuffer buffer = rmem.duplicate().order(ByteOrder.LITTLE_ENDIAN); + int index = fromOffset; + final int end = index - length; + byte byteValue = -1; + + @Override + public boolean readByte() { + if (index > end) { + byteValue = buffer.get(index); + index--; + return true; + } + return false; + } + + @Override + public byte getByte() { + return byteValue; + } + + @Override + public int currentOffset() { + return index; + } + + @Override + public int bytesLeft() { + return index - end; + } + }; + } + + @Override + public Buffer ensureWritable(int size, int minimumGrowth, boolean allowCompaction) { + if (!isAccessible()) { + throw bufferIsClosed(this); + } + if (!isOwned()) { + throw attachTrace(new 
IllegalStateException( + "Buffer is not owned. Only owned buffers can call ensureWritable.")); + } + if (size < 0) { + throw new IllegalArgumentException("Cannot ensure writable for a negative size: " + size + '.'); + } + if (minimumGrowth < 0) { + throw new IllegalArgumentException("The minimum growth cannot be negative: " + minimumGrowth + '.'); + } + if (rmem != wmem) { + throw bufferIsReadOnly(this); + } + if (writableBytes() >= size) { + // We already have enough space. + return this; + } + + if (allowCompaction && writableBytes() + readerOffset() >= size) { + // We can solve this with compaction. + return compact(); + } + + // Allocate a bigger buffer. + long newSize = capacity() + (long) Math.max(size - writableBytes(), minimumGrowth); + Statics.assertValidBufferSize(newSize); + var untethered = control.allocateUntethered(this, (int) newSize); + ByteBuffer buffer = untethered.memory(); + + // Copy contents. + copyInto(0, buffer, 0, capacity()); + + // Release the old memory and install the new: + Drop drop = untethered.drop(); + disconnectDrop(drop); + attachNewBuffer(buffer, drop); + return this; + } + + private void disconnectDrop(Drop newDrop) { + var drop = (Drop) unsafeGetDrop(); + int roff = this.roff; + int woff = this.woff; + drop.drop(this); + unsafeSetDrop(new ArcDrop<>(newDrop)); + this.roff = roff; + this.woff = woff; + } + + private void attachNewBuffer(ByteBuffer buffer, Drop drop) { + base = buffer; + rmem = buffer; + wmem = buffer; + constBuffer = false; + drop.attach(this); + } + + @Override + public Buffer split(int splitOffset) { + if (splitOffset < 0) { + throw new IllegalArgumentException("The split offset cannot be negative: " + splitOffset + '.'); + } + if (capacity() < splitOffset) { + throw new IllegalArgumentException("The split offset cannot be greater than the buffer capacity, " + + "but the split offset was " + splitOffset + ", and capacity is " + capacity() + '.'); + } + if (!isAccessible()) { + throw 
attachTrace(bufferIsClosed(this)); + } + if (!isOwned()) { + throw attachTrace(new IllegalStateException("Cannot split a buffer that is not owned.")); + } + var drop = (ArcDrop) unsafeGetDrop(); + unsafeSetDrop(new ArcDrop<>(drop)); + var splitByteBuffer = bbslice(rmem, 0, splitOffset); + // TODO maybe incrementing the existing ArcDrop is enough; maybe we don't need to wrap it in another ArcDrop. + var splitBuffer = new NioBuffer(base, splitByteBuffer, control, new ArcDrop<>(drop.increment())); + splitBuffer.woff = Math.min(woff, splitOffset); + splitBuffer.roff = Math.min(roff, splitOffset); + boolean readOnly = readOnly(); + if (readOnly) { + splitBuffer.makeReadOnly(); + } + // Split preserves const-state. + splitBuffer.constBuffer = constBuffer; + rmem = bbslice(rmem, splitOffset, rmem.capacity() - splitOffset); + if (!readOnly) { + wmem = rmem; + } + woff = Math.max(woff, splitOffset) - splitOffset; + roff = Math.max(roff, splitOffset) - splitOffset; + return splitBuffer; + } + + @Override + public Buffer compact() { + if (!isOwned()) { + throw attachTrace(new IllegalStateException("Buffer must be owned in order to compact.")); + } + if (readOnly()) { + throw new BufferReadOnlyException("Buffer must be writable in order to compact, but was read-only."); + } + if (roff == 0) { + return this; + } + rmem.limit(woff).position(roff).compact().clear(); + woff -= roff; + roff = 0; + return this; + } + + @Override + public int countComponents() { + return 1; + } + + @Override + public int countReadableComponents() { + return readableBytes() > 0? 1 : 0; + } + + @Override + public int countWritableComponents() { + return writableBytes() > 0? 
1 : 0; + } + + // + @Override + public boolean hasReadableArray() { + return rmem.hasArray(); + } + + @Override + public byte[] readableArray() { + return rmem.array(); + } + + @Override + public int readableArrayOffset() { + return rmem.arrayOffset() + roff; + } + + @Override + public int readableArrayLength() { + return woff - roff; + } + + @Override + public long readableNativeAddress() { + return nativeAddress(); + } + + @Override + public ByteBuffer readableBuffer() { + return bbslice(rmem.asReadOnlyBuffer(), readerOffset(), readableBytes()); + } + + @Override + public boolean hasWritableArray() { + return wmem.hasArray(); + } + + @Override + public byte[] writableArray() { + return wmem.array(); + } + + @Override + public int writableArrayOffset() { + return wmem.arrayOffset() + woff; + } + + @Override + public int writableArrayLength() { + return capacity() - woff; + } + + @Override + public long writableNativeAddress() { + return nativeAddress(); + } + + @Override + public ByteBuffer writableBuffer() { + return bbslice(wmem, writerOffset(), writableBytes()); + } + // + + @Override + public int forEachReadable(int initialIndex, ReadableComponentProcessor processor) + throws E { + checkRead(readerOffset(), Math.max(1, readableBytes())); + return processor.process(initialIndex, this)? 1 : -1; + } + + @Override + public int forEachWritable(int initialIndex, WritableComponentProcessor processor) + throws E { + checkWrite(writerOffset(), Math.max(1, writableBytes())); + return processor.process(initialIndex, this)? 
1 : -1; + } + + // + @Override + public byte readByte() { + checkRead(roff, Byte.BYTES); + var value = rmem.get(roff); + roff += Byte.BYTES; + return value; + } + + @Override + public byte getByte(int roff) { + checkGet(roff, Byte.BYTES); + return rmem.get(roff); + } + + @Override + public int readUnsignedByte() { + return readByte() & 0xFF; + } + + @Override + public int getUnsignedByte(int roff) { + return getByte(roff) & 0xFF; + } + + @Override + public Buffer writeByte(byte value) { + try { + wmem.put(woff, value); + woff += Byte.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setByte(int woff, byte value) { + try { + wmem.put(woff, value); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer writeUnsignedByte(int value) { + try { + wmem.put(woff, (byte) (value & 0xFF)); + woff += Byte.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setUnsignedByte(int woff, int value) { + try { + wmem.put(woff, (byte) (value & 0xFF)); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public char readChar() { + checkRead(roff, 2); + var value = rmem.getChar(roff); + roff += 2; + return value; + } + + @Override + public char getChar(int roff) { + checkGet(roff, 2); + return rmem.getChar(roff); + } + + @Override + public Buffer writeChar(char value) { + try { + wmem.putChar(woff, value); + woff += 2; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, 
woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setChar(int woff, char value) { + try { + wmem.putChar(woff, value); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public short readShort() { + checkRead(roff, Short.BYTES); + var value = rmem.getShort(roff); + roff += 2; + return value; + } + + @Override + public short getShort(int roff) { + checkGet(roff, Short.BYTES); + return rmem.getShort(roff); + } + + @Override + public int readUnsignedShort() { + checkRead(roff, Short.BYTES); + var value = rmem.getShort(roff) & 0xFFFF; + roff += 2; + return value; + } + + @Override + public int getUnsignedShort(int roff) { + checkGet(roff, Short.BYTES); + return rmem.getShort(roff) & 0xFFFF; + } + + @Override + public Buffer writeShort(short value) { + try { + wmem.putShort(woff, value); + woff += Short.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setShort(int woff, short value) { + try { + wmem.putShort(woff, value); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer writeUnsignedShort(int value) { + try { + wmem.putShort(woff, (short) (value & 0xFFFF)); + woff += Short.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setUnsignedShort(int woff, int value) { + try { + wmem.putShort(woff, (short) (value & 0xFFFF)); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } 
catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public int readMedium() { + checkRead(roff, 3); + int value = rmem.get(roff) << 16 | (rmem.get(roff + 1) & 0xFF) << 8 | rmem.get(roff + 2) & 0xFF; + roff += 3; + return value; + } + + @Override + public int getMedium(int roff) { + checkGet(roff, 3); + return rmem.get(roff) << 16 | (rmem.get(roff + 1) & 0xFF) << 8 | rmem.get(roff + 2) & 0xFF; + } + + @Override + public int readUnsignedMedium() { + checkRead(roff, 3); + int value = (rmem.get(roff) << 16 | (rmem.get(roff + 1) & 0xFF) << 8 | rmem.get(roff + 2) & 0xFF) & 0xFFFFFF; + roff += 3; + return value; + } + + @Override + public int getUnsignedMedium(int roff) { + checkGet(roff, 3); + return (rmem.get(roff) << 16 | (rmem.get(roff + 1) & 0xFF) << 8 | rmem.get(roff + 2) & 0xFF) & 0xFFFFFF; + } + + @Override + public Buffer writeMedium(int value) { + checkWrite(woff, 3); + wmem.put(woff, (byte) (value >> 16)); + wmem.put(woff + 1, (byte) (value >> 8 & 0xFF)); + wmem.put(woff + 2, (byte) (value & 0xFF)); + woff += 3; + return this; + } + + @Override + public Buffer setMedium(int woff, int value) { + checkSet(woff, 3); + wmem.put(woff, (byte) (value >> 16)); + wmem.put(woff + 1, (byte) (value >> 8 & 0xFF)); + wmem.put(woff + 2, (byte) (value & 0xFF)); + return this; + } + + @Override + public Buffer writeUnsignedMedium(int value) { + checkWrite(woff, 3); + wmem.put(woff, (byte) (value >> 16)); + wmem.put(woff + 1, (byte) (value >> 8 & 0xFF)); + wmem.put(woff + 2, (byte) (value & 0xFF)); + woff += 3; + return this; + } + + @Override + public Buffer setUnsignedMedium(int woff, int value) { + checkSet(woff, 3); + wmem.put(woff, (byte) (value >> 16)); + wmem.put(woff + 1, (byte) (value >> 8 & 0xFF)); + wmem.put(woff + 2, (byte) (value & 0xFF)); + return this; + } + + @Override + public int readInt() { + checkRead(roff, Integer.BYTES); + var value = rmem.getInt(roff); + roff += Integer.BYTES; + return value; + } + + @Override + 
public int getInt(int roff) { + checkGet(roff, Integer.BYTES); + return rmem.getInt(roff); + } + + @Override + public long readUnsignedInt() { + checkRead(roff, Integer.BYTES); + var value = rmem.getInt(roff) & 0xFFFFFFFFL; + roff += Integer.BYTES; + return value; + } + + @Override + public long getUnsignedInt(int roff) { + checkGet(roff, Integer.BYTES); + return rmem.getInt(roff) & 0xFFFFFFFFL; + } + + @Override + public Buffer writeInt(int value) { + try { + wmem.putInt(woff, value); + woff += Integer.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setInt(int woff, int value) { + try { + wmem.putInt(woff, value); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, this.woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer writeUnsignedInt(long value) { + try { + wmem.putInt(woff, (int) (value & 0xFFFFFFFFL)); + woff += Integer.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setUnsignedInt(int woff, long value) { + try { + wmem.putInt(woff, (int) (value & 0xFFFFFFFFL)); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, this.woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public float readFloat() { + checkRead(roff, Float.BYTES); + var value = rmem.getFloat(roff); + roff += Float.BYTES; + return value; + } + + @Override + public float getFloat(int roff) { + checkGet(roff, Float.BYTES); + return rmem.getFloat(roff); + } + + @Override + public Buffer writeFloat(float value) { + try { + wmem.putFloat(woff, value); + woff += Float.BYTES; + return this; + } catch 
(IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setFloat(int woff, float value) { + try { + wmem.putFloat(woff, value); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public long readLong() { + checkRead(roff, Long.BYTES); + var value = rmem.getLong(roff); + roff += Long.BYTES; + return value; + } + + @Override + public long getLong(int roff) { + checkGet(roff, Long.BYTES); + return rmem.getLong(roff); + } + + @Override + public Buffer writeLong(long value) { + try { + wmem.putLong(woff, value); + woff += Long.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setLong(int woff, long value) { + try { + wmem.putLong(woff, value); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public double readDouble() { + checkRead(roff, Double.BYTES); + var value = rmem.getDouble(roff); + roff += Double.BYTES; + return value; + } + + @Override + public double getDouble(int roff) { + checkGet(roff, Double.BYTES); + return rmem.getDouble(roff); + } + + @Override + public Buffer writeDouble(double value) { + try { + wmem.putDouble(woff, value); + woff += Double.BYTES; + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + } catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + + @Override + public Buffer setDouble(int woff, double value) { + try { + wmem.putDouble(woff, value); + return this; + } catch (IndexOutOfBoundsException e) { + throw checkWriteState(e, woff); + 
} catch (ReadOnlyBufferException e) { + throw bufferIsReadOnly(this); + } + } + // + + @Override + protected Owned prepareSend() { + var roff = this.roff; + var woff = this.woff; + var readOnly = readOnly(); + var isConst = constBuffer; + ByteBuffer base = this.base; + ByteBuffer rmem = this.rmem; + makeInaccessible(); + return new Owned() { + @Override + public NioBuffer transferOwnership(Drop drop) { + NioBuffer copy = new NioBuffer(base, rmem, control, drop); + copy.roff = roff; + copy.woff = woff; + if (readOnly) { + copy.makeReadOnly(); + } + copy.constBuffer = isConst; + return copy; + } + }; + } + + void makeInaccessible() { + base = CLOSED_BUFFER; + rmem = CLOSED_BUFFER; + wmem = CLOSED_BUFFER; + roff = 0; + woff = 0; + } + + @Override + public boolean isOwned() { + return super.isOwned() && ((ArcDrop) unsafeGetDrop()).isOwned(); + } + + @Override + public int countBorrows() { + return super.countBorrows() + ((ArcDrop) unsafeGetDrop()).countBorrows(); + } + + private void checkRead(int index, int size) { + if (index < 0 || woff < index + size) { + throw readAccessCheckException(index); + } + } + + private void checkGet(int index, int size) { + if (index < 0 || capacity() < index + size) { + throw readAccessCheckException(index); + } + } + + private void checkWrite(int index, int size) { + if (index < roff || wmem.capacity() < index + size) { + throw writeAccessCheckException(index); + } + } + + private void checkSet(int index, int size) { + if (index < 0 || wmem.capacity() < index + size) { + throw writeAccessCheckException(index); + } + } + + private RuntimeException checkWriteState(IndexOutOfBoundsException ioobe, int offset) { + if (rmem == CLOSED_BUFFER) { + return bufferIsClosed(this); + } + if (wmem != rmem) { + return bufferIsReadOnly(this); + } + + IndexOutOfBoundsException exception = outOfBounds(offset); + exception.addSuppressed(ioobe); + return exception; + } + + private RuntimeException readAccessCheckException(int index) { + if (rmem == 
CLOSED_BUFFER) { + throw bufferIsClosed(this); + } + return outOfBounds(index); + } + + private RuntimeException writeAccessCheckException(int index) { + if (rmem == CLOSED_BUFFER) { + throw bufferIsClosed(this); + } + if (wmem != rmem) { + return bufferIsReadOnly(this); + } + return outOfBounds(index); + } + + private IndexOutOfBoundsException outOfBounds(int index) { + return new IndexOutOfBoundsException( + "Index " + index + " is out of bounds: [read 0 to " + woff + ", write 0 to " + + rmem.capacity() + "]."); + } + + ByteBuffer recoverable() { + return base; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/bytebuffer/package-info.java b/buffer/src/main/java/io/netty/buffer/api/bytebuffer/package-info.java new file mode 100644 index 00000000000..b49f8186e85 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/bytebuffer/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * Safe ByteBuffer based implementation. 
+ */ +package io.netty.buffer.api.bytebuffer; diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/AdaptableBuffer.java b/buffer/src/main/java/io/netty/buffer/api/internal/AdaptableBuffer.java new file mode 100644 index 00000000000..1317ac40106 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/AdaptableBuffer.java @@ -0,0 +1,98 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufAllocator; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.adaptor.BufferIntegratable; +import io.netty.buffer.api.adaptor.ByteBufAdaptor; +import io.netty.buffer.api.adaptor.ByteBufAllocatorAdaptor; +import io.netty.util.IllegalReferenceCountException; +import io.netty.util.ReferenceCounted; + +public abstract class AdaptableBuffer> + extends ResourceSupport implements BufferIntegratable, Buffer { + protected AdaptableBuffer(Drop drop) { + super(drop); + } + + private volatile ByteBufAdaptor adaptor; + + public ByteBuf initialise(ByteBufAllocatorAdaptor alloc, int maxCapacity) { + return new ByteBufAdaptor(alloc, this, maxCapacity); + } + + @Override + public ByteBuf asByteBuf() { + ByteBufAdaptor bba = adaptor; + if (bba == null) { + ByteBufAllocatorAdaptor alloc = (ByteBufAllocatorAdaptor) ByteBufAllocator.DEFAULT; + return adaptor = new ByteBufAdaptor(alloc, this, Integer.MAX_VALUE); + } + return bba; + } + + @Override + public int refCnt() { + return isAccessible()? 
1 + countBorrows() : 0; + } + + @Override + public ReferenceCounted retain() { + return retain(1); + } + + @Override + public ReferenceCounted retain(int increment) { + for (int i = 0; i < increment; i++) { + acquire(); + } + return this; + } + + @Override + public ReferenceCounted touch() { + return this; + } + + @Override + public ReferenceCounted touch(Object hint) { + return this; + } + + @Override + public boolean release() { + return release(1); + } + + @Override + public boolean release(int decrement) { + int refCount = 1 + countBorrows(); + if (!isAccessible() || decrement > refCount) { + throw new IllegalReferenceCountException(refCount, -decrement); + } + for (int i = 0; i < decrement; i++) { + try { + close(); + } catch (RuntimeException e) { + throw new IllegalReferenceCountException(e); + } + } + return !isAccessible(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/ArcDrop.java b/buffer/src/main/java/io/netty/buffer/api/internal/ArcDrop.java new file mode 100644 index 00000000000..6180b71202f --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/ArcDrop.java @@ -0,0 +1,115 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.Drop; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; + +public final class ArcDrop implements Drop { + private static final VarHandle COUNT; + static { + try { + COUNT = MethodHandles.lookup().findVarHandle(ArcDrop.class, "count", int.class); + } catch (Exception e) { + throw new ExceptionInInitializerError(e); + } + } + + private final Drop delegate; + @SuppressWarnings("FieldMayBeFinal") + private volatile int count; + + public ArcDrop(Drop delegate) { + this.delegate = delegate; + count = 1; + } + + public static Drop wrap(Drop drop) { + if (drop.getClass() == ArcDrop.class) { + return drop; + } + return new ArcDrop<>(drop); + } + + public static Drop acquire(Drop drop) { + if (drop.getClass() == ArcDrop.class) { + ((ArcDrop) drop).increment(); + return drop; + } + return new ArcDrop<>(drop); + } + + public ArcDrop increment() { + int c; + do { + c = count; + checkValidState(c); + } while (!COUNT.compareAndSet(this, c, c + 1)); + return this; + } + + @Override + public void drop(T obj) { + int c; + int n; + do { + c = count; + n = c - 1; + checkValidState(c); + } while (!COUNT.compareAndSet(this, c, n)); + if (n == 0) { + delegate.drop(obj); + } + } + + @Override + public void attach(T obj) { + delegate.attach(obj); + } + + public boolean isOwned() { + return count <= 1; + } + + public int countBorrows() { + return count - 1; + } + + public Drop unwrap() { + return delegate; + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder() + .append("ArcDrop@") + .append(Integer.toHexString(System.identityHashCode(this))) + .append('(').append(count).append(", "); + Drop drop = this; + while ((drop = ((ArcDrop) drop).unwrap()) instanceof ArcDrop) { + builder.append(((ArcDrop) drop).count).append(", "); + } + return builder.append(drop).append(')').toString(); + } + + private static void checkValidState(int count) { + if (count == 0) 
{ + throw new IllegalStateException("Underlying resources have already been freed."); + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/CleanerDrop.java b/buffer/src/main/java/io/netty/buffer/api/internal/CleanerDrop.java new file mode 100644 index 00000000000..75eae0c70a5 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/CleanerDrop.java @@ -0,0 +1,79 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.Drop; + +import java.lang.ref.Cleaner; +import java.util.concurrent.atomic.AtomicReference; + +/** + * A drop implementation that delegates to another drop instance, either when called directly, or when it becomes + * cleanable. This ensures that objects are dropped even if they leak. + */ +public final class CleanerDrop implements Drop { + private Cleaner.Cleanable cleanable; + private GatedRunner runner; + + /** + * Wrap the given drop instance, and produce a new drop instance that will also call the delegate drop instance if + * it becomes cleanable. 
+ */ + public static Drop wrap(Drop drop) { + CleanerDrop cleanerDrop = new CleanerDrop<>(); + GatedRunner runner = new GatedRunner<>(drop); + cleanerDrop.cleanable = Statics.CLEANER.register(cleanerDrop, runner); + cleanerDrop.runner = runner; + return cleanerDrop; + } + + private CleanerDrop() { + } + + @Override + public void attach(T obj) { + runner.set(obj); + runner.drop.attach(obj); + } + + @Override + public void drop(T obj) { + attach(obj); + cleanable.clean(); + } + + @Override + public String toString() { + return "CleanerDrop(" + runner.drop + ')'; + } + + private static final class GatedRunner extends AtomicReference implements Runnable { + private static final long serialVersionUID = 2685535951915798850L; + final Drop drop; + + private GatedRunner(Drop drop) { + this.drop = drop; + } + + @Override + public void run() { + T obj = getAndSet(null); // Make absolutely sure we only delegate once. + if (obj != null) { + drop.drop(obj); + } + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/LifecycleTracer.java b/buffer/src/main/java/io/netty/buffer/api/internal/LifecycleTracer.java new file mode 100644 index 00000000000..5c73d7563fe --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/LifecycleTracer.java @@ -0,0 +1,236 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.Owned; +import io.netty.buffer.api.Resource; + +import java.util.ArrayDeque; +import java.util.Set; +import java.util.function.Function; +import java.util.stream.Stream; + +/** + * Instances of this class record life cycle events of resources, to help debug life-cycle errors. + */ +public abstract class LifecycleTracer { + /** + * Get a tracer for a newly allocated resource. + * + * @return A new tracer for a resource. + */ + public static LifecycleTracer get() { + if (Trace.TRACE_LIFECYCLE_DEPTH == 0) { + return NoOpTracer.INSTANCE; + } + StackTracer stackTracer = new StackTracer(); + stackTracer.addTrace(StackTracer.WALKER.walk(new Trace("allocate", 0))); + return stackTracer; + } + + /** + * Add to the trace log that the object has been acquired, in other words the reference count has been incremented. + * + * @param acquires The new current number of acquires on the traced object. + */ + public abstract void acquire(int acquires); + + /** + * Add to the trace log that the object has been dropped. + * + * @param acquires The new current number of acquires on the traced object. + */ + public abstract void drop(int acquires); + + /** + * Add to the trace log that the object has been closed, in other words, the reference count has been decremented. + * + * @param acquires The new current number of acquires on the traced object. + */ + public abstract void close(int acquires); + + /** + * Add to the trace log that the object is being sent. + * + * @param instance The owned instance being sent. + * @param acquires The current number of acquires on this object. + * @param The resource interface for the object. + * @param The concrete type of the object. + * @return An {@link Owned} instance that may trace the reception of the object. 
+ */ + public abstract , T extends ResourceSupport> Owned send( + Owned instance, int acquires); + + /** + * Attach a life cycle trace log to the given exception. + * + * @param throwable The exception that will receive the trace log in the form of + * {@linkplain Throwable#addSuppressed(Throwable) suppressed exceptions}. + * @param The concrete exception type. + * @return The same exception instance, that can then be thrown. + */ + public abstract E attachTrace(E throwable); + + private static final class NoOpTracer extends LifecycleTracer { + private static final NoOpTracer INSTANCE = new NoOpTracer(); + + @Override + public void acquire(int acquires) { + } + + @Override + public void drop(int acquires) { + } + + @Override + public void close(int acquires) { + } + + @Override + public , T extends ResourceSupport> Owned send(Owned instance, int acquires) { + return instance; + } + + @Override + public E attachTrace(E throwable) { + return throwable; + } + } + + private static final class StackTracer extends LifecycleTracer { + private static final int MAX_TRACE_POINTS = Math.min(Integer.getInteger( + "io.netty.buffer.api.internal.LifecycleTracer.MAX_TRACE_POINTS", 50), 1000); + private static final StackWalker WALKER; + static { + int depth = Trace.TRACE_LIFECYCLE_DEPTH; + WALKER = depth > 0 ? 
StackWalker.getInstance(Set.of(), depth + 2) : null; + } + + private final ArrayDeque traces = new ArrayDeque<>(); + private boolean dropped; + + @Override + public void acquire(int acquires) { + Trace trace = WALKER.walk(new Trace("acquire", acquires)); + addTrace(trace); + } + + void addTrace(Trace trace) { + synchronized (traces) { + if (traces.size() == MAX_TRACE_POINTS) { + traces.pollFirst(); + } + traces.addLast(trace); + } + } + + @Override + public void drop(int acquires) { + dropped = true; + addTrace(WALKER.walk(new Trace("drop", acquires))); + } + + @Override + public void close(int acquires) { + if (!dropped) { + addTrace(WALKER.walk(new Trace("close", acquires))); + } + } + + @Override + public , T extends ResourceSupport> Owned send(Owned instance, int acquires) { + Trace sendTrace = new Trace("send", acquires); + sendTrace.sent = true; + addTrace(WALKER.walk(sendTrace)); + return new Owned() { + @Override + public T transferOwnership(Drop drop) { + sendTrace.received = WALKER.walk(new Trace("received", acquires)); + return instance.transferOwnership(drop); + } + }; + } + + @Override + public E attachTrace(E throwable) { + synchronized (traces) { + long timestamp = System.nanoTime(); + for (Trace trace : traces) { + trace.attach(throwable, timestamp); + } + } + return throwable; + } + } + + private static final class Trace implements Function, Trace> { + private static final int TRACE_LIFECYCLE_DEPTH; + static { + int traceDefault = 0; + TRACE_LIFECYCLE_DEPTH = Math.max(Integer.getInteger( + "io.netty.buffer.api.internal.LifecycleTracer.TRACE_LIFECYCLE_DEPTH", traceDefault), 0); + } + + final String name; + final int acquires; + final long timestamp; + boolean sent; + volatile Trace received; + StackWalker.StackFrame[] frames; + + Trace(String name, int acquires) { + this.name = name; + this.acquires = acquires; + timestamp = System.nanoTime(); + } + + @Override + public Trace apply(Stream frames) { + this.frames = frames.limit(TRACE_LIFECYCLE_DEPTH 
+ 1).toArray(StackWalker.StackFrame[]::new); + return this; + } + + public void attach(E throwable, long timestamp) { + Trace recv = received; + String message = sent && recv == null ? name + " (sent but not received)" : name; + message += " (current acquires = " + acquires + ") T" + (this.timestamp - timestamp) / 1000 + "Âĩs."; + Traceback exception = new Traceback(message); + StackTraceElement[] stackTrace = new StackTraceElement[frames.length]; + for (int i = 0; i < frames.length; i++) { + stackTrace[i] = frames[i].toStackTraceElement(); + } + exception.setStackTrace(stackTrace); + if (recv != null) { + recv.attach(exception, timestamp); + } + throwable.addSuppressed(exception); + } + } + + private static final class Traceback extends Throwable { + private static final long serialVersionUID = 941453986194634605L; + + Traceback(String message) { + super(message); + } + + @Override + public Throwable fillInStackTrace() { + return this; + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/MemoryManagerLoader.java b/buffer/src/main/java/io/netty/buffer/api/internal/MemoryManagerLoader.java new file mode 100644 index 00000000000..57099fa887d --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/MemoryManagerLoader.java @@ -0,0 +1,41 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.MemoryManager; + +import java.util.ServiceLoader; +import java.util.ServiceLoader.Provider; +import java.util.stream.Stream; + +public final class MemoryManagerLoader { + /** + * Cache the service loader to reduce cost of repeated calls. + * However, also place the cached loader field in a dedicated class, so the service loading is performed lazily, + * on class initialisation, when (and if) needed. + */ + private static final ServiceLoader LOADER = ServiceLoader.load(MemoryManager.class); + + private MemoryManagerLoader() { + } + + /** + * @see MemoryManager#availableManagers() + */ + public static Stream> stream() { + return LOADER.stream(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/MemoryManagerOverride.java b/buffer/src/main/java/io/netty/buffer/api/internal/MemoryManagerOverride.java new file mode 100644 index 00000000000..dff0050cf85 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/MemoryManagerOverride.java @@ -0,0 +1,75 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.bytebuffer.ByteBufferMemoryManager; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.util.Collections; +import java.util.IdentityHashMap; +import java.util.Map; +import java.util.Optional; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Supplier; + +public final class MemoryManagerOverride { + private static final MemoryManager DEFAULT = createDefaultMemoryManagerInstance(); + private static final AtomicInteger OVERRIDES_AVAILABLE = new AtomicInteger(); + private static final Map OVERRIDES = Collections.synchronizedMap(new IdentityHashMap<>()); + + private MemoryManagerOverride() { + } + + private static MemoryManager createDefaultMemoryManagerInstance() { + String systemProperty = "io.netty.buffer.api.MemoryManager"; + String configured = System.getProperty(systemProperty); + if (configured != null) { + Optional candidateManager = MemoryManager.lookupImplementation(configured); + InternalLogger logger = InternalLoggerFactory.getInstance(MemoryManagerOverride.class); + if (candidateManager.isPresent()) { + logger.debug("{} configured: {}", systemProperty, configured); + return candidateManager.get(); + } else { + MemoryManager fallback = new ByteBufferMemoryManager(); + logger.debug("{} requested implementation is unavailable: {} (using default {} implementation instead)", + systemProperty, configured, fallback.implementationName()); + return fallback; + } + } + return new ByteBufferMemoryManager(); + } + + public static MemoryManager configuredOrDefaultManager() { + if (OVERRIDES_AVAILABLE.get() > 0) { + return OVERRIDES.getOrDefault(Thread.currentThread(), DEFAULT); + } + return DEFAULT; + } + + public static T using(MemoryManager managers, Supplier supplier) { + Thread thread = Thread.currentThread(); + OVERRIDES.put(thread, managers); 
+ OVERRIDES_AVAILABLE.incrementAndGet(); + try { + return supplier.get(); + } finally { + OVERRIDES_AVAILABLE.decrementAndGet(); + OVERRIDES.remove(thread); + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/ResourceSupport.java b/buffer/src/main/java/io/netty/buffer/api/internal/ResourceSupport.java new file mode 100644 index 00000000000..17eb7f09c9d --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/ResourceSupport.java @@ -0,0 +1,231 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.Owned; +import io.netty.buffer.api.Resource; +import io.netty.buffer.api.Send; + +import java.util.Objects; + +/** + * Internal support class for resources. + * + * @param The public interface for the resource. + * @param The concrete implementation of the resource. + */ +public abstract class ResourceSupport, T extends ResourceSupport> implements Resource { + private int acquires; // Closed if negative. + private Drop drop; + private final LifecycleTracer tracer; + + protected ResourceSupport(Drop drop) { + this.drop = drop; + tracer = LifecycleTracer.get(); + } + + /** + * Encapsulation bypass for calling {@link #acquire()} on the given object. + *

    + * Note: this {@code acquire} method does not check the type of the return value from acquire at compile time. + * The type is instead checked at runtime, and will cause a {@link ClassCastException} to be thrown if done + * incorrectly. + * + * @param obj The object we wish to acquire (increment reference count) on. + * @param The type of the acquired object, given by target-typing. + * @return The acquired object. + */ + @SuppressWarnings("unchecked") + static T acquire(ResourceSupport obj) { + return (T) obj.acquire(); + } + + /** + * Increment the reference count. + *

* Note, this method is not thread-safe because Resources are meant to be thread-confined.

    + * Note, this method is not thread-safe because Resources are meant to be thread-confined. + * + * @throws IllegalStateException If this Resource has already been closed. + */ + @Override + public final void close() { + if (acquires == -1) { + throw attachTrace(new IllegalStateException("Double-free: Resource already closed and dropped.")); + } + if (acquires == 0) { + tracer.drop(acquires); + drop.drop(impl()); + } + acquires--; + tracer.close(acquires); + } + + /** + * Send this Resource instance to another Thread, transferring the ownership to the recipient. + * This method can be used when the receiving thread is not known up front. + *

acquires = -2; // Close without dropping. This also ignores future double-free attempts.
+ */ + static boolean isOwned(ResourceSupport obj) { + return obj.isOwned(); + } + + /** + * Query if this object is in an "owned" state, which means no other references have been + * {@linkplain #acquire() acquired} to it. + * + * This would usually be the case, since there are no public methods for acquiring references to these objects. + * + * @return {@code true} if this object is in an owned state, otherwise {@code false}. + */ + protected boolean isOwned() { + return acquires == 0; + } + + /** + * Encapsulation bypass to call {@link #countBorrows()} on the given object. + * + * @param obj The object to count borrows on. + * @return The number of borrows, or outstanding {@linkplain #acquire() acquires}, if any, of the given object. + */ + static int countBorrows(ResourceSupport obj) { + return obj.countBorrows(); + } + + /** + * Count the number of borrows of this object. + * Note that even if the number of borrows is {@code 0}, this object might not be {@linkplain #isOwned() owned} + * because there could be other restrictions involved in ownership. + * + * @return The number of borrows, if any, of this object. + */ + protected int countBorrows() { + return Math.max(acquires, 0); + } + + @Override + public boolean isAccessible() { + return acquires >= 0; + } + + /** + * Prepare this instance for ownership transfer. This method is called from {@link #send()} in the sending thread. + * This method should put this resource in a deactivated state where it is no longer accessible from the currently + * owning thread. + * In this state, the resource instance should only allow a call to {@link Owned#transferOwnership(Drop)} in the + * recipient thread. + * + * @return This resource instance in a deactivated state. + */ + protected abstract Owned prepareSend(); + + /** + * Get access to the underlying {@link Drop} object. + * This method is unsafe because it opens the possibility of bypassing and overriding resource lifetimes. 
+ * + * @return The {@link Drop} object used by this reference counted object. + */ + protected Drop unsafeGetDrop() { + return drop; + } + + /** + * Replace the current underlying {@link Drop} object with the given one. + * This method is unsafe because it opens the possibility of bypassing and overriding resource lifetimes. + * + * @param replacement The new {@link Drop} object to use instead of the current one. + */ + protected void unsafeSetDrop(Drop replacement) { + drop = Objects.requireNonNull(replacement, "Replacement drop cannot be null."); + } + + @SuppressWarnings("unchecked") + private I self() { + return (I) this; + } + + @SuppressWarnings("unchecked") + private T impl() { + return (T) this; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/SendFromOwned.java b/buffer/src/main/java/io/netty/buffer/api/internal/SendFromOwned.java new file mode 100644 index 00000000000..f86b78eb6e1 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/SendFromOwned.java @@ -0,0 +1,70 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.Owned; +import io.netty.buffer.api.Resource; +import io.netty.buffer.api.Send; + +import java.lang.invoke.VarHandle; + +import static io.netty.buffer.api.internal.Statics.findVarHandle; +import static java.lang.invoke.MethodHandles.lookup; + +public class SendFromOwned, T extends ResourceSupport> implements Send { + private static final VarHandle RECEIVED = findVarHandle(lookup(), SendFromOwned.class, "received", boolean.class); + private final Owned outgoing; + private final Drop drop; + private final Class concreteType; + @SuppressWarnings("unused") + private volatile boolean received; // Accessed via VarHandle + + public SendFromOwned(Owned outgoing, Drop drop, Class concreteType) { + this.outgoing = outgoing; + this.drop = drop; + this.concreteType = concreteType; + } + + @SuppressWarnings("unchecked") + @Override + public I receive() { + gateReception(); + var copy = outgoing.transferOwnership(drop); + drop.attach(copy); + return (I) copy; + } + + private void gateReception() { + if ((boolean) RECEIVED.getAndSet(this, true)) { + throw new IllegalStateException("This object has already been received."); + } + } + + @Override + public boolean referentIsInstanceOf(Class cls) { + return cls.isAssignableFrom(concreteType); + } + + @Override + public void close() { + if (!(boolean) RECEIVED.getAndSet(this, true)) { + var copy = outgoing.transferOwnership(drop); + drop.attach(copy); + copy.close(); + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/SendFromSupplier.java b/buffer/src/main/java/io/netty/buffer/api/internal/SendFromSupplier.java new file mode 100644 index 00000000000..0b102f8f3c1 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/SendFromSupplier.java @@ -0,0 +1,70 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the 
"License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.Resource; +import io.netty.buffer.api.Send; + +import java.lang.invoke.VarHandle; +import java.util.Objects; +import java.util.function.Supplier; + +import static io.netty.buffer.api.internal.Statics.findVarHandle; +import static java.lang.invoke.MethodHandles.lookup; + +public class SendFromSupplier> implements Send { + private static final VarHandle GATE = findVarHandle(lookup(), SendFromSupplier.class, "gate", boolean.class); + private final Class concreteObjectType; + private final Supplier supplier; + + @SuppressWarnings("unused") // Accessed via VarHandle GATE. + private volatile boolean gate; + + public SendFromSupplier(Class concreteObjectType, Supplier supplier) { + this.concreteObjectType = Objects.requireNonNull(concreteObjectType, "Concrete type cannot be null."); + this.supplier = Objects.requireNonNull(supplier, "Supplier cannot be null."); + } + + @Override + public T receive() { + if (passGate()) { + throw new IllegalStateException("This object has already been received."); + } + return supplier.get(); + } + + @Override + public boolean referentIsInstanceOf(Class cls) { + return cls.isAssignableFrom(concreteObjectType); + } + + @Override + public void close() { + if (!passGate()) { + supplier.get().close(); + } + } + + /** + * Atomically check and pass through the gate. 
+ * + * @return {@code true} if the gate has already been passed, + * otherwise {@code false} if we got through the gate first. + */ + private boolean passGate() { + return (boolean) GATE.getAndSet(this, true); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/Statics.java b/buffer/src/main/java/io/netty/buffer/api/internal/Statics.java new file mode 100644 index 00000000000..40429459aa9 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/Statics.java @@ -0,0 +1,209 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.internal; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferClosedException; +import io.netty.buffer.api.BufferReadOnlyException; +import io.netty.buffer.api.Drop; + +import java.lang.invoke.MethodHandle; +import java.lang.invoke.MethodHandles; +import java.lang.invoke.MethodHandles.Lookup; +import java.lang.invoke.MethodType; +import java.lang.invoke.VarHandle; +import java.lang.ref.Cleaner; +import java.nio.ByteBuffer; +import java.util.concurrent.atomic.LongAdder; + +public interface Statics { + LongAdder MEM_USAGE_NATIVE = new LongAdder(); + Cleaner CLEANER = Cleaner.create(); + Drop NO_OP_DROP = new Drop() { + @Override + public void drop(Buffer obj) { + } + + @Override + public String toString() { + return "NO_OP_DROP"; + } + }; + MethodHandle BB_SLICE_OFFSETS = getByteBufferSliceOffsetsMethodHandle(); + MethodHandle BB_PUT_OFFSETS = getByteBufferPutOffsetsMethodHandle(); + + static MethodHandle getByteBufferSliceOffsetsMethodHandle() { + try { + Lookup lookup = MethodHandles.lookup(); + MethodType type = MethodType.methodType(ByteBuffer.class, int.class, int.class); + return lookup.findVirtual(ByteBuffer.class, "slice", type); + } catch (Exception ignore) { + return null; + } + } + + @SuppressWarnings("JavaLangInvokeHandleSignature") + static MethodHandle getByteBufferPutOffsetsMethodHandle() { + try { + Lookup lookup = MethodHandles.lookup(); + MethodType type = MethodType.methodType( + ByteBuffer.class, int.class, ByteBuffer.class, int.class, int.class); + return lookup.findVirtual(ByteBuffer.class, "put", type); + } catch (Exception ignore) { + return null; + } + } + + @SuppressWarnings({"unchecked", "unused"}) + static Drop noOpDrop() { + return (Drop) NO_OP_DROP; + } + + static VarHandle findVarHandle(Lookup lookup, Class recv, String name, Class type) { + try { + return lookup.findVarHandle(recv, name, type); + } catch (Exception e) { + throw new ExceptionInInitializerError(e); + } + } + + 
@SuppressWarnings("unchecked") + static Drop convert(Drop drop) { + return (Drop) drop; + } + + /** + * Check the given {@code size} argument is a valid buffer size, or throw an {@link IllegalArgumentException}. + * + * @param size The size to check. + * @throws IllegalArgumentException if the size is not positive, or if the size is too big (over ~2 GB) for a + * buffer to accommodate. + */ + static void assertValidBufferSize(long size) { + if (size < 0) { + throw new IllegalArgumentException("Buffer size must not be negative, but was " + size + '.'); + } + // We use max array size because on-heap buffers will be backed by byte-arrays. + int maxArraySize = Integer.MAX_VALUE - 8; + if (size > maxArraySize) { + throw new IllegalArgumentException( + "Buffer size cannot be greater than " + maxArraySize + ", but was " + size + '.'); + } + } + + static void copyToViaReverseLoop(Buffer src, int srcPos, Buffer dest, int destPos, int length) { + if (length == 0) { + return; + } + // Iterate in reverse to account for src and dest buffer overlap. + int i = length; + while (i >= Long.BYTES) { + i -= Long.BYTES; + dest.setLong(destPos + i, src.getLong(srcPos + i)); + } + while (i > 0) { + i--; + dest.setByte(destPos + i, src.getByte(srcPos + i)); + } + } + + /** + * The ByteBuffer slice-with-offset-and-length method is only available from Java 13 and onwards, but we need to + * support Java 11. 
+ */ + static ByteBuffer bbslice(ByteBuffer buffer, int fromOffset, int length) { + if (BB_SLICE_OFFSETS != null) { + return bbsliceJdk13(buffer, fromOffset, length); + } + return bbsliceFallback(buffer, fromOffset, length); + } + + private static ByteBuffer bbsliceJdk13(ByteBuffer buffer, int fromOffset, int length) { + try { + return (ByteBuffer) BB_SLICE_OFFSETS.invokeExact(buffer, fromOffset, length); + } catch (RuntimeException re) { + throw re; + } catch (Throwable throwable) { + throw new LinkageError("Unexpected exception from ByteBuffer.slice(int,int).", throwable); + } + } + + private static ByteBuffer bbsliceFallback(ByteBuffer buffer, int fromOffset, int length) { + if (fromOffset < 0) { + throw new IndexOutOfBoundsException("The fromOffset must be positive: " + fromOffset + '.'); + } + int newLimit = fromOffset + length; + if (newLimit > buffer.capacity()) { + throw new IndexOutOfBoundsException( + "The limit of " + newLimit + " would be greater than capacity: " + buffer.capacity() + '.'); + } + try { + return buffer.position(fromOffset).limit(newLimit).slice(); + } finally { + buffer.clear(); + } + } + + /** + * The ByteBuffer put-buffer-with-offset-and-length method is not available in Java 11. + */ + static void bbput(ByteBuffer dest, int destPos, ByteBuffer src, int srcPos, int length) { + if (BB_PUT_OFFSETS != null) { + bbputJdk16(dest, destPos, src, srcPos, length); + } else { + bbputFallback(dest, destPos, src, srcPos, length); + } + } + + private static void bbputJdk16(ByteBuffer dest, int destPos, ByteBuffer src, int srcPos, int length) { + try { + @SuppressWarnings("unused") // We need to cast the return type in order to invokeExact. 
+ ByteBuffer ignore = (ByteBuffer) BB_PUT_OFFSETS.invokeExact(dest, destPos, src, srcPos, length); + } catch (RuntimeException re) { + throw re; + } catch (Throwable throwable) { + throw new LinkageError("Unexpected exception from ByteBuffer.put(int,ByteBuffer,int,int).", throwable); + } + } + + private static void bbputFallback(ByteBuffer dest, int destPos, ByteBuffer src, int srcPos, int length) { + dest.position(destPos).put(bbslice(src, srcPos, length)); + } + + static BufferClosedException bufferIsClosed(Buffer buffer) { + return new BufferClosedException("This buffer is closed: " + buffer); + } + + static BufferReadOnlyException bufferIsReadOnly(Buffer buffer) { + return new BufferReadOnlyException("This buffer is read-only: " + buffer); + } + + static IllegalStateException allocatorClosedException() { + return new IllegalStateException("This allocator has been closed."); + } + + static T acquire(ResourceSupport obj) { + return ResourceSupport.acquire(obj); + } + + static boolean isOwned(ResourceSupport obj) { + return ResourceSupport.isOwned(obj); + } + + static int countBorrows(ResourceSupport obj) { + return ResourceSupport.countBorrows(obj); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/internal/package-info.java b/buffer/src/main/java/io/netty/buffer/api/internal/package-info.java new file mode 100644 index 00000000000..5087850e822 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/internal/package-info.java @@ -0,0 +1,24 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * Internal implementation details that can be shared among Buffer implementations. + *

    + * + * Note: everything in this package is internal, and is not subject to backwards compatibility constraints. + * + */ +package io.netty.buffer.api.internal; diff --git a/buffer/src/main/java/io/netty/buffer/api/package-info.java b/buffer/src/main/java/io/netty/buffer/api/package-info.java new file mode 100644 index 00000000000..920b173c8b6 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * Incubating {@code Buffer} API, as a proposed alternative to {@code ByteBuf}. + */ +package io.netty.buffer.api; diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/BufferAllocatorMetric.java b/buffer/src/main/java/io/netty/buffer/api/pool/BufferAllocatorMetric.java new file mode 100644 index 00000000000..eeaa67627a5 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/BufferAllocatorMetric.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.BufferAllocator; + +public interface BufferAllocatorMetric { + /** + * Returns the number of bytes of heap memory used by a {@link BufferAllocator} or {@code -1} if unknown. + */ + long usedMemory(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/BufferAllocatorMetricProvider.java b/buffer/src/main/java/io/netty/buffer/api/pool/BufferAllocatorMetricProvider.java new file mode 100644 index 00000000000..1b19e732a0b --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/BufferAllocatorMetricProvider.java @@ -0,0 +1,26 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.BufferAllocator; + +public interface BufferAllocatorMetricProvider { + + /** + * Returns a {@link BufferAllocatorMetric} for a {@link BufferAllocator}. 
+ */ + BufferAllocatorMetric metric(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolArena.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolArena.java new file mode 100644 index 00000000000..983d6b5d2a2 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolArena.java @@ -0,0 +1,467 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.AllocationType; +import io.netty.buffer.api.AllocatorControl; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.MemoryManager; +import io.netty.util.internal.StringUtil; + +import java.lang.invoke.MethodHandles; +import java.lang.invoke.VarHandle; +import java.util.ArrayList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.LongAdder; + +import static io.netty.buffer.api.pool.PoolChunk.isSubpage; +import static java.lang.Math.max; + +class PoolArena extends SizeClasses implements PoolArenaMetric, AllocatorControl { + private static final VarHandle SUBPAGE_ARRAY = MethodHandles.arrayElementVarHandle(PoolSubpage[].class); + enum SizeClass { + Small, + Normal + } + + final PooledBufferAllocator parent; + final MemoryManager manager; + final AllocationType allocationType; + + final int numSmallSubpagePools; + final int directMemoryCacheAlignment; + private final 
PoolSubpage[] smallSubpagePools; + + private final PoolChunkList q050; + private final PoolChunkList q025; + private final PoolChunkList q000; + private final PoolChunkList qInit; + private final PoolChunkList q075; + private final PoolChunkList q100; + + private final List chunkListMetrics; + + // Metrics for allocations and deallocations + private long allocationsNormal; + + // We need to use the LongAdder here as this is not guarded via synchronized block. + private final LongAdder allocationsSmall = new LongAdder(); + private final LongAdder allocationsHuge = new LongAdder(); + private final LongAdder activeBytesHuge = new LongAdder(); + + private long deallocationsSmall; + private long deallocationsNormal; + + // We need to use the LongAdder here as this is not guarded via synchronized block. + private final LongAdder deallocationsHuge = new LongAdder(); + + // Number of thread caches backed by this arena. + final AtomicInteger numThreadCaches = new AtomicInteger(); + + protected PoolArena(PooledBufferAllocator parent, MemoryManager manager, AllocationType allocationType, + int pageSize, int pageShifts, int chunkSize, int cacheAlignment) { + super(pageSize, pageShifts, chunkSize, cacheAlignment); + this.parent = parent; + this.manager = manager; + this.allocationType = allocationType; + directMemoryCacheAlignment = cacheAlignment; + + numSmallSubpagePools = nSubpages; + smallSubpagePools = newSubpagePoolArray(numSmallSubpagePools); + + q100 = new PoolChunkList(this, null, 100, Integer.MAX_VALUE, chunkSize); + q075 = new PoolChunkList(this, q100, 75, 100, chunkSize); + q050 = new PoolChunkList(this, q075, 50, 100, chunkSize); + q025 = new PoolChunkList(this, q050, 25, 75, chunkSize); + q000 = new PoolChunkList(this, q025, 1, 50, chunkSize); + qInit = new PoolChunkList(this, q000, Integer.MIN_VALUE, 25, chunkSize); + + q100.prevList(q075); + q075.prevList(q050); + q050.prevList(q025); + q025.prevList(q000); + q000.prevList(null); + qInit.prevList(qInit); + + 
chunkListMetrics = List.of(qInit, q000, q025, q050, q075, q100); + } + + private static PoolSubpage newSubpagePoolHead() { + PoolSubpage head = new PoolSubpage(); + head.prev = head; + head.next = head; + return head; + } + + private static PoolSubpage[] newSubpagePoolArray(int size) { + return new PoolSubpage[size]; + } + + UntetheredMemory allocate(PooledAllocatorControl control, PoolThreadCache cache, int size) { + final int sizeIdx = size2SizeIdx(size); + + if (sizeIdx <= smallMaxSizeIdx) { + return tcacheAllocateSmall(control, cache, size, sizeIdx); + } else if (sizeIdx < nSizes) { + return tcacheAllocateNormal(control, cache, size, sizeIdx); + } else { + int normCapacity = directMemoryCacheAlignment > 0 + ? normalizeSize(size) : size; + // Huge allocations are never served via the cache so just call allocateHuge + return allocateHuge(normCapacity); + } + } + + private UntetheredMemory tcacheAllocateSmall(PooledAllocatorControl control, PoolThreadCache cache, final int size, + final int sizeIdx) { + UntetheredMemory memory = cache.allocateSmall(control, size, sizeIdx); + if (memory != null) { + // was able to allocate out of the cache so move on + return memory; + } + + /* + * Synchronize on the head. This is needed as {@link PoolChunk#allocateSubpage(int)} and + * {@link PoolChunk#free(long)} may modify the doubly linked list as well. 
+ */ + PoolSubpage head = findSubpagePoolHead(sizeIdx); + final boolean needsNormalAllocation; + synchronized (head) { + final PoolSubpage s = head.next; + needsNormalAllocation = s == head; + if (!needsNormalAllocation) { + assert s.doNotDestroy && s.elemSize == sizeIdx2size(sizeIdx); + long handle = s.allocate(); + assert handle >= 0; + memory = s.chunk.allocateBufferWithSubpage(handle, size, cache, control); + } + } + + if (needsNormalAllocation) { + synchronized (this) { + memory = allocateNormal(size, sizeIdx, cache, control); + } + } + + incSmallAllocation(); + return memory; + } + + private UntetheredMemory tcacheAllocateNormal( + PooledAllocatorControl control, PoolThreadCache cache, int size, int sizeIdx) { + UntetheredMemory memory = cache.allocateNormal(this, control, size, sizeIdx); + if (memory != null) { + // was able to allocate out of the cache so move on + return memory; + } + synchronized (this) { + memory = allocateNormal(size, sizeIdx, cache, control); + allocationsNormal++; + } + return memory; + } + + // Method must be called inside synchronized(this) { ... } block + private UntetheredMemory allocateNormal( + int size, int sizeIdx, PoolThreadCache threadCache, PooledAllocatorControl control) { + UntetheredMemory memory = q050.allocate(size, sizeIdx, threadCache, control); + if (memory != null) { + return memory; + } + memory = q025.allocate(size, sizeIdx, threadCache, control); + if (memory != null) { + return memory; + } + memory = q000.allocate(size, sizeIdx, threadCache, control); + if (memory != null) { + return memory; + } + memory = qInit.allocate(size, sizeIdx, threadCache, control); + if (memory != null) { + return memory; + } + memory = q075.allocate(size, sizeIdx, threadCache, control); + if (memory != null) { + return memory; + } + + // Add a new chunk. 
+ PoolChunk c = newChunk(pageSize, nPSizes, pageShifts, chunkSize); + memory = c.allocate(size, sizeIdx, threadCache, control); + assert memory != null; + qInit.add(c); + return memory; + } + + private void incSmallAllocation() { + allocationsSmall.increment(); + } + + private UntetheredMemory allocateHuge(int size) { + activeBytesHuge.add(size); + allocationsHuge.increment(); + return new UnpooledUnthetheredMemory(parent, manager, allocationType, size); + } + + void free(PoolChunk chunk, long handle, int normCapacity, PoolThreadCache cache) { + SizeClass sizeClass = sizeClass(handle); + if (cache != null && cache.add(this, chunk, handle, normCapacity, sizeClass)) { + // cached so not free it. + return; + } + freeChunk(chunk, handle, normCapacity, sizeClass); + } + + private static SizeClass sizeClass(long handle) { + return isSubpage(handle) ? SizeClass.Small : SizeClass.Normal; + } + + void freeChunk(PoolChunk chunk, long handle, int normCapacity, SizeClass sizeClass) { + final boolean destroyChunk; + synchronized (this) { + if (sizeClass == SizeClass.Normal) { + ++deallocationsNormal; + } else if (sizeClass == SizeClass.Small) { + ++deallocationsSmall; + } else { + throw new AssertionError("Unexpected size class: " + sizeClass); + } + destroyChunk = !chunk.parent.free(chunk, handle, normCapacity); + } + if (destroyChunk) { + // destroyChunk not need to be called while holding the synchronized lock. + chunk.destroy(); + } + } + + PoolSubpage findSubpagePoolHead(int sizeIdx) { + PoolSubpage head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(smallSubpagePools, sizeIdx); + if (head == null) { + head = newSubpagePoolHead(); + if (!SUBPAGE_ARRAY.compareAndSet(smallSubpagePools, sizeIdx, null, head)) { + // We lost the race. Read the winning value. 
+ head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(smallSubpagePools, sizeIdx); + } + } + return head; + } + + @Override + public UntetheredMemory allocateUntethered(Buffer originator, int size) { + throw new AssertionError("PoolChunk base buffers should never need to reallocate."); + } + + @Override + public int numThreadCaches() { + return numThreadCaches.get(); + } + + @Override + public int numSmallSubpages() { + return smallSubpagePools.length; + } + + @Override + public int numChunkLists() { + return chunkListMetrics.size(); + } + + @Override + public List smallSubpages() { + return subPageMetricList(smallSubpagePools); + } + + @Override + public List chunkLists() { + return chunkListMetrics; + } + + private static List subPageMetricList(PoolSubpage[] pages) { + List metrics = new ArrayList<>(); + for (int i = 0, len = pages.length; i < len; i++) { + PoolSubpage head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(pages, i); + if (head == null || head.next == head) { + continue; + } + PoolSubpage s = head.next; + do { + metrics.add(s); + s = s.next; + } while (s != head); + } + return metrics; + } + + @Override + public long numAllocations() { + final long allocsNormal; + synchronized (this) { + allocsNormal = allocationsNormal; + } + + return allocationsSmall.longValue() + allocsNormal + allocationsHuge.longValue(); + } + + @Override + public long numSmallAllocations() { + return allocationsSmall.longValue(); + } + + @Override + public synchronized long numNormalAllocations() { + return allocationsNormal; + } + + @Override + public long numDeallocations() { + final long deallocs; + synchronized (this) { + deallocs = deallocationsSmall + deallocationsNormal; + } + return deallocs + deallocationsHuge.longValue(); + } + + @Override + public synchronized long numSmallDeallocations() { + return deallocationsSmall; + } + + @Override + public synchronized long numNormalDeallocations() { + return deallocationsNormal; + } + + @Override + public long numHugeAllocations() { + 
return allocationsHuge.longValue(); + } + + @Override + public long numHugeDeallocations() { + return deallocationsHuge.longValue(); + } + + @Override + public long numActiveAllocations() { + long val = allocationsSmall.longValue() + allocationsHuge.longValue() + - deallocationsHuge.longValue(); + synchronized (this) { + val += allocationsNormal - (deallocationsSmall + deallocationsNormal); + } + return max(val, 0); + } + + @Override + public long numActiveSmallAllocations() { + return max(numSmallAllocations() - numSmallDeallocations(), 0); + } + + @Override + public long numActiveNormalAllocations() { + final long val; + synchronized (this) { + val = allocationsNormal - deallocationsNormal; + } + return max(val, 0); + } + + @Override + public long numActiveHugeAllocations() { + return max(numHugeAllocations() - numHugeDeallocations(), 0); + } + + @Override + public long numActiveBytes() { + long val = activeBytesHuge.longValue(); + synchronized (this) { + for (int i = 0; i < chunkListMetrics.size(); i++) { + for (PoolChunkMetric m: chunkListMetrics.get(i)) { + val += m.chunkSize(); + } + } + } + return max(0, val); + } + + protected final PoolChunk newChunk(int pageSize, int maxPageIdx, int pageShifts, int chunkSize) { + return new PoolChunk(this, pageSize, pageShifts, chunkSize, maxPageIdx); + } + + @Override + public synchronized String toString() { + StringBuilder buf = new StringBuilder() + .append("Chunk(s) at 0~25%:") + .append(StringUtil.NEWLINE) + .append(qInit) + .append(StringUtil.NEWLINE) + .append("Chunk(s) at 0~50%:") + .append(StringUtil.NEWLINE) + .append(q000) + .append(StringUtil.NEWLINE) + .append("Chunk(s) at 25~75%:") + .append(StringUtil.NEWLINE) + .append(q025) + .append(StringUtil.NEWLINE) + .append("Chunk(s) at 50~100%:") + .append(StringUtil.NEWLINE) + .append(q050) + .append(StringUtil.NEWLINE) + .append("Chunk(s) at 75~100%:") + .append(StringUtil.NEWLINE) + .append(q075) + .append(StringUtil.NEWLINE) + .append("Chunk(s) at 100%:") + 
.append(StringUtil.NEWLINE) + .append(q100) + .append(StringUtil.NEWLINE) + .append("small subpages:"); + appendPoolSubPages(buf, smallSubpagePools); + buf.append(StringUtil.NEWLINE); + + return buf.toString(); + } + + private static void appendPoolSubPages(StringBuilder buf, PoolSubpage[] subpages) { + for (int i = 0; i < subpages.length; i ++) { + PoolSubpage head = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(subpages, i); + if (head == null || head.next == head) { + continue; + } + + buf.append(StringUtil.NEWLINE) + .append(i) + .append(": "); + PoolSubpage s = head.next; + do { + buf.append(s); + s = s.next; + } while (s != head); + } + } + + public void close() { + for (int i = 0, len = smallSubpagePools.length; i < len; i++) { + PoolSubpage page = (PoolSubpage) SUBPAGE_ARRAY.getVolatile(smallSubpagePools, i); + if (page != null) { + page.destroy(); + } + } + for (PoolChunkList list : new PoolChunkList[] {qInit, q000, q025, q050, q100}) { + list.destroy(); + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolArenaMetric.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolArenaMetric.java new file mode 100644 index 00000000000..754dd7d2b2c --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolArenaMetric.java @@ -0,0 +1,114 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.pool; + +import java.util.List; + +/** + * Expose metrics for an arena. + */ +public interface PoolArenaMetric extends SizeClassesMetric { + + /** + * Returns the number of thread caches backed by this arena. + */ + int numThreadCaches(); + + /** + * Returns the number of small sub-pages for the arena. + */ + int numSmallSubpages(); + + /** + * Returns the number of chunk lists for the arena. + */ + int numChunkLists(); + + /** + * Returns an unmodifiable {@link List} which holds {@link PoolSubpageMetric}s for small sub-pages. + */ + List smallSubpages(); + + /** + * Returns an unmodifiable {@link List} which holds {@link PoolChunkListMetric}s. + */ + List chunkLists(); + + /** + * Return the number of allocations done via the arena. This includes all sizes. + */ + long numAllocations(); + + /** + * Return the number of small allocations done via the arena. + */ + long numSmallAllocations(); + + /** + * Return the number of normal allocations done via the arena. + */ + long numNormalAllocations(); + + /** + * Return the number of huge allocations done via the arena. + */ + long numHugeAllocations(); + + /** + * Return the number of deallocations done via the arena. This includes all sizes. + */ + long numDeallocations(); + + /** + * Return the number of small deallocations done via the arena. + */ + long numSmallDeallocations(); + + /** + * Return the number of normal deallocations done via the arena. + */ + long numNormalDeallocations(); + + /** + * Return the number of huge deallocations done via the arena. + */ + long numHugeDeallocations(); + + /** + * Return the number of currently active allocations. + */ + long numActiveAllocations(); + + /** + * Return the number of currently active small allocations. + */ + long numActiveSmallAllocations(); + + /** + * Return the number of currently active normal allocations. + */ + long numActiveNormalAllocations(); + + /** + * Return the number of currently active huge allocations. 
+ */ + long numActiveHugeAllocations(); + + /** + * Return the number of active bytes that are currently allocated by the arena. + */ + long numActiveBytes(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunk.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunk.java new file mode 100644 index 00000000000..6285c69e118 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunk.java @@ -0,0 +1,664 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.internal.CleanerDrop; +import io.netty.buffer.api.AllocatorControl.UntetheredMemory; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.internal.ArcDrop; +import io.netty.buffer.api.internal.Statics; +import io.netty.util.internal.LongLongHashMap; +import io.netty.util.internal.LongPriorityQueue; + +import java.util.PriorityQueue; + +/** + * Description of algorithm for PageRun/PoolSubpage allocation from PoolChunk + * + * Notation: The following terms are important to understand the code + * > page - a page is the smallest unit of memory chunk that can be allocated + * > run - a run is a collection of pages + * > chunk - a chunk is a collection of runs + * > in this code chunkSize = maxPages * pageSize + * + * To begin we allocate a byte array of size = chunkSize + * Whenever a ByteBuf of given size needs to be created we search for the first position + * in the byte array that has enough empty space to accommodate the requested size and + * return a (long) handle that encodes this offset information, (this memory segment is then + * marked as reserved, so it is always used by exactly one ByteBuf and no more) + * + * For simplicity all sizes are normalized according to {@link PoolArena#size2SizeIdx(int)} method. + * This ensures that when we request for memory segments of size > pageSize the normalizedCapacity + * equals the next nearest size in {@link SizeClasses}. + * + * + * A chunk has the following layout: + * + * /-----------------\ + * | run | + * | | + * | | + * |-----------------| + * | run | + * | | + * |-----------------| + * | unalloctated | + * | (freed) | + * | | + * |-----------------| + * | subpage | + * |-----------------| + * | unallocated | + * | (freed) | + * | ... | + * | ... | + * | ... 
| + * | | + * | | + * | | + * \-----------------/ + * + * + * handle: + * ------- + * a handle is a long number, the bit layout of a run looks like: + * + * oooooooo ooooooos ssssssss ssssssue bbbbbbbb bbbbbbbb bbbbbbbb bbbbbbbb + * + * o: runOffset (page offset in the chunk), 15bit + * s: size (number of pages) of this run, 15bit + * u: isUsed?, 1bit + * e: isSubpage?, 1bit + * b: bitmapIdx of subpage, zero if it's not subpage, 32bit + * + * runsAvailMap: + * ------ + * a map which manages all runs (used and not in used). + * For each run, the first runOffset and last runOffset are stored in runsAvailMap. + * key: runOffset + * value: handle + * + * runsAvail: + * ---------- + * an array of {@link PriorityQueue}. + * Each queue manages same size of runs. + * Runs are sorted by offset, so that we always allocate runs with smaller offset. + * + * + * Algorithm: + * ---------- + * + * As we allocate runs, we update values stored in runsAvailMap and runsAvail so that the property is maintained. + * + * Initialization - + * In the beginning we store the initial run which is the whole chunk. + * The initial run: + * runOffset = 0 + * size = chunkSize + * isUsed = no + * isSubpage = no + * bitmapIdx = 0 + * + * + * Algorithm: [allocateRun(size)] + * ---------- + * 1) find the first avail run using in runsAvails according to size + * 2) if pages of run is larger than request pages then split it, and save the tailing run + * for later using + * + * Algorithm: [allocateSubpage(size)] + * ---------- + * 1) find a not full subpage according to size. 
+ * if it already exists just return, otherwise allocate a new PoolSubpage and call init() + * note that this subpage object is added to subpagesPool in the PoolArena when we init() it + * 2) call subpage.allocate() + * + * Algorithm: [free(handle, length, nioBuffer)] + * ---------- + * 1) if it is a subpage, return the slab back into this subpage + * 2) if the subpage is not used, or it is a run, then start free this run + * 3) merge continuous avail runs + * 4) save the merged run + * + */ +final class PoolChunk implements PoolChunkMetric { + private static final int SIZE_BIT_LENGTH = 15; + private static final int INUSED_BIT_LENGTH = 1; + private static final int SUBPAGE_BIT_LENGTH = 1; + private static final int BITMAP_IDX_BIT_LENGTH = 32; + + static final int IS_SUBPAGE_SHIFT = BITMAP_IDX_BIT_LENGTH; + static final int IS_USED_SHIFT = SUBPAGE_BIT_LENGTH + IS_SUBPAGE_SHIFT; + static final int SIZE_SHIFT = INUSED_BIT_LENGTH + IS_USED_SHIFT; + static final int RUN_OFFSET_SHIFT = SIZE_BIT_LENGTH + SIZE_SHIFT; + + final PoolArena arena; + final Buffer base; // The buffer that is the source of the memory. Closing it will free the memory. + final Object memory; + final Drop baseDrop; // An ArcDrop that manages references to the base Buffer. 
+ + /** + * store the first page and last page of each avail run + */ + private final LongLongHashMap runsAvailMap; + + /** + * manage all avail runs + */ + private final LongPriorityQueue[] runsAvail; + + /** + * manage all subpages in this chunk + */ + private final PoolSubpage[] subpages; + + private final int pageSize; + private final int pageShifts; + private final int chunkSize; + + int freeBytes; + + PoolChunkList parent; + PoolChunk prev; + PoolChunk next; + + PoolChunk(PoolArena arena, int pageSize, int pageShifts, int chunkSize, + int maxPageIdx) { + this.arena = arena; + MemoryManager manager = arena.manager; + base = manager.allocateShared(arena, chunkSize, manager.drop(), Statics.CLEANER, arena.allocationType); + memory = manager.unwrapRecoverableMemory(base); + baseDrop = ArcDrop.wrap(Buffer::close); + this.pageSize = pageSize; + this.pageShifts = pageShifts; + this.chunkSize = chunkSize; + freeBytes = chunkSize; + + runsAvail = newRunsAvailqueueArray(maxPageIdx); + runsAvailMap = new LongLongHashMap(-1); + subpages = new PoolSubpage[chunkSize >> pageShifts]; + + //insert initial run, offset = 0, pages = chunkSize / pageSize + int pages = chunkSize >> pageShifts; + long initHandle = (long) pages << SIZE_SHIFT; + insertAvailRun(0, pages, initHandle); + } + + private static LongPriorityQueue[] newRunsAvailqueueArray(int size) { + LongPriorityQueue[] queueArray = new LongPriorityQueue[size]; + for (int i = 0; i < queueArray.length; i++) { + queueArray[i] = new LongPriorityQueue(); + } + return queueArray; + } + + private void insertAvailRun(int runOffset, int pages, long handle) { + int pageIdxFloor = arena.pages2pageIdxFloor(pages); + LongPriorityQueue queue = runsAvail[pageIdxFloor]; + queue.offer(handle); + + //insert first page of run + insertAvailRun0(runOffset, handle); + if (pages > 1) { + //insert last page of run + insertAvailRun0(lastPage(runOffset, pages), handle); + } + } + + private void insertAvailRun0(int runOffset, long handle) { + long 
pre = runsAvailMap.put(runOffset, handle); + assert pre == -1; + } + + private void removeAvailRun(long handle) { + int pageIdxFloor = arena.pages2pageIdxFloor(runPages(handle)); + LongPriorityQueue queue = runsAvail[pageIdxFloor]; + removeAvailRun(queue, handle); + } + + private void removeAvailRun(LongPriorityQueue queue, long handle) { + queue.remove(handle); + + int runOffset = runOffset(handle); + int pages = runPages(handle); + //remove first page of run + runsAvailMap.remove(runOffset); + if (pages > 1) { + //remove last page of run + runsAvailMap.remove(lastPage(runOffset, pages)); + } + } + + private static int lastPage(int runOffset, int pages) { + return runOffset + pages - 1; + } + + private long getAvailRunByOffset(int runOffset) { + return runsAvailMap.get(runOffset); + } + + @Override + public int usage() { + final int freeBytes; + synchronized (arena) { + freeBytes = this.freeBytes; + } + return usage(freeBytes); + } + + private int usage(int freeBytes) { + if (freeBytes == 0) { + return 100; + } + + int freePercentage = (int) (freeBytes * 100L / chunkSize); + if (freePercentage == 0) { + return 99; + } + return 100 - freePercentage; + } + + UntetheredMemory allocate(int size, int sizeIdx, PoolThreadCache cache, PooledAllocatorControl control) { + final long handle; + if (sizeIdx <= arena.smallMaxSizeIdx) { + // small + handle = allocateSubpage(sizeIdx); + if (handle < 0) { + return null; + } + assert isSubpage(handle); + } else { + // normal + // runSize must be multiple of pageSize + int runSize = arena.sizeIdx2size(sizeIdx); + handle = allocateRun(runSize); + if (handle < 0) { + return null; + } + } + + return allocateBuffer(handle, size, cache, control); + } + + private long allocateRun(int runSize) { + int pages = runSize >> pageShifts; + int pageIdx = arena.pages2pageIdx(pages); + + synchronized (runsAvail) { + //find first queue which has at least one big enough run + int queueIdx = runFirstBestFit(pageIdx); + if (queueIdx == -1) { + return 
-1; + } + + //get run with min offset in this queue + LongPriorityQueue queue = runsAvail[queueIdx]; + long handle = queue.poll(); + + assert handle != LongPriorityQueue.NO_VALUE && !isUsed(handle) : "invalid handle: " + handle; + + removeAvailRun(queue, handle); + + if (handle != -1) { + handle = splitLargeRun(handle, pages); + } + + freeBytes -= runSize(pageShifts, handle); + return handle; + } + } + + private int calculateRunSize(int sizeIdx) { + int maxElements = 1 << pageShifts - SizeClasses.LOG2_QUANTUM; + int runSize = 0; + int nElements; + + final int elemSize = arena.sizeIdx2size(sizeIdx); + + // Find the lowest common multiple of pageSize and elemSize + do { + runSize += pageSize; + nElements = runSize / elemSize; + } while (nElements < maxElements && runSize != nElements * elemSize); + + while (nElements > maxElements) { + runSize -= pageSize; + nElements = runSize / elemSize; + } + + assert nElements > 0; + assert runSize <= chunkSize; + assert runSize >= elemSize; + + return runSize; + } + + private int runFirstBestFit(int pageIdx) { + if (freeBytes == chunkSize) { + return arena.nPSizes - 1; + } + for (int i = pageIdx; i < arena.nPSizes; i++) { + LongPriorityQueue queue = runsAvail[i]; + if (queue != null && !queue.isEmpty()) { + return i; + } + } + return -1; + } + + private long splitLargeRun(long handle, int needPages) { + assert needPages > 0; + + int totalPages = runPages(handle); + assert needPages <= totalPages; + + int remPages = totalPages - needPages; + + if (remPages > 0) { + int runOffset = runOffset(handle); + + // keep track of trailing unused pages for later use + int availOffset = runOffset + needPages; + long availRun = toRunHandle(availOffset, remPages, 0); + insertAvailRun(availOffset, remPages, availRun); + + // not avail + return toRunHandle(runOffset, needPages, 1); + } + + //mark it as used + handle |= 1L << IS_USED_SHIFT; + return handle; + } + + /** + * Create / initialize a new PoolSubpage of normCapacity. 
Any PoolSubpage created / initialized here is added to + * subpage pool in the PoolArena that owns this PoolChunk + * + * @param sizeIdx sizeIdx of normalized size + * + * @return index in memoryMap + */ + private long allocateSubpage(int sizeIdx) { + // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it. + // This is need as we may add it back and so alter the linked-list structure. + PoolSubpage head = arena.findSubpagePoolHead(sizeIdx); + synchronized (head) { + //allocate a new run + int runSize = calculateRunSize(sizeIdx); + //runSize must be multiples of pageSize + long runHandle = allocateRun(runSize); + if (runHandle < 0) { + return -1; + } + + int runOffset = runOffset(runHandle); + assert subpages[runOffset] == null; + int elemSize = arena.sizeIdx2size(sizeIdx); + + PoolSubpage subpage = new PoolSubpage(head, this, pageShifts, runOffset, + runSize(pageShifts, runHandle), elemSize); + + subpages[runOffset] = subpage; + return subpage.allocate(); + } + } + + /** + * Free a subpage, or a run of pages When a subpage is freed from PoolSubpage, it might be added back to subpage + * pool of the owning PoolArena. If the subpage pool in PoolArena has at least one other PoolSubpage of given + * elemSize, we can completely free the owning Page, so it is available for subsequent allocations. + * + * @param handle handle to free + */ + void free(long handle, int normCapacity) { + baseDrop.drop(base); // Decrement reference count. + if (isSubpage(handle)) { + int sizeIdx = arena.size2SizeIdx(normCapacity); + PoolSubpage head = arena.findSubpagePoolHead(sizeIdx); + + int sIdx = runOffset(handle); + PoolSubpage subpage = subpages[sIdx]; + assert subpage != null && subpage.doNotDestroy; + + // Obtain the head of the PoolSubPage pool that is owned by the PoolArena and synchronize on it. + // This is need as we may add it back and so alter the linked-list structure. 
+ synchronized (head) { + if (subpage.free(head, bitmapIdx(handle))) { + //the subpage is still used, do not free it + return; + } + assert !subpage.doNotDestroy; + // Null out slot in the array as it was freed, and we should not use it anymore. + subpages[sIdx] = null; + } + } + + //start free run + int pages = runPages(handle); + + synchronized (runsAvail) { + // collapse continuous runs, successfully collapsed runs + // will be removed from runsAvail and runsAvailMap + long finalRun = collapseRuns(handle); + + //set run as not used + finalRun &= ~(1L << IS_USED_SHIFT); + //if it is a subpage, set it to run + finalRun &= ~(1L << IS_SUBPAGE_SHIFT); + + insertAvailRun(runOffset(finalRun), runPages(finalRun), finalRun); + freeBytes += pages << pageShifts; + } + } + + private long collapseRuns(long handle) { + return collapseNext(collapsePast(handle)); + } + + private long collapsePast(long handle) { + for (;;) { + int runOffset = runOffset(handle); + int runPages = runPages(handle); + + long pastRun = getAvailRunByOffset(runOffset - 1); + if (pastRun == -1) { + return handle; + } + + int pastOffset = runOffset(pastRun); + int pastPages = runPages(pastRun); + + //is continuous + if (pastRun != handle && pastOffset + pastPages == runOffset) { + //remove past run + removeAvailRun(pastRun); + handle = toRunHandle(pastOffset, pastPages + runPages, 0); + } else { + return handle; + } + } + } + + private long collapseNext(long handle) { + for (;;) { + int runOffset = runOffset(handle); + int runPages = runPages(handle); + + long nextRun = getAvailRunByOffset(runOffset + runPages); + if (nextRun == -1) { + return handle; + } + + int nextOffset = runOffset(nextRun); + int nextPages = runPages(nextRun); + + //is continuous + if (nextRun != handle && runOffset + runPages == nextOffset) { + //remove next run + removeAvailRun(nextRun); + handle = toRunHandle(runOffset, runPages + nextPages, 0); + } else { + return handle; + } + } + } + + private static long toRunHandle(int 
runOffset, int runPages, int inUsed) { + return (long) runOffset << RUN_OFFSET_SHIFT + | (long) runPages << SIZE_SHIFT + | (long) inUsed << IS_USED_SHIFT; + } + + UntetheredMemory allocateBuffer(long handle, int size, PoolThreadCache threadCache, + PooledAllocatorControl control) { + if (isRun(handle)) { + int offset = runOffset(handle) << pageShifts; + int maxLength = runSize(pageShifts, handle); + PoolThreadCache poolThreadCache = arena.parent.threadCache(); + initAllocatorControl(control, poolThreadCache, handle, maxLength); + ArcDrop.acquire(baseDrop); + return new UntetheredChunkAllocation( + memory, this, poolThreadCache, handle, maxLength, offset, size); + } else { + return allocateBufferWithSubpage(handle, size, threadCache, control); + } + } + + UntetheredMemory allocateBufferWithSubpage(long handle, int size, PoolThreadCache threadCache, + PooledAllocatorControl control) { + int runOffset = runOffset(handle); + int bitmapIdx = bitmapIdx(handle); + + PoolSubpage s = subpages[runOffset]; + assert s.doNotDestroy; + assert size <= s.elemSize; + + int offset = (runOffset << pageShifts) + bitmapIdx * s.elemSize; + initAllocatorControl(control, threadCache, handle, s.elemSize); + ArcDrop.acquire(baseDrop); + return new UntetheredChunkAllocation(memory, this, threadCache, handle, s.elemSize, offset, size); + } + + @SuppressWarnings("unchecked") + private static final class UntetheredChunkAllocation implements UntetheredMemory { + private final Object memory; + private final PoolChunk chunk; + private final PoolThreadCache threadCache; + private final long handle; + private final int maxLength; + private final int offset; + private final int size; + + private UntetheredChunkAllocation( + Object memory, PoolChunk chunk, PoolThreadCache threadCache, + long handle, int maxLength, int offset, int size) { + this.memory = memory; + this.chunk = chunk; + this.threadCache = threadCache; + this.handle = handle; + this.maxLength = maxLength; + this.offset = offset; + 
this.size = size; + } + + @Override + public Memory memory() { + return (Memory) chunk.arena.manager.sliceMemory(memory, offset, size); + } + + @Override + public Drop drop() { + PooledDrop pooledDrop = new PooledDrop(chunk.arena, chunk, threadCache, handle, maxLength); + return (Drop) CleanerDrop.wrap(pooledDrop); + } + } + + private void initAllocatorControl(PooledAllocatorControl control, PoolThreadCache threadCache, long handle, + int normSize) { + control.arena = arena; + control.chunk = this; + control.threadCache = threadCache; + control.handle = handle; + control.normSize = normSize; + } + + @Override + public int chunkSize() { + return chunkSize; + } + + @Override + public int freeBytes() { + synchronized (arena) { + return freeBytes; + } + } + + @Override + public String toString() { + final int freeBytes; + synchronized (arena) { + freeBytes = this.freeBytes; + } + + return new StringBuilder() + .append("Chunk(") + .append(Integer.toHexString(System.identityHashCode(this))) + .append(": ") + .append(usage(freeBytes)) + .append("%, ") + .append(chunkSize - freeBytes) + .append('/') + .append(chunkSize) + .append(')') + .toString(); + } + + void destroy() { + baseDrop.drop(base); // Decrement reference count from the chunk (allocated buffers may keep the base alive) + } + + static int runOffset(long handle) { + return (int) (handle >> RUN_OFFSET_SHIFT); + } + + static int runSize(int pageShifts, long handle) { + return runPages(handle) << pageShifts; + } + + static int runPages(long handle) { + return (int) (handle >> SIZE_SHIFT & 0x7fff); + } + + static boolean isUsed(long handle) { + return (handle >> IS_USED_SHIFT & 1) == 1L; + } + + static boolean isRun(long handle) { + return !isSubpage(handle); + } + + static boolean isSubpage(long handle) { + return (handle >> IS_SUBPAGE_SHIFT & 1) == 1L; + } + + static int bitmapIdx(long handle) { + return (int) handle; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkList.java 
b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkList.java new file mode 100644 index 00000000000..03347f6481f --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkList.java @@ -0,0 +1,250 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.AllocatorControl.UntetheredMemory; +import io.netty.util.internal.StringUtil; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.Iterator; +import java.util.List; + +import static java.lang.Math.max; +import static java.lang.Math.min; + +final class PoolChunkList implements PoolChunkListMetric { + private static final Iterator EMPTY_METRICS = Collections.emptyIterator(); + private final PoolArena arena; + private final PoolChunkList nextList; + private final int minUsage; + private final int maxUsage; + private final int maxCapacity; + private PoolChunk head; + private final int freeMinThreshold; + private final int freeMaxThreshold; + + // This is only updated once, when the linked list of PoolChunkLists is created in the PoolArena constructor. 
+ private PoolChunkList prevList; + + PoolChunkList(PoolArena arena, PoolChunkList nextList, int minUsage, int maxUsage, int chunkSize) { + assert minUsage <= maxUsage; + this.arena = arena; + this.nextList = nextList; + this.minUsage = minUsage; + this.maxUsage = maxUsage; + maxCapacity = calculateMaxCapacity(minUsage, chunkSize); + + // the thresholds are aligned with PoolChunk.usage() logic: + // 1) basic logic: usage() = 100 - freeBytes * 100L / chunkSize + // so, for example: (usage() >= maxUsage) condition can be transformed in the following way: + // 100 - freeBytes * 100L / chunkSize >= maxUsage + // freeBytes <= chunkSize * (100 - maxUsage) / 100 + // let freeMinThreshold = chunkSize * (100 - maxUsage) / 100, then freeBytes <= freeMinThreshold + // + // 2) usage() returns an int value and has a floor rounding during a calculation, + // to be aligned absolute thresholds should be shifted for "the rounding step": + // freeBytes * 100 / chunkSize < 1 + // the condition can be converted to: freeBytes < 1 * chunkSize / 100 + // this is why we have + 0.99999999 shifts. A example why just +1 shift cannot be used: + // freeBytes = 16777216 == freeMaxThreshold: 16777216, usage = 0 < minUsage: 1, chunkSize: 16777216 + // At the same time we want to have zero thresholds in case of (maxUsage == 100) and (minUsage == 100). + // + freeMinThreshold = maxUsage == 100 ? 0 : (int) (chunkSize * (100.0 - maxUsage + 0.99999999) / 100L); + freeMaxThreshold = minUsage == 100 ? 0 : (int) (chunkSize * (100.0 - minUsage + 0.99999999) / 100L); + } + + /** + * Calculates the maximum capacity of a buffer that will ever be possible to allocate out of the {@link PoolChunk}s + * that belong to the {@link PoolChunkList} with the given {@code minUsage} and {@code maxUsage} settings. 
+ */ + private static int calculateMaxCapacity(int minUsage, int chunkSize) { + minUsage = minUsage0(minUsage); + + if (minUsage == 100) { + // If the minUsage is 100 we can not allocate anything out of this list. + return 0; + } + + // Calculate the maximum amount of bytes that can be allocated from a PoolChunk in this PoolChunkList. + // + // As an example: + // - If a PoolChunkList has minUsage == 25 we are allowed to allocate at most 75% of the chunkSize because + // this is the maximum amount available in any PoolChunk in this PoolChunkList. + return (int) (chunkSize * (100L - minUsage) / 100L); + } + + void prevList(PoolChunkList prevList) { + assert this.prevList == null; + this.prevList = prevList; + } + + UntetheredMemory allocate(int size, int sizeIdx, PoolThreadCache threadCache, PooledAllocatorControl control) { + int normCapacity = arena.sizeIdx2size(sizeIdx); + if (normCapacity > maxCapacity) { + // Either this PoolChunkList is empty, or the requested capacity is larger than the capacity which can + // be handled by the PoolChunks that are contained in this PoolChunkList. + return null; + } + + for (PoolChunk cur = head; cur != null; cur = cur.next) { + UntetheredMemory memory = cur.allocate(size, sizeIdx, threadCache, control); + if (memory != null) { + if (cur.freeBytes <= freeMinThreshold) { + remove(cur); + nextList.add(cur); + } + return memory; + } + } + return null; + } + + boolean free(PoolChunk chunk, long handle, int normCapacity) { + chunk.free(handle, normCapacity); + if (chunk.freeBytes > freeMaxThreshold) { + remove(chunk); + // Move the PoolChunk down the PoolChunkList linked-list. + return move0(chunk); + } + return true; + } + + private boolean move(PoolChunk chunk) { + if (chunk.freeBytes > freeMaxThreshold) { + // Move the PoolChunk down the PoolChunkList linked-list. + return move0(chunk); + } + + // PoolChunk fits into this PoolChunkList, adding it here. 
+ add0(chunk); + return true; + } + + /** + * Moves the {@link PoolChunk} down the {@link PoolChunkList} linked-list, so it will end up in the right + * {@link PoolChunkList} that has the correct minUsage / maxUsage in respect to {@link PoolChunk#usage()}. + */ + private boolean move0(PoolChunk chunk) { + if (prevList == null) { + // There is no previous PoolChunkList so return false which result in having the PoolChunk destroyed and + // all memory associated with the PoolChunk will be released. + return false; + } + return prevList.move(chunk); + } + + void add(PoolChunk chunk) { + if (chunk.freeBytes <= freeMinThreshold) { + nextList.add(chunk); + return; + } + add0(chunk); + } + + /** + * Adds the {@link PoolChunk} to this {@link PoolChunkList}. + */ + void add0(PoolChunk chunk) { + chunk.parent = this; + if (head == null) { + head = chunk; + chunk.prev = null; + chunk.next = null; + } else { + chunk.prev = null; + chunk.next = head; + head.prev = chunk; + head = chunk; + } + } + + private void remove(PoolChunk cur) { + if (cur == head) { + head = cur.next; + if (head != null) { + head.prev = null; + } + } else { + PoolChunk next = cur.next; + cur.prev.next = next; + if (next != null) { + next.prev = cur.prev; + } + } + } + + @Override + public int minUsage() { + return minUsage0(minUsage); + } + + @Override + public int maxUsage() { + return min(maxUsage, 100); + } + + private static int minUsage0(int value) { + return max(1, value); + } + + @Override + public Iterator iterator() { + synchronized (arena) { + if (head == null) { + return EMPTY_METRICS; + } + List metrics = new ArrayList<>(); + for (PoolChunk cur = head;;) { + metrics.add(cur); + cur = cur.next; + if (cur == null) { + break; + } + } + return metrics.iterator(); + } + } + + @Override + public String toString() { + StringBuilder buf = new StringBuilder(); + synchronized (arena) { + if (head == null) { + return "none"; + } + + for (PoolChunk cur = head;;) { + buf.append(cur); + cur = cur.next; + if 
(cur == null) { + break; + } + buf.append(StringUtil.NEWLINE); + } + } + return buf.toString(); + } + + void destroy() { + PoolChunk chunk = head; + while (chunk != null) { + chunk.destroy(); + chunk = chunk.next; + } + head = null; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkListMetric.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkListMetric.java new file mode 100644 index 00000000000..9a60e1da52c --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkListMetric.java @@ -0,0 +1,32 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +/** + * Metrics for a list of chunks. + */ +public interface PoolChunkListMetric extends Iterable { + + /** + * Return the minimum usage of the chunk list before which chunks are promoted to the previous list. + */ + int minUsage(); + + /** + * Return the maximum usage of the chunk list after which chunks are promoted to the next list. 
+ */ + int maxUsage(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkMetric.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkMetric.java new file mode 100644 index 00000000000..8a90384be34 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolChunkMetric.java @@ -0,0 +1,37 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +/** + * Metrics for a chunk. + */ +public interface PoolChunkMetric { + + /** + * Return the percentage of the current usage of the chunk. + */ + int usage(); + + /** + * Return the size of the chunk in bytes, this is the maximum of bytes that can be served out of the chunk. + */ + int chunkSize(); + + /** + * Return the number of free bytes in the chunk. + */ + int freeBytes(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolSubpage.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolSubpage.java new file mode 100644 index 00000000000..c0b558d996b --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolSubpage.java @@ -0,0 +1,287 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +import static io.netty.buffer.api.pool.PoolChunk.RUN_OFFSET_SHIFT; +import static io.netty.buffer.api.pool.PoolChunk.SIZE_SHIFT; +import static io.netty.buffer.api.pool.PoolChunk.IS_USED_SHIFT; +import static io.netty.buffer.api.pool.PoolChunk.IS_SUBPAGE_SHIFT; +import static io.netty.buffer.api.pool.SizeClasses.LOG2_QUANTUM; + +final class PoolSubpage implements PoolSubpageMetric { + final PoolChunk chunk; + private final int pageShifts; + private final int runOffset; + private final int runSize; + private final long[] bitmap; + + PoolSubpage prev; + PoolSubpage next; + + boolean doNotDestroy; + int elemSize; + private int maxNumElems; + private int bitmapLength; + private int nextAvail; + private int numAvail; + + /** Special constructor that creates a linked list head */ + PoolSubpage() { + chunk = null; + pageShifts = -1; + runOffset = -1; + elemSize = -1; + runSize = -1; + bitmap = null; + } + + PoolSubpage(PoolSubpage head, PoolChunk chunk, int pageShifts, int runOffset, int runSize, int elemSize) { + this.chunk = chunk; + this.pageShifts = pageShifts; + this.runOffset = runOffset; + this.runSize = runSize; + this.elemSize = elemSize; + bitmap = new long[runSize >>> 6 + LOG2_QUANTUM]; // runSize / 64 / QUANTUM + + doNotDestroy = true; + if (elemSize != 0) { + maxNumElems = numAvail = runSize / elemSize; + nextAvail = 0; + bitmapLength = maxNumElems >>> 6; + if ((maxNumElems & 63) != 0) { + bitmapLength ++; + } + + for (int i = 0; i < bitmapLength; i ++) { + bitmap[i] = 0; + } + } + addToPool(head); + } 
+ + /** + * Returns the bitmap index of the subpage allocation. + */ + long allocate() { + if (numAvail == 0 || !doNotDestroy) { + return -1; + } + + final int bitmapIdx = getNextAvail(); + int q = bitmapIdx >>> 6; + int r = bitmapIdx & 63; + assert (bitmap[q] >>> r & 1) == 0; + bitmap[q] |= 1L << r; + + if (-- numAvail == 0) { + removeFromPool(); + } + + return toHandle(bitmapIdx); + } + + /** + * @return {@code true} if this subpage is in use. + * {@code false} if this subpage is not used by its chunk and thus it's OK to be released. + */ + boolean free(PoolSubpage head, int bitmapIdx) { + if (elemSize == 0) { + return true; + } + int q = bitmapIdx >>> 6; + int r = bitmapIdx & 63; + assert (bitmap[q] >>> r & 1) != 0; + bitmap[q] ^= 1L << r; + + setNextAvail(bitmapIdx); + + if (numAvail++ == 0) { + addToPool(head); + // When maxNumElems == 1, the maximum numAvail is also 1. + // Each of these PoolSubpages will go in here when they do free operation. + // If they return true directly from here, then the rest of the code will be unreachable, + // and they will not actually be recycled. So return true only on maxNumElems > 1. + if (maxNumElems > 1) { + return true; + } + } + + if (numAvail != maxNumElems) { + return true; + } else { + // Subpage not in use (numAvail == maxNumElems) + if (prev == next) { + // Do not remove if this subpage is the only one left in the pool. + return true; + } + + // Remove this subpage from the pool if there are other subpages left in the pool. 
+ doNotDestroy = false; + removeFromPool(); + return false; + } + } + + private void addToPool(PoolSubpage head) { + assert prev == null && next == null; + prev = head; + next = head.next; + next.prev = this; + head.next = this; + } + + private void removeFromPool() { + assert prev != null && next != null; + prev.next = next; + next.prev = prev; + next = null; + prev = null; + } + + private void setNextAvail(int bitmapIdx) { + nextAvail = bitmapIdx; + } + + private int getNextAvail() { + int nextAvail = this.nextAvail; + if (nextAvail >= 0) { + this.nextAvail = -1; + return nextAvail; + } + return findNextAvail(); + } + + private int findNextAvail() { + final long[] bitmap = this.bitmap; + final int bitmapLength = this.bitmapLength; + for (int i = 0; i < bitmapLength; i ++) { + long bits = bitmap[i]; + if (~bits != 0) { + return findNextAvail0(i, bits); + } + } + return -1; + } + + private int findNextAvail0(int i, long bits) { + final int maxNumElems = this.maxNumElems; + final int baseVal = i << 6; + + for (int j = 0; j < 64; j ++) { + if ((bits & 1) == 0) { + int val = baseVal | j; + if (val < maxNumElems) { + return val; + } else { + break; + } + } + bits >>>= 1; + } + return -1; + } + + private long toHandle(int bitmapIdx) { + int pages = runSize >> pageShifts; + return (long) runOffset << RUN_OFFSET_SHIFT + | (long) pages << SIZE_SHIFT + | 1L << IS_USED_SHIFT + | 1L << IS_SUBPAGE_SHIFT + | bitmapIdx; + } + + @Override + public String toString() { + final boolean doNotDestroy; + final int maxNumElems; + final int numAvail; + final int elemSize; + if (chunk == null) { + // This is the head so there is no need to synchronize at all as these never change. + doNotDestroy = true; + maxNumElems = 0; + numAvail = 0; + elemSize = -1; + } else { + synchronized (chunk.arena) { + if (!this.doNotDestroy) { + doNotDestroy = false; + // Not used for creating the String. 
+ maxNumElems = numAvail = elemSize = -1; + } else { + doNotDestroy = true; + maxNumElems = this.maxNumElems; + numAvail = this.numAvail; + elemSize = this.elemSize; + } + } + } + + if (!doNotDestroy) { + return "(" + runOffset + ": not in use)"; + } + + return "(" + runOffset + ": " + (maxNumElems - numAvail) + '/' + maxNumElems + + ", offset: " + runOffset + ", length: " + runSize + ", elemSize: " + elemSize + ')'; + } + + @Override + public int maxNumElements() { + if (chunk == null) { + // It's the head. + return 0; + } + + synchronized (chunk.arena) { + return maxNumElems; + } + } + + @Override + public int numAvailable() { + if (chunk == null) { + // It's the head. + return 0; + } + + synchronized (chunk.arena) { + return numAvail; + } + } + + @Override + public int elementSize() { + if (chunk == null) { + // It's the head. + return -1; + } + + synchronized (chunk.arena) { + return elemSize; + } + } + + @Override + public int pageSize() { + return 1 << pageShifts; + } + + void destroy() { + if (chunk != null) { + chunk.destroy(); + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolSubpageMetric.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolSubpageMetric.java new file mode 100644 index 00000000000..5793a0aadd9 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolSubpageMetric.java @@ -0,0 +1,42 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +/** + * Metrics for a sub-page. + */ +public interface PoolSubpageMetric { + + /** + * Return the number of maximal elements that can be allocated out of the sub-page. + */ + int maxNumElements(); + + /** + * Return the number of available elements to be allocated. + */ + int numAvailable(); + + /** + * Return the size (in bytes) of the elements that will be allocated. + */ + int elementSize(); + + /** + * Return the page size (in bytes) of this page. + */ + int pageSize(); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PoolThreadCache.java b/buffer/src/main/java/io/netty/buffer/api/pool/PoolThreadCache.java new file mode 100644 index 00000000000..2771b7f5e87 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PoolThreadCache.java @@ -0,0 +1,393 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.AllocatorControl.UntetheredMemory; +import io.netty.buffer.api.pool.PoolArena.SizeClass; +import io.netty.util.internal.MathUtil; +import io.netty.util.internal.ObjectPool; +import io.netty.util.internal.ObjectPool.Handle; +import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.util.ArrayList; +import java.util.List; +import java.util.Queue; + +import static io.netty.buffer.api.pool.PoolArena.SizeClass.Normal; +import static io.netty.buffer.api.pool.PoolArena.SizeClass.Small; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; + +/** + * Acts as a Thread cache for allocations. This implementation is modelled after + * jemalloc and the described + * techniques of + * + * Scalable memory allocation using jemalloc. + */ +final class PoolThreadCache { + + private static final InternalLogger logger = InternalLoggerFactory.getInstance(PoolThreadCache.class); + private static final int INTEGER_SIZE_MINUS_ONE = Integer.SIZE - 1; + + final PoolArena arena; + + // Hold the caches for the different size classes, which are small and normal. 
+ private final MemoryRegionCache[] smallSubPageCaches; + private final MemoryRegionCache[] normalCaches; + + private final int freeSweepAllocationThreshold; + + private int allocations; + + PoolThreadCache(PoolArena arena, + int smallCacheSize, int normalCacheSize, int maxCachedBufferCapacity, + int freeSweepAllocationThreshold) { + checkPositiveOrZero(maxCachedBufferCapacity, "maxCachedBufferCapacity"); + this.freeSweepAllocationThreshold = freeSweepAllocationThreshold; + this.arena = arena; + if (arena != null) { + // Create the caches for the heap allocations + smallSubPageCaches = createSubPageCaches( + smallCacheSize, arena.numSmallSubpagePools); + + normalCaches = createNormalCaches( + normalCacheSize, maxCachedBufferCapacity, arena); + + arena.numThreadCaches.getAndIncrement(); + } else { + // No heapArea is configured so just null out all caches + smallSubPageCaches = null; + normalCaches = null; + } + + // Only check if there are caches in use. + if ((smallSubPageCaches != null || normalCaches != null) + && freeSweepAllocationThreshold < 1) { + throw new IllegalArgumentException("freeSweepAllocationThreshold: " + + freeSweepAllocationThreshold + " (expected: > 0)"); + } + } + + private static MemoryRegionCache[] createSubPageCaches( + int cacheSize, int numCaches) { + if (cacheSize > 0 && numCaches > 0) { + MemoryRegionCache[] cache = new MemoryRegionCache[numCaches]; + for (int i = 0; i < cache.length; i++) { + // TODO: maybe use cacheSize / cache.length + cache[i] = new SubPageMemoryRegionCache(cacheSize); + } + return cache; + } else { + return null; + } + } + + private static MemoryRegionCache[] createNormalCaches( + int cacheSize, int maxCachedBufferCapacity, PoolArena area) { + if (cacheSize > 0 && maxCachedBufferCapacity > 0) { + int max = Math.min(area.chunkSize, maxCachedBufferCapacity); + + // Create as many normal caches as we support based on how many sizeIdx we have and what the upper + // bound is that we want to cache in general. 
+ List cache = new ArrayList<>() ; + for (int idx = area.numSmallSubpagePools; idx < area.nSizes && area.sizeIdx2size(idx) <= max ; idx++) { + cache.add(new NormalMemoryRegionCache(cacheSize)); + } + return cache.toArray(MemoryRegionCache[]::new); + } else { + return null; + } + } + + // val > 0 + static int log2(int val) { + return INTEGER_SIZE_MINUS_ONE - Integer.numberOfLeadingZeros(val); + } + + /** + * Try to allocate a small buffer out of the cache. Returns {@code true} if successful {@code false} otherwise + */ + UntetheredMemory allocateSmall(PooledAllocatorControl control, int size, int sizeIdx) { + return allocate(cacheForSmall(sizeIdx), control, size); + } + + /** + * Try to allocate a normal buffer out of the cache. Returns {@code true} if successful {@code false} otherwise + */ + UntetheredMemory allocateNormal(PoolArena area, PooledAllocatorControl control, int size, int sizeIdx) { + return allocate(cacheForNormal(area, sizeIdx), control, size); + } + + private UntetheredMemory allocate(MemoryRegionCache cache, PooledAllocatorControl control, int size) { + if (cache == null) { + // no cache found so just return false here + return null; + } + UntetheredMemory allocated = cache.allocate(size, this, control); + if (++allocations >= freeSweepAllocationThreshold) { + allocations = 0; + trim(); + } + return allocated; + } + + /** + * Add {@link PoolChunk} and {@code handle} to the cache if there is enough room. + * Returns {@code true} if it fit into the cache {@code false} otherwise. 
+ */ + boolean add(PoolArena area, PoolChunk chunk, + long handle, int normCapacity, SizeClass sizeClass) { + int sizeIdx = area.size2SizeIdx(normCapacity); + MemoryRegionCache cache = cache(area, sizeIdx, sizeClass); + if (cache == null) { + return false; + } + return cache.add(chunk, handle, normCapacity); + } + + private MemoryRegionCache cache(PoolArena area, int sizeIdx, SizeClass sizeClass) { + if (sizeClass == Normal) { + return cacheForNormal(area, sizeIdx); + } + if (sizeClass == Small) { + return cacheForSmall(sizeIdx); + } + throw new AssertionError("Unexpected size class: " + sizeClass); + } + + /** + * Should be called if the Thread that uses this cache is about to exit to release resources out of the cache + */ + void free() { + int numFreed = free(smallSubPageCaches) + free(normalCaches); + + if (numFreed > 0 && logger.isDebugEnabled()) { + logger.debug("Freed {} thread-local buffer(s) from thread: {}", numFreed, + Thread.currentThread().getName()); + } + + if (arena != null) { + arena.numThreadCaches.getAndDecrement(); + } + } + + private static int free(MemoryRegionCache[] caches) { + if (caches == null) { + return 0; + } + + int numFreed = 0; + for (MemoryRegionCache c: caches) { + numFreed += free(c); + } + return numFreed; + } + + private static int free(MemoryRegionCache cache) { + if (cache == null) { + return 0; + } + return cache.free(); + } + + void trim() { + trim(smallSubPageCaches); + trim(normalCaches); + } + + private static void trim(MemoryRegionCache[] caches) { + if (caches == null) { + return; + } + for (MemoryRegionCache c: caches) { + trim(c); + } + } + + private static void trim(MemoryRegionCache cache) { + if (cache == null) { + return; + } + cache.trim(); + } + + private MemoryRegionCache cacheForSmall(int sizeIdx) { + return cache(smallSubPageCaches, sizeIdx); + } + + private MemoryRegionCache cacheForNormal(PoolArena area, int sizeIdx) { + // We need to subtract area.numSmallSubpagePools as sizeIdx is the overall index for 
all sizes. + int idx = sizeIdx - area.numSmallSubpagePools; + return cache(normalCaches, idx); + } + + private static MemoryRegionCache cache(MemoryRegionCache[] cache, int sizeIdx) { + if (cache == null || sizeIdx > cache.length - 1) { + return null; + } + return cache[sizeIdx]; + } + + /** + * Cache used for buffers which are backed by SMALL size. + */ + private static final class SubPageMemoryRegionCache extends MemoryRegionCache { + SubPageMemoryRegionCache(int size) { + super(size, Small); + } + + @Override + protected UntetheredMemory allocBuf(PoolChunk chunk, long handle, int size, PoolThreadCache threadCache, + PooledAllocatorControl control) { + return chunk.allocateBufferWithSubpage(handle, size, threadCache, control); + } + } + + /** + * Cache used for buffers which are backed by NORMAL size. + */ + private static final class NormalMemoryRegionCache extends MemoryRegionCache { + NormalMemoryRegionCache(int size) { + super(size, Normal); + } + + @Override + protected UntetheredMemory allocBuf(PoolChunk chunk, long handle, int size, PoolThreadCache threadCache, + PooledAllocatorControl control) { + return chunk.allocateBuffer(handle, size, threadCache, control); + } + } + + private abstract static class MemoryRegionCache { + private final int size; + private final Queue queue; + private final SizeClass sizeClass; + private int allocations; + + MemoryRegionCache(int size, SizeClass sizeClass) { + this.size = MathUtil.safeFindNextPositivePowerOfTwo(size); + queue = PlatformDependent.newFixedMpscQueue(this.size); + this.sizeClass = sizeClass; + } + + /** + * Allocate a new {@link UntetheredMemory} using the provided chunk and handle with the capacity restrictions. + */ + protected abstract UntetheredMemory allocBuf( + PoolChunk chunk, long handle, int size, PoolThreadCache threadCache, PooledAllocatorControl control); + + /** + * Add to cache if not already full. 
+ */ + public final boolean add(PoolChunk chunk, long handle, int normCapacity) { + Entry entry = newEntry(chunk, handle, normCapacity); + boolean queued = queue.offer(entry); + if (!queued) { + // If it was not possible to cache the chunk, immediately recycle the entry + entry.recycle(); + } + + return queued; + } + + /** + * Allocate something out of the cache if possible and remove the entry from the cache. + */ + public final UntetheredMemory allocate(int size, PoolThreadCache threadCache, PooledAllocatorControl control) { + Entry entry = queue.poll(); + if (entry == null) { + return null; + } + UntetheredMemory buffer = allocBuf(entry.chunk, entry.handle, size, threadCache, control); + entry.recycle(); + + // allocations are not thread-safe which is fine as this is only called from the same thread all time. + allocations++; + return buffer; + } + + /** + * Clear out this cache and free up all previous cached {@link PoolChunk}s and {@code handle}s. + */ + public final int free() { + return free(Integer.MAX_VALUE); + } + + private int free(int max) { + int numFreed = 0; + for (; numFreed < max; numFreed++) { + Entry entry = queue.poll(); + if (entry != null) { + freeEntry(entry); + } else { + // all cleared + return numFreed; + } + } + return numFreed; + } + + /** + * Free up cached {@link PoolChunk}s if not allocated frequently enough. 
+ */ + public final void trim() { + int free = size - allocations; + allocations = 0; + + // We not even allocated all the number that are + if (free > 0) { + free(free); + } + } + + private void freeEntry(Entry entry) { + PoolChunk chunk = entry.chunk; + long handle = entry.handle; + + entry.recycle(); + chunk.arena.freeChunk(chunk, handle, entry.normCapacity, sizeClass); + } + + static final class Entry { + final Handle recyclerHandle; + PoolChunk chunk; + long handle = -1; + int normCapacity; + + Entry(Handle recyclerHandle) { + this.recyclerHandle = recyclerHandle; + } + + void recycle() { + chunk = null; + handle = -1; + recyclerHandle.recycle(this); + } + } + + private static Entry newEntry(PoolChunk chunk, long handle, int normCapacity) { + Entry entry = RECYCLER.get(); + entry.chunk = chunk; + entry.handle = handle; + entry.normCapacity = normCapacity; + return entry; + } + + private static final ObjectPool RECYCLER = ObjectPool.newPool(handle -> new Entry(handle)); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PooledAllocatorControl.java b/buffer/src/main/java/io/netty/buffer/api/pool/PooledAllocatorControl.java new file mode 100644 index 00000000000..394c5923060 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PooledAllocatorControl.java @@ -0,0 +1,33 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.AllocatorControl; +import io.netty.buffer.api.Buffer; + +class PooledAllocatorControl implements AllocatorControl { + PooledBufferAllocator parent; + PoolArena arena; + PoolChunk chunk; + PoolThreadCache threadCache; + long handle; + int normSize; + + @Override + public UntetheredMemory allocateUntethered(Buffer originator, int size) { + return parent.allocate(this, size); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PooledBufferAllocator.java b/buffer/src/main/java/io/netty/buffer/api/pool/PooledBufferAllocator.java new file mode 100644 index 00000000000..034821cfe1c --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PooledBufferAllocator.java @@ -0,0 +1,580 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.AllocationType; +import io.netty.buffer.api.AllocatorControl.UntetheredMemory; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.StandardAllocationTypes; +import io.netty.buffer.api.internal.Statics; +import io.netty.util.NettyRuntime; +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.concurrent.FastThreadLocal; +import io.netty.util.concurrent.FastThreadLocalThread; +import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.StringUtil; +import io.netty.util.internal.SystemPropertyUtil; +import io.netty.util.internal.ThreadExecutorMap; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.TimeUnit; +import java.util.function.Supplier; + +import static io.netty.buffer.api.internal.Statics.allocatorClosedException; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; + +public class PooledBufferAllocator implements BufferAllocator, BufferAllocatorMetricProvider { + + private static final InternalLogger logger = InternalLoggerFactory.getInstance(PooledBufferAllocator.class); + private static final int DEFAULT_NUM_HEAP_ARENA; + private static final int DEFAULT_NUM_DIRECT_ARENA; + + private static final int DEFAULT_PAGE_SIZE; + private static final int DEFAULT_MAX_ORDER; // 8192 << 9 = 4 MiB per chunk + private static final int DEFAULT_SMALL_CACHE_SIZE; + private static final int DEFAULT_NORMAL_CACHE_SIZE; + static final int DEFAULT_MAX_CACHED_BUFFER_CAPACITY; + private static final int DEFAULT_CACHE_TRIM_INTERVAL; + private static final long DEFAULT_CACHE_TRIM_INTERVAL_MILLIS; + private static final boolean 
DEFAULT_USE_CACHE_FOR_ALL_THREADS; + private static final int DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT; + static final int DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK; + + private static final int MIN_PAGE_SIZE = 4096; + private static final int MAX_CHUNK_SIZE = (int) (((long) Integer.MAX_VALUE + 1) / 2); + + private final Runnable trimTask = this::trimCurrentThreadCache; + + static { + int defaultAlignment = SystemPropertyUtil.getInt( + "io.netty.allocator.directMemoryCacheAlignment", 0); + int defaultPageSize = SystemPropertyUtil.getInt("io.netty.allocator.pageSize", 8192); + Throwable pageSizeFallbackCause = null; + try { + validateAndCalculatePageShifts(defaultPageSize, defaultAlignment); + } catch (Throwable t) { + pageSizeFallbackCause = t; + defaultPageSize = 8192; + defaultAlignment = 0; + } + DEFAULT_PAGE_SIZE = defaultPageSize; + DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT = defaultAlignment; + + int defaultMaxOrder = SystemPropertyUtil.getInt("io.netty.allocator.maxOrder", 9); + Throwable maxOrderFallbackCause = null; + try { + validateAndCalculateChunkSize(DEFAULT_PAGE_SIZE, defaultMaxOrder); + } catch (Throwable t) { + maxOrderFallbackCause = t; + defaultMaxOrder = 11; + } + DEFAULT_MAX_ORDER = defaultMaxOrder; + + // Determine reasonable default for nHeapArena and nDirectArena. + // Assuming each arena has 3 chunks, the pool should not consume more than 50% of max memory. + final Runtime runtime = Runtime.getRuntime(); + + /* + * We use 2 * available processors by default to reduce contention as we use 2 * available processors for the + * number of EventLoops in NIO and EPOLL as well. If we choose a smaller number we will run into hot spots as + * allocation and de-allocation needs to be synchronized on the PoolArena. + * + * See https://github.com/netty/netty/issues/3888. 
+ */ + final int defaultMinNumArena = NettyRuntime.availableProcessors() * 2; + final int defaultChunkSize = DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER; + DEFAULT_NUM_HEAP_ARENA = Math.max(0, + SystemPropertyUtil.getInt( + "io.netty.allocator.numArenas", + (int) Math.min( + defaultMinNumArena, + runtime.maxMemory() / defaultChunkSize / 2 / 3))); + DEFAULT_NUM_DIRECT_ARENA = Math.max(0, + SystemPropertyUtil.getInt( + "io.netty.allocator.numDirectArenas", + (int) Math.min( + defaultMinNumArena, + PlatformDependent.maxDirectMemory() / defaultChunkSize / 2 / 3))); + + // cache sizes + DEFAULT_SMALL_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.smallCacheSize", 256); + DEFAULT_NORMAL_CACHE_SIZE = SystemPropertyUtil.getInt("io.netty.allocator.normalCacheSize", 64); + + // 32 kb is the default maximum capacity of the cached buffer. Similar to what is explained in + // 'Scalable memory allocation using jemalloc' + DEFAULT_MAX_CACHED_BUFFER_CAPACITY = SystemPropertyUtil.getInt( + "io.netty.allocator.maxCachedBufferCapacity", 32 * 1024); + + // the number of threshold of allocations when cached entries will be freed up if not frequently used + DEFAULT_CACHE_TRIM_INTERVAL = SystemPropertyUtil.getInt( + "io.netty.allocator.cacheTrimInterval", 8192); + + DEFAULT_CACHE_TRIM_INTERVAL_MILLIS = SystemPropertyUtil.getLong( + "io.netty.allocator.cacheTrimIntervalMillis", 0); + + DEFAULT_USE_CACHE_FOR_ALL_THREADS = SystemPropertyUtil.getBoolean( + "io.netty.allocator.useCacheForAllThreads", false); + + // Use 1023 by default as we use an ArrayDeque as backing storage which will then allocate an internal array + // of 1024 elements. Otherwise, we would allocate 2048 and only use 1024 which is wasteful. 
+ DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK = SystemPropertyUtil.getInt( + "io.netty.allocator.maxCachedByteBuffersPerChunk", 1023); + + if (logger.isDebugEnabled()) { + logger.debug("-Dio.netty.allocator.numArenas: {}", DEFAULT_NUM_HEAP_ARENA); + logger.debug("-Dio.netty.allocator.numDirectArenas: {}", DEFAULT_NUM_DIRECT_ARENA); + if (pageSizeFallbackCause == null) { + logger.debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE); + } else { + logger.debug("-Dio.netty.allocator.pageSize: {}", DEFAULT_PAGE_SIZE, pageSizeFallbackCause); + } + if (maxOrderFallbackCause == null) { + logger.debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER); + } else { + logger.debug("-Dio.netty.allocator.maxOrder: {}", DEFAULT_MAX_ORDER, maxOrderFallbackCause); + } + logger.debug("-Dio.netty.allocator.chunkSize: {}", DEFAULT_PAGE_SIZE << DEFAULT_MAX_ORDER); + logger.debug("-Dio.netty.allocator.smallCacheSize: {}", DEFAULT_SMALL_CACHE_SIZE); + logger.debug("-Dio.netty.allocator.normalCacheSize: {}", DEFAULT_NORMAL_CACHE_SIZE); + logger.debug("-Dio.netty.allocator.maxCachedBufferCapacity: {}", DEFAULT_MAX_CACHED_BUFFER_CAPACITY); + logger.debug("-Dio.netty.allocator.cacheTrimInterval: {}", DEFAULT_CACHE_TRIM_INTERVAL); + logger.debug("-Dio.netty.allocator.cacheTrimIntervalMillis: {}", DEFAULT_CACHE_TRIM_INTERVAL_MILLIS); + logger.debug("-Dio.netty.allocator.useCacheForAllThreads: {}", DEFAULT_USE_CACHE_FOR_ALL_THREADS); + logger.debug("-Dio.netty.allocator.maxCachedByteBuffersPerChunk: {}", + DEFAULT_MAX_CACHED_BYTEBUFFERS_PER_CHUNK); + } + } + + private final MemoryManager manager; + private final AllocationType allocationType; + private final PoolArena[] arenas; + private final int smallCacheSize; + private final int normalCacheSize; + private final List arenaMetrics; + private final List arenaMetricsView; + private final PoolThreadLocalCache threadCache; + private final int chunkSize; + private final PooledBufferAllocatorMetric metric; + private volatile boolean 
closed; + + public PooledBufferAllocator(MemoryManager manager, boolean direct) { + this(manager, direct, direct? DEFAULT_NUM_DIRECT_ARENA : DEFAULT_NUM_HEAP_ARENA, + DEFAULT_PAGE_SIZE, DEFAULT_MAX_ORDER, DEFAULT_SMALL_CACHE_SIZE, + DEFAULT_NORMAL_CACHE_SIZE, DEFAULT_USE_CACHE_FOR_ALL_THREADS, + DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT); + } + + public PooledBufferAllocator(MemoryManager manager, boolean direct, int numArenas, int pageSize, int maxOrder) { + this(manager, direct, numArenas, pageSize, maxOrder, DEFAULT_SMALL_CACHE_SIZE, + DEFAULT_NORMAL_CACHE_SIZE, DEFAULT_USE_CACHE_FOR_ALL_THREADS, + DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT); + } + + public PooledBufferAllocator(MemoryManager manager, boolean direct, int numArenas, int pageSize, int maxOrder, + int smallCacheSize, int normalCacheSize, + boolean useCacheForAllThreads) { + this(manager, direct, numArenas, pageSize, maxOrder, + smallCacheSize, normalCacheSize, + useCacheForAllThreads, DEFAULT_DIRECT_MEMORY_CACHE_ALIGNMENT); + } + + public PooledBufferAllocator(MemoryManager manager, boolean direct, int numArenas, int pageSize, int maxOrder, + int smallCacheSize, int normalCacheSize, + boolean useCacheForAllThreads, int directMemoryCacheAlignment) { + this.manager = requireNonNull(manager, "MemoryManager"); + allocationType = direct? StandardAllocationTypes.OFF_HEAP : StandardAllocationTypes.ON_HEAP; + threadCache = new PoolThreadLocalCache(useCacheForAllThreads); + this.smallCacheSize = smallCacheSize; + this.normalCacheSize = normalCacheSize; + + if (directMemoryCacheAlignment != 0) { + if (!PlatformDependent.hasAlignDirectByteBuffer()) { + throw new UnsupportedOperationException("Buffer alignment is not supported. " + + "Either Unsafe or ByteBuffer.alignSlice() must be available."); + } + + // Ensure page size is a whole multiple of the alignment, or bump it to the next whole multiple. 
+ pageSize = (int) PlatformDependent.align(pageSize, directMemoryCacheAlignment); + } + + chunkSize = validateAndCalculateChunkSize(pageSize, maxOrder); + + checkPositiveOrZero(numArenas, "numArenas"); + + checkPositiveOrZero(directMemoryCacheAlignment, "directMemoryCacheAlignment"); + if (directMemoryCacheAlignment > 0 && !isDirectMemoryCacheAlignmentSupported()) { + throw new IllegalArgumentException("directMemoryCacheAlignment is not supported"); + } + + if ((directMemoryCacheAlignment & -directMemoryCacheAlignment) != directMemoryCacheAlignment) { + throw new IllegalArgumentException("directMemoryCacheAlignment: " + + directMemoryCacheAlignment + " (expected: power of two)"); + } + + int pageShifts = validateAndCalculatePageShifts(pageSize, directMemoryCacheAlignment); + + if (numArenas > 0) { + arenas = newArenaArray(numArenas); + List metrics = new ArrayList<>(arenas.length); + for (int i = 0; i < arenas.length; i ++) { + PoolArena arena = new PoolArena(this, manager, allocationType, + pageSize, pageShifts, chunkSize, + directMemoryCacheAlignment); + arenas[i] = arena; + metrics.add(arena); + } + arenaMetrics = metrics; + arenaMetricsView = Collections.unmodifiableList(metrics); + } else { + arenas = null; + arenaMetrics = new ArrayList<>(1); + arenaMetricsView = Collections.emptyList(); + } + + metric = new PooledBufferAllocatorMetric(this); + } + + private static PoolArena[] newArenaArray(int size) { + return new PoolArena[size]; + } + + private static int validateAndCalculatePageShifts(int pageSize, int alignment) { + if (pageSize < MIN_PAGE_SIZE) { + throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: " + MIN_PAGE_SIZE + ')'); + } + + if ((pageSize & pageSize - 1) != 0) { + throw new IllegalArgumentException("pageSize: " + pageSize + " (expected: power of 2)"); + } + + if (pageSize < alignment) { + throw new IllegalArgumentException("Alignment cannot be greater than page size. 
" + + "Alignment: " + alignment + ", page size: " + pageSize + '.'); + } + + // Logarithm base 2. At this point we know that pageSize is a power of two. + return Integer.SIZE - 1 - Integer.numberOfLeadingZeros(pageSize); + } + + private static int validateAndCalculateChunkSize(int pageSize, int maxOrder) { + if (maxOrder > 14) { + throw new IllegalArgumentException("maxOrder: " + maxOrder + " (expected: 0-14)"); + } + + // Ensure the resulting chunkSize does not overflow. + int chunkSize = pageSize; + for (int i = maxOrder; i > 0; i--) { + if (chunkSize > MAX_CHUNK_SIZE / 2) { + throw new IllegalArgumentException(String.format( + "pageSize (%d) << maxOrder (%d) must not exceed %d", pageSize, maxOrder, MAX_CHUNK_SIZE)); + } + chunkSize <<= 1; + } + return chunkSize; + } + + @Override + public Buffer allocate(int size) { + if (closed) { + throw allocatorClosedException(); + } + Statics.assertValidBufferSize(size); + PooledAllocatorControl control = new PooledAllocatorControl(); + control.parent = this; + UntetheredMemory memory = allocate(control, size); + Buffer buffer = manager.recoverMemory(control, memory.memory(), memory.drop()); + return buffer.fill((byte) 0); + } + + @Override + public Supplier constBufferSupplier(byte[] bytes) { + if (closed) { + throw allocatorClosedException(); + } + PooledAllocatorControl control = new PooledAllocatorControl(); + control.parent = this; + Buffer constantBuffer = manager.allocateShared( + control, bytes.length, manager.drop(), Statics.CLEANER, allocationType); + constantBuffer.writeBytes(bytes).makeReadOnly(); + return () -> manager.allocateConstChild(constantBuffer); + } + + UntetheredMemory allocate(PooledAllocatorControl control, int size) { + PoolThreadCache cache = threadCache.get(); + PoolArena arena = cache.arena; + + if (arena != null) { + return arena.allocate(control, cache, size); + } + return allocateUnpooled(size); + } + + private UntetheredMemory allocateUnpooled(int size) { + return new 
UnpooledUnthetheredMemory(this, manager, allocationType, size); + } + + @Override + public void close() { + closed = true; + trimCurrentThreadCache(); + threadCache.remove(); + for (int i = 0, arenasLength = arenas.length; i < arenasLength; i++) { + PoolArena arena = arenas[i]; + if (arena != null) { + arena.close(); + arenas[i] = null; + } + } + arenaMetrics.clear(); + } + + /** + * Default number of heap arenas - System Property: io.netty.allocator.numHeapArenas - default 2 * cores + */ + public static int defaultNumHeapArena() { + return DEFAULT_NUM_HEAP_ARENA; + } + + /** + * Default number of direct arenas - System Property: io.netty.allocator.numDirectArenas - default 2 * cores + */ + public static int defaultNumDirectArena() { + return DEFAULT_NUM_DIRECT_ARENA; + } + + /** + * Default buffer page size - System Property: io.netty.allocator.pageSize - default 8192 + */ + public static int defaultPageSize() { + return DEFAULT_PAGE_SIZE; + } + + /** + * Default maximum order - System Property: io.netty.allocator.maxOrder - default 11 + */ + public static int defaultMaxOrder() { + return DEFAULT_MAX_ORDER; + } + + /** + * Default thread caching behavior - System Property: io.netty.allocator.useCacheForAllThreads - default true + */ + public static boolean defaultUseCacheForAllThreads() { + return DEFAULT_USE_CACHE_FOR_ALL_THREADS; + } + + /** + * Default prefer direct - System Property: io.netty.noPreferDirect - default false + */ + public static boolean defaultPreferDirect() { + return PlatformDependent.directBufferPreferred(); + } + + /** + * Default small cache size - System Property: io.netty.allocator.smallCacheSize - default 256 + */ + public static int defaultSmallCacheSize() { + return DEFAULT_SMALL_CACHE_SIZE; + } + + /** + * Default normal cache size - System Property: io.netty.allocator.normalCacheSize - default 64 + */ + public static int defaultNormalCacheSize() { + return DEFAULT_NORMAL_CACHE_SIZE; + } + + /** + * Return {@code true} if direct 
memory cache alignment is supported, {@code false} otherwise. + */ + public static boolean isDirectMemoryCacheAlignmentSupported() { + return PlatformDependent.hasUnsafe(); + } + + public boolean isDirectBufferPooled() { + return allocationType == StandardAllocationTypes.OFF_HEAP; + } + + public int numArenas() { + return arenas.length; + } + + final class PoolThreadLocalCache extends FastThreadLocal { + private final boolean useCacheForAllThreads; + + PoolThreadLocalCache(boolean useCacheForAllThreads) { + this.useCacheForAllThreads = useCacheForAllThreads; + } + + @Override + protected synchronized PoolThreadCache initialValue() { + final PoolArena arena = leastUsedArena(arenas); + + final Thread current = Thread.currentThread(); + if (useCacheForAllThreads || current instanceof FastThreadLocalThread) { + final PoolThreadCache cache = new PoolThreadCache( + arena, smallCacheSize, normalCacheSize, + DEFAULT_MAX_CACHED_BUFFER_CAPACITY, DEFAULT_CACHE_TRIM_INTERVAL); + + if (DEFAULT_CACHE_TRIM_INTERVAL_MILLIS > 0) { + final EventExecutor executor = ThreadExecutorMap.currentExecutor(); + if (executor != null) { + executor.scheduleAtFixedRate(trimTask, DEFAULT_CACHE_TRIM_INTERVAL_MILLIS, + DEFAULT_CACHE_TRIM_INTERVAL_MILLIS, TimeUnit.MILLISECONDS); + } + } + return cache; + } + // No caching so just use 0 as sizes. 
+ return new PoolThreadCache(null, 0, 0, 0, 0); + } + + @Override + protected void onRemoval(PoolThreadCache threadCache) { + threadCache.free(); + } + } + + static PoolArena leastUsedArena(PoolArena[] arenas) { + if (arenas == null || arenas.length == 0) { + return null; + } + + PoolArena minArena = arenas[0]; + for (int i = 1; i < arenas.length; i++) { + PoolArena arena = arenas[i]; + if (arena.numThreadCaches.get() < minArena.numThreadCaches.get()) { + minArena = arena; + } + } + + return minArena; + } + + @Override + public PooledBufferAllocatorMetric metric() { + return metric; + } + + /** + * Return a {@link List} of all heap {@link PoolArenaMetric}s that are provided by this pool. + */ + List arenaMetrics() { + return arenaMetricsView; + } + + /** + * Return the number of thread local caches used by this {@link PooledBufferAllocator}. + */ + int numThreadLocalCaches() { + if (arenas == null) { + return 0; + } + + int total = 0; + for (PoolArena arena : arenas) { + total += arena.numThreadCaches.get(); + } + + return total; + } + + /** + * Return the size of the small cache. + */ + int smallCacheSize() { + return smallCacheSize; + } + + /** + * Return the size of the normal cache. + */ + int normalCacheSize() { + return normalCacheSize; + } + + /** + * Return the chunk size for an arena. + */ + final int chunkSize() { + return chunkSize; + } + + final long usedMemory() { + return usedMemory(arenas); + } + + private static long usedMemory(PoolArena[] arenas) { + if (arenas == null) { + return -1; + } + long used = 0; + for (PoolArena arena : arenas) { + used += arena.numActiveBytes(); + if (used < 0) { + return Long.MAX_VALUE; + } + } + return used; + } + + final PoolThreadCache threadCache() { + PoolThreadCache cache = threadCache.get(); + assert cache != null; + return cache; + } + + /** + * Trim thread local cache for the current {@link Thread}, which will give back any cached memory that was not + * allocated frequently since the last trim operation. 
+ * + * Returns {@code true} if a cache for the current {@link Thread} exists and so was trimmed, false otherwise. + */ + public boolean trimCurrentThreadCache() { + PoolThreadCache cache = threadCache.getIfExists(); + if (cache != null) { + cache.trim(); + return true; + } + return false; + } + + /** + * Returns the status of the allocator (which contains all metrics) as string. Be aware this may be expensive + * and so should not be called too frequently. + */ + public String dumpStats() { + int heapArenasLen = arenas == null ? 0 : arenas.length; + StringBuilder buf = new StringBuilder(512) + .append(heapArenasLen) + .append(" arena(s):") + .append(StringUtil.NEWLINE); + if (heapArenasLen > 0) { + for (PoolArena a: arenas) { + buf.append(a); + } + } + + return buf.toString(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PooledBufferAllocatorMetric.java b/buffer/src/main/java/io/netty/buffer/api/pool/PooledBufferAllocatorMetric.java new file mode 100644 index 00000000000..e4d4f0465e2 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PooledBufferAllocatorMetric.java @@ -0,0 +1,92 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +import io.netty.util.internal.StringUtil; + +import java.util.List; + +/** + * Exposed metric for {@link PooledBufferAllocator}. 
+ */ +final class PooledBufferAllocatorMetric implements BufferAllocatorMetric { + + private final PooledBufferAllocator allocator; + + PooledBufferAllocatorMetric(PooledBufferAllocator allocator) { + this.allocator = allocator; + } + + /** + * Return the number of arenas. + */ + public int numArenas() { + return allocator.numArenas(); + } + + /** + * Return a {@link List} of all {@link PoolArenaMetric}s that are provided by this pool. + */ + public List arenaMetrics() { + return allocator.arenaMetrics(); + } + + /** + * Return the number of thread local caches used by this {@link PooledBufferAllocator}. + */ + public int numThreadLocalCaches() { + return allocator.numThreadLocalCaches(); + } + + /** + * Return the size of the small cache. + */ + public int smallCacheSize() { + return allocator.smallCacheSize(); + } + + /** + * Return the size of the normal cache. + */ + public int normalCacheSize() { + return allocator.normalCacheSize(); + } + + /** + * Return the chunk size for an arena. 
+ */ + public int chunkSize() { + return allocator.chunkSize(); + } + + @Override + public long usedMemory() { + return allocator.usedMemory(); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(256); + sb.append(StringUtil.simpleClassName(this)) + .append("(usedMemory: ").append(usedMemory()) + .append("; numArenas: ").append(numArenas()) + .append("; smallCacheSize: ").append(smallCacheSize()) + .append("; normalCacheSize: ").append(normalCacheSize()) + .append("; numThreadLocalCaches: ").append(numThreadLocalCaches()) + .append("; chunkSize: ").append(chunkSize()).append(')'); + return sb.toString(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/PooledDrop.java b/buffer/src/main/java/io/netty/buffer/api/pool/PooledDrop.java new file mode 100644 index 00000000000..a3191a26d90 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/PooledDrop.java @@ -0,0 +1,40 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; + +class PooledDrop implements Drop { + private final PoolArena arena; + private final PoolChunk chunk; + private final PoolThreadCache threadCache; + private final long handle; + private final int normSize; + + PooledDrop(PoolArena arena, PoolChunk chunk, PoolThreadCache threadCache, long handle, int normSize) { + this.arena = arena; + this.chunk = chunk; + this.threadCache = threadCache; + this.handle = handle; + this.normSize = normSize; + } + + @Override + public void drop(Buffer obj) { + arena.free(chunk, handle, normSize, threadCache); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/SizeClasses.java b/buffer/src/main/java/io/netty/buffer/api/pool/SizeClasses.java new file mode 100644 index 00000000000..534cdf1a227 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/SizeClasses.java @@ -0,0 +1,478 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +import java.util.concurrent.ConcurrentHashMap; + +/** + * SizeClasses requires {@code pageShifts} to be defined prior to inclusion, + * and it in turn defines: + *

    + * LOG2_SIZE_CLASS_GROUP: Log of size class count for each size doubling. + * LOG2_MAX_LOOKUP_SIZE: Log of max size class in the lookup table. + * sizeClasses: Complete table of [index, log2Group, log2Delta, nDelta, isMultiPageSize, + * isSubPage, log2DeltaLookup] tuples. + * index: Size class index. + * log2Group: Log of group base size (no deltas added). + * log2Delta: Log of delta to previous size class. + * nDelta: Delta multiplier. + * isMultiPageSize: 'yes' if a multiple of the page size, 'no' otherwise. + * isSubPage: 'yes' if a subpage size class, 'no' otherwise. + * log2DeltaLookup: Same as log2Delta if a lookup table size class, 'no' + * otherwise. + *

    + * nSubpages: Number of subpages size classes. + * nSizes: Number of size classes. + * nPSizes: Number of size classes that are multiples of pageSize. + * + * smallMaxSizeIdx: Maximum small size class index. + * + * lookupMaxclass: Maximum size class included in lookup table. + * log2NormalMinClass: Log of minimum normal size class. + *

    + * The first size class and spacing are 1 << LOG2_QUANTUM. + * Each group has 1 << LOG2_SIZE_CLASS_GROUP of size classes. + * + * size = 1 << log2Group + nDelta * (1 << log2Delta) + * + * The first size class has an unusual encoding, because the size has to be + * split between group and delta*nDelta. + * + * If pageShift = 13, sizeClasses looks like this: + * + * (index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup) + *

    + * ( 0, 4, 4, 0, no, yes, 4) + * ( 1, 4, 4, 1, no, yes, 4) + * ( 2, 4, 4, 2, no, yes, 4) + * ( 3, 4, 4, 3, no, yes, 4) + *

    + * ( 4, 6, 4, 1, no, yes, 4) + * ( 5, 6, 4, 2, no, yes, 4) + * ( 6, 6, 4, 3, no, yes, 4) + * ( 7, 6, 4, 4, no, yes, 4) + *

    + * ( 8, 7, 5, 1, no, yes, 5) + * ( 9, 7, 5, 2, no, yes, 5) + * ( 10, 7, 5, 3, no, yes, 5) + * ( 11, 7, 5, 4, no, yes, 5) + * ... + * ... + * ( 72, 23, 21, 1, yes, no, no) + * ( 73, 23, 21, 2, yes, no, no) + * ( 74, 23, 21, 3, yes, no, no) + * ( 75, 23, 21, 4, yes, no, no) + *

    + * ( 76, 24, 22, 1, yes, no, no) + */ +abstract class SizeClasses implements SizeClassesMetric { + private static final ConcurrentHashMap CACHE = + new ConcurrentHashMap(); + + static final int LOG2_QUANTUM = 4; + + private static final int LOG2_SIZE_CLASS_GROUP = 2; + private static final int LOG2_MAX_LOOKUP_SIZE = 12; + + private static final int LOG2GROUP_IDX = 1; + private static final int LOG2DELTA_IDX = 2; + private static final int NDELTA_IDX = 3; + private static final int PAGESIZE_IDX = 4; + private static final int SUBPAGE_IDX = 5; + private static final int LOG2_DELTA_LOOKUP_IDX = 6; + + private static final byte no = 0, yes = 1; + + protected SizeClasses(int pageSize, int pageShifts, int chunkSize, int directMemoryCacheAlignment) { + this.pageSize = pageSize; + this.pageShifts = pageShifts; + this.chunkSize = chunkSize; + this.directMemoryCacheAlignment = directMemoryCacheAlignment; + + SizeClassValue value = CACHE.computeIfAbsent( + new SizeClassKey(pageSize, pageShifts, chunkSize, directMemoryCacheAlignment), + SizeClassValue::new); + nSizes = value.nSizes; + nSubpages = value.nSubpages; + nPSizes = value.nPSizes; + smallMaxSizeIdx = value.smallMaxSizeIdx; + lookupMaxSize = value.lookupMaxSize; + pageIdx2sizeTab = value.pageIdx2sizeTab; + sizeIdx2sizeTab = value.sizeIdx2sizeTab; + size2idxTab = value.size2idxTab; + } + + protected final int pageSize; + protected final int pageShifts; + protected final int chunkSize; + protected final int directMemoryCacheAlignment; + + final int nSizes; + final int nSubpages; + final int nPSizes; + final int smallMaxSizeIdx; + + private final int lookupMaxSize; + private final int[] pageIdx2sizeTab; + + // lookup table for sizeIdx <= smallMaxSizeIdx + private final int[] sizeIdx2sizeTab; + + // lookup table used for size <= lookupMaxclass + // spacing is 1 << LOG2_QUANTUM, so the size of array is lookupMaxclass >> LOG2_QUANTUM + private final int[] size2idxTab; + + @Override + public int sizeIdx2size(int sizeIdx) 
{ + return sizeIdx2sizeTab[sizeIdx]; + } + + @Override + public int sizeIdx2sizeCompute(int sizeIdx) { + int group = sizeIdx >> LOG2_SIZE_CLASS_GROUP; + int mod = sizeIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + int groupSize = group == 0? 0 : + 1 << LOG2_QUANTUM + LOG2_SIZE_CLASS_GROUP - 1 << group; + + int shift = group == 0? 1 : group; + int lgDelta = shift + LOG2_QUANTUM - 1; + int modSize = mod + 1 << lgDelta; + + return groupSize + modSize; + } + + @Override + public long pageIdx2size(int pageIdx) { + return pageIdx2sizeTab[pageIdx]; + } + + @Override + public long pageIdx2sizeCompute(int pageIdx) { + int group = pageIdx >> LOG2_SIZE_CLASS_GROUP; + int mod = pageIdx & (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + long groupSize = group == 0? 0 : + 1L << pageShifts + LOG2_SIZE_CLASS_GROUP - 1 << group; + + int shift = group == 0? 1 : group; + int log2Delta = shift + pageShifts - 1; + int modSize = mod + 1 << log2Delta; + + return groupSize + modSize; + } + + @Override + public int size2SizeIdx(int size) { + if (size == 0) { + return 0; + } + if (size > chunkSize) { + return nSizes; + } + + if (directMemoryCacheAlignment > 0) { + size = alignSize(size); + } + + if (size <= lookupMaxSize) { + //size-1 / MIN_TINY + return size2idxTab[size - 1 >> LOG2_QUANTUM]; + } + + int x = PoolThreadCache.log2((size << 1) - 1); + int shift = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1 + ? 0 : x - (LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM); + + int group = shift << LOG2_SIZE_CLASS_GROUP; + + int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1 + ? 
LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1; + + int deltaInverseMask = -1 << log2Delta; + int mod = (size - 1 & deltaInverseMask) >> log2Delta & + (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + return group + mod; + } + + @Override + public int pages2pageIdx(int pages) { + return pages2pageIdxCompute(pages, false); + } + + @Override + public int pages2pageIdxFloor(int pages) { + return pages2pageIdxCompute(pages, true); + } + + private int pages2pageIdxCompute(int pages, boolean floor) { + int pageSize = pages << pageShifts; + if (pageSize > chunkSize) { + return nPSizes; + } + + int x = PoolThreadCache.log2((pageSize << 1) - 1); + + int shift = x < LOG2_SIZE_CLASS_GROUP + pageShifts + ? 0 : x - (LOG2_SIZE_CLASS_GROUP + pageShifts); + + int group = shift << LOG2_SIZE_CLASS_GROUP; + + int log2Delta = x < LOG2_SIZE_CLASS_GROUP + pageShifts + 1? + pageShifts : x - LOG2_SIZE_CLASS_GROUP - 1; + + int deltaInverseMask = -1 << log2Delta; + int mod = (pageSize - 1 & deltaInverseMask) >> log2Delta & + (1 << LOG2_SIZE_CLASS_GROUP) - 1; + + int pageIdx = group + mod; + + if (floor && pageIdx2sizeTab[pageIdx] > pages << pageShifts) { + pageIdx--; + } + + return pageIdx; + } + + // Round size up to the nearest multiple of alignment. + private int alignSize(int size) { + int delta = size & directMemoryCacheAlignment - 1; + return delta == 0? size : size + directMemoryCacheAlignment - delta; + } + + @Override + public int normalizeSize(int size) { + if (size == 0) { + return sizeIdx2sizeTab[0]; + } + if (directMemoryCacheAlignment > 0) { + size = alignSize(size); + } + + if (size <= lookupMaxSize) { + int ret = sizeIdx2sizeTab[size2idxTab[size - 1 >> LOG2_QUANTUM]]; + assert ret == normalizeSizeCompute(size); + return ret; + } + return normalizeSizeCompute(size); + } + + private static int normalizeSizeCompute(int size) { + int x = PoolThreadCache.log2((size << 1) - 1); + int log2Delta = x < LOG2_SIZE_CLASS_GROUP + LOG2_QUANTUM + 1 + ? 
LOG2_QUANTUM : x - LOG2_SIZE_CLASS_GROUP - 1; + int delta = 1 << log2Delta; + int delta_mask = delta - 1; + return size + delta_mask & ~delta_mask; + } + + private static final class SizeClassKey { + final int pageSize; + final int pageShifts; + final int chunkSize; + final int directMemoryCacheAlignment; + + private SizeClassKey(int pageSize, int pageShifts, int chunkSize, int directMemoryCacheAlignment) { + this.pageSize = pageSize; + this.pageShifts = pageShifts; + this.chunkSize = chunkSize; + this.directMemoryCacheAlignment = directMemoryCacheAlignment; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + + SizeClassKey that = (SizeClassKey) o; + + if (pageSize != that.pageSize) { + return false; + } + if (pageShifts != that.pageShifts) { + return false; + } + if (chunkSize != that.chunkSize) { + return false; + } + return directMemoryCacheAlignment == that.directMemoryCacheAlignment; + } + + @Override + public int hashCode() { + int result = pageSize; + result = 31 * result + pageShifts; + result = 31 * result + chunkSize; + result = 31 * result + directMemoryCacheAlignment; + return result; + } + } + + private static final class SizeClassValue { + final SizeClassKey key; + final int nSizes; + int nSubpages; + int nPSizes; + int smallMaxSizeIdx; + int lookupMaxSize; + final short[][] sizeClasses; + final int[] pageIdx2sizeTab; + final int[] sizeIdx2sizeTab; + final int[] size2idxTab; + + SizeClassValue(SizeClassKey key) { + this.key = key; + int group = PoolThreadCache.log2(key.chunkSize) + 1 - LOG2_QUANTUM; + + //generate size classes + //[index, log2Group, log2Delta, nDelta, isMultiPageSize, isSubPage, log2DeltaLookup] + sizeClasses = new short[group << LOG2_SIZE_CLASS_GROUP][7]; + nSizes = sizeClasses(); + + //generate lookup table + sizeIdx2sizeTab = new int[nSizes]; + pageIdx2sizeTab = new int[nPSizes]; + idx2SizeTab(sizeIdx2sizeTab, 
pageIdx2sizeTab); + + size2idxTab = new int[lookupMaxSize >> LOG2_QUANTUM]; + size2idxTab(size2idxTab); + } + + private int sizeClasses() { + int normalMaxSize = -1; + + int index = 0; + int size = 0; + + int log2Group = LOG2_QUANTUM; + int log2Delta = LOG2_QUANTUM; + int ndeltaLimit = 1 << LOG2_SIZE_CLASS_GROUP; + + //First small group, nDelta start at 0. + //first size class is 1 << LOG2_QUANTUM + int nDelta = 0; + while (nDelta < ndeltaLimit) { + size = sizeClass(index++, log2Group, log2Delta, nDelta++); + } + log2Group += LOG2_SIZE_CLASS_GROUP; + + //All remaining groups, nDelta start at 1. + while (size < key.chunkSize) { + nDelta = 1; + + while (nDelta <= ndeltaLimit && size < key.chunkSize) { + size = sizeClass(index++, log2Group, log2Delta, nDelta++); + normalMaxSize = size; + } + + log2Group++; + log2Delta++; + } + + //chunkSize must be normalMaxSize + assert key.chunkSize == normalMaxSize; + + //return number of size index + return index; + } + + //calculate size class + private int sizeClass(int index, int log2Group, int log2Delta, int nDelta) { + short isMultiPageSize; + if (log2Delta >= key.pageShifts) { + isMultiPageSize = yes; + } else { + int pageSize = 1 << key.pageShifts; + int size = (1 << log2Group) + (1 << log2Delta) * nDelta; + + isMultiPageSize = size == size / pageSize * pageSize? yes : no; + } + + int log2Ndelta = nDelta == 0? 0 : PoolThreadCache.log2(nDelta); + + byte remove = 1 << log2Ndelta < nDelta? yes : no; + + int log2Size = log2Delta + log2Ndelta == log2Group? log2Group + 1 : log2Group; + if (log2Size == log2Group) { + remove = yes; + } + + short isSubpage = log2Size < key.pageShifts + LOG2_SIZE_CLASS_GROUP? yes : no; + + int log2DeltaLookup = log2Size < LOG2_MAX_LOOKUP_SIZE || + log2Size == LOG2_MAX_LOOKUP_SIZE && remove == no + ? 
log2Delta : no; + + short[] sz = { + (short) index, (short) log2Group, (short) log2Delta, + (short) nDelta, isMultiPageSize, isSubpage, (short) log2DeltaLookup + }; + + sizeClasses[index] = sz; + int size = (1 << log2Group) + (nDelta << log2Delta); + + if (sz[PAGESIZE_IDX] == yes) { + nPSizes++; + } + if (sz[SUBPAGE_IDX] == yes) { + nSubpages++; + smallMaxSizeIdx = index; + } + if (sz[LOG2_DELTA_LOOKUP_IDX] != no) { + lookupMaxSize = size; + } + return size; + } + + private void idx2SizeTab(int[] sizeIdx2sizeTab, int[] pageIdx2sizeTab) { + int pageIdx = 0; + + for (int i = 0; i < nSizes; i++) { + short[] sizeClass = sizeClasses[i]; + int log2Group = sizeClass[LOG2GROUP_IDX]; + int log2Delta = sizeClass[LOG2DELTA_IDX]; + int nDelta = sizeClass[NDELTA_IDX]; + + int size = (1 << log2Group) + (nDelta << log2Delta); + sizeIdx2sizeTab[i] = size; + + if (sizeClass[PAGESIZE_IDX] == yes) { + pageIdx2sizeTab[pageIdx++] = size; + } + } + } + + private void size2idxTab(int[] size2idxTab) { + int idx = 0; + int size = 0; + + for (int i = 0; size <= lookupMaxSize; i++) { + int log2Delta = sizeClasses[i][LOG2DELTA_IDX]; + int times = 1 << log2Delta - LOG2_QUANTUM; + + while (size <= lookupMaxSize && times-- > 0) { + size2idxTab[idx++] = i; + size = idx + 1 << LOG2_QUANTUM; + } + } + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/SizeClassesMetric.java b/buffer/src/main/java/io/netty/buffer/api/pool/SizeClassesMetric.java new file mode 100644 index 00000000000..3f3ac3e3835 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/SizeClassesMetric.java @@ -0,0 +1,87 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.pool; + +/** + * Expose metrics for an SizeClasses. + */ +public interface SizeClassesMetric { + + /** + * Computes size from lookup table according to sizeIdx. + * + * @return size + */ + int sizeIdx2size(int sizeIdx); + + /** + * Computes size according to sizeIdx. + * + * @return size + */ + int sizeIdx2sizeCompute(int sizeIdx); + + /** + * Computes size from lookup table according to pageIdx. + * + * @return size which is multiples of pageSize. + */ + long pageIdx2size(int pageIdx); + + /** + * Computes size according to pageIdx. + * + * @return size which is multiples of pageSize + */ + long pageIdx2sizeCompute(int pageIdx); + + /** + * Normalizes request size up to the nearest size class. + * + * @param size request size + * + * @return sizeIdx of the size class + */ + int size2SizeIdx(int size); + + /** + * Normalizes request size up to the nearest pageSize class. + * + * @param pages multiples of pageSizes + * + * @return pageIdx of the pageSize class + */ + int pages2pageIdx(int pages); + + /** + * Normalizes request size down to the nearest pageSize class. + * + * @param pages multiples of pageSizes + * + * @return pageIdx of the pageSize class + */ + int pages2pageIdxFloor(int pages); + + /** + * Normalizes usable size that would result from allocating an object with the + * specified size and alignment. 
+ * + * @param size request size + * + * @return normalized size + */ + int normalizeSize(int size); +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/UnpooledUnthetheredMemory.java b/buffer/src/main/java/io/netty/buffer/api/pool/UnpooledUnthetheredMemory.java new file mode 100644 index 00000000000..f30e253ea33 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/UnpooledUnthetheredMemory.java @@ -0,0 +1,47 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.pool; + +import io.netty.buffer.api.AllocationType; +import io.netty.buffer.api.AllocatorControl; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.internal.Statics; + +@SuppressWarnings("unchecked") +class UnpooledUnthetheredMemory implements AllocatorControl.UntetheredMemory { + private final MemoryManager manager; + private final Buffer buffer; + + UnpooledUnthetheredMemory(PooledBufferAllocator allocator, MemoryManager manager, + AllocationType allocationType, int size) { + this.manager = manager; + PooledAllocatorControl allocatorControl = new PooledAllocatorControl(); + allocatorControl.parent = allocator; + buffer = manager.allocateShared(allocatorControl, size, manager.drop(), Statics.CLEANER, allocationType); + } + + @Override + public Memory memory() { + return (Memory) manager.unwrapRecoverableMemory(buffer); + } + + @Override + public Drop drop() { + return (Drop) manager.drop(); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/pool/package-info.java b/buffer/src/main/java/io/netty/buffer/api/pool/package-info.java new file mode 100644 index 00000000000..ce60c3b685b --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/pool/package-info.java @@ -0,0 +1,19 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +/** + * A pooling {@link io.netty.buffer.api.BufferAllocator} implementation based on jemalloc. + */ +package io.netty.buffer.api.pool; diff --git a/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeBuffer.java b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeBuffer.java new file mode 100644 index 00000000000..9079a24ccb6 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeBuffer.java @@ -0,0 +1,1514 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.unsafe; + +import io.netty.buffer.api.AllocatorControl; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferReadOnlyException; +import io.netty.buffer.api.ByteCursor; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.Owned; +import io.netty.buffer.api.ReadableComponent; +import io.netty.buffer.api.ReadableComponentProcessor; +import io.netty.buffer.api.WritableComponent; +import io.netty.buffer.api.WritableComponentProcessor; +import io.netty.buffer.api.internal.AdaptableBuffer; +import io.netty.buffer.api.internal.ArcDrop; +import io.netty.buffer.api.internal.Statics; +import io.netty.util.internal.PlatformDependent; + +import java.lang.ref.Reference; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.ReadOnlyBufferException; + +import static io.netty.buffer.api.internal.Statics.bbslice; +import static io.netty.buffer.api.internal.Statics.bufferIsClosed; +import static io.netty.buffer.api.internal.Statics.bufferIsReadOnly; + +class UnsafeBuffer extends AdaptableBuffer implements ReadableComponent, WritableComponent { + private static final int CLOSED_SIZE = -1; + private static final boolean ACCESS_UNALIGNED = PlatformDependent.isUnaligned(); + private static final boolean FLIP_BYTES = ByteOrder.BIG_ENDIAN != ByteOrder.nativeOrder(); + private UnsafeMemory memory; // The memory liveness; monitored by Cleaner. + private Object base; // On-heap address reference object, or null for off-heap. + private long baseOffset; // Offset of this buffer into the memory. + private long address; // Resolved address (baseOffset + memory.address). 
+ private int rsize; + private int wsize; + private final AllocatorControl control; + private boolean readOnly; + private int roff; + private int woff; + private boolean constBuffer; + + UnsafeBuffer(UnsafeMemory memory, long offset, int size, AllocatorControl allocatorControl, + Drop drop) { + super(new MakeInaccessibleOnDrop(ArcDrop.wrap(drop))); + this.memory = memory; + base = memory.base; + baseOffset = offset; + address = memory.address + offset; + rsize = size; + wsize = size; + control = allocatorControl; + } + + UnsafeBuffer(UnsafeBuffer parent) { + super(new MakeInaccessibleOnDrop(new ArcDrop<>(ArcDrop.acquire(parent.unsafeGetDrop())))); + control = parent.control; + memory = parent.memory; + base = parent.base; + baseOffset = parent.baseOffset; + address = parent.address; + rsize = parent.rsize; + wsize = parent.wsize; + readOnly = parent.readOnly; + roff = parent.roff; + woff = parent.woff; + constBuffer = true; + } + + @Override + public String toString() { + return "Buffer[roff:" + roff + ", woff:" + woff + ", cap:" + rsize + ']'; + } + + @Override + protected RuntimeException createResourceClosedException() { + return bufferIsClosed(this); + } + + @Override + public int capacity() { + return Math.max(0, rsize); // Use Math.max to make capacity of closed buffer equal to zero. 
+ } + + @Override + public int readerOffset() { + return roff; + } + + @Override + public Buffer readerOffset(int offset) { + checkRead(offset, 0); + roff = offset; + return this; + } + + @Override + public int writerOffset() { + return woff; + } + + @Override + public Buffer writerOffset(int offset) { + checkWrite(offset, 0); + woff = offset; + return this; + } + + @Override + public Buffer fill(byte value) { + checkSet(0, capacity()); + if (rsize == CLOSED_SIZE) { + throw bufferIsClosed(this); + } + try { + PlatformDependent.setMemory(base, address, rsize, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public long nativeAddress() { + return base == null? address : 0; + } + + @Override + public Buffer makeReadOnly() { + readOnly = true; + wsize = CLOSED_SIZE; + return this; + } + + @Override + public boolean readOnly() { + return readOnly; + } + + @Override + public Buffer copy(int offset, int length) { + checkGet(offset, length); + int allocSize = Math.max(length, 1); // Allocators don't support allocating zero-sized buffers. 
+ AllocatorControl.UntetheredMemory memory = control.allocateUntethered(this, allocSize); + UnsafeMemory unsafeMemory = memory.memory(); + Buffer copy = new UnsafeBuffer(unsafeMemory, 0, length, control, memory.drop()); + copyInto(offset, copy, 0, length); + copy.writerOffset(length); + return copy; + } + + @Override + public void copyInto(int srcPos, byte[] dest, int destPos, int length) { + checkCopyIntoArgs(srcPos, length, destPos, dest.length); + copyIntoArray(srcPos, dest, destPos, length); + } + + private void copyIntoArray(int srcPos, byte[] dest, int destPos, int length) { + long destOffset = PlatformDependent.byteArrayBaseOffset(); + try { + PlatformDependent.copyMemory(base, address + srcPos, dest, destOffset + destPos, length); + } finally { + Reference.reachabilityFence(memory); + Reference.reachabilityFence(dest); + } + } + + @Override + public void copyInto(int srcPos, ByteBuffer dest, int destPos, int length) { + checkCopyIntoArgs(srcPos, length, destPos, dest.capacity()); + if (dest.isReadOnly()) { + throw new ReadOnlyBufferException(); + } + if (dest.hasArray()) { + copyIntoArray(srcPos, dest.array(), dest.arrayOffset() + destPos, length); + } else { + assert dest.isDirect(); + long destAddr = PlatformDependent.directBufferAddress(dest); + try { + PlatformDependent.copyMemory(base, address + srcPos, null, destAddr + destPos, length); + } finally { + Reference.reachabilityFence(memory); + Reference.reachabilityFence(dest); + } + } + } + + private void checkCopyIntoArgs(int srcPos, int length, int destPos, int destLength) { + if (rsize == CLOSED_SIZE) { + throw bufferIsClosed(this); + } + if (srcPos < 0) { + throw new IllegalArgumentException("The srcPos cannot be negative: " + srcPos + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (rsize < srcPos + length) { + throw new IllegalArgumentException("The srcPos + length is beyond the end of the buffer: " + + "srcPos = " + 
srcPos + ", length = " + length + '.'); + } + if (destPos < 0) { + throw new IllegalArgumentException("The destPos cannot be negative: " + destPos + '.'); + } + if (destLength < destPos + length) { + throw new IllegalArgumentException("The destPos + length is beyond the end of the destination: " + + "destPos = " + destPos + ", length = " + length + '.'); + } + } + + @Override + public void copyInto(int srcPos, Buffer dest, int destPos, int length) { + if (!dest.isAccessible()) { + throw bufferIsClosed(dest); + } + checkCopyIntoArgs(srcPos, length, destPos, dest.capacity()); + if (dest.readOnly()) { + throw bufferIsReadOnly(this); + } + long nativeAddress = dest.nativeAddress(); + try { + if (nativeAddress != 0) { + PlatformDependent.copyMemory(base, address + srcPos, null, nativeAddress + destPos, length); + } else if (dest instanceof UnsafeBuffer) { + UnsafeBuffer destUnsafe = (UnsafeBuffer) dest; + PlatformDependent.copyMemory( + base, address + srcPos, destUnsafe.base, destUnsafe.address + destPos, length); + } else { + Statics.copyToViaReverseLoop(this, srcPos, dest, destPos, length); + } + } finally { + Reference.reachabilityFence(memory); + Reference.reachabilityFence(dest); + } + } + + @Override + public ByteCursor openCursor() { + return openCursor(readerOffset(), readableBytes()); + } + + @Override + public ByteCursor openCursor(int fromOffset, int length) { + if (rsize == CLOSED_SIZE) { + throw bufferIsClosed(this); + } + if (fromOffset < 0) { + throw new IllegalArgumentException("The fromOffset cannot be negative: " + fromOffset + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (capacity() < fromOffset + length) { + throw new IllegalArgumentException("The fromOffset + length is beyond the end of the buffer: " + + "fromOffset = " + fromOffset + ", length = " + length + '.'); + } + return new ByteCursor() { + final UnsafeMemory memory = UnsafeBuffer.this.memory; // Keep memory 
alive. + final Object baseObj = base; + final long baseAddress = address; + int index = fromOffset; + final int end = index + length; + byte byteValue = -1; + + @Override + public boolean readByte() { + if (index < end) { + try { + byteValue = PlatformDependent.getByte(baseObj, baseAddress + index); + } finally { + Reference.reachabilityFence(memory); + } + index++; + return true; + } + return false; + } + + @Override + public byte getByte() { + return byteValue; + } + + @Override + public int currentOffset() { + return index; + } + + @Override + public int bytesLeft() { + return end - index; + } + }; + } + + @Override + public ByteCursor openReverseCursor(int fromOffset, int length) { + if (rsize == CLOSED_SIZE) { + throw bufferIsClosed(this); + } + if (fromOffset < 0) { + throw new IllegalArgumentException("The fromOffset cannot be negative: " + fromOffset + '.'); + } + if (length < 0) { + throw new IllegalArgumentException("The length cannot be negative: " + length + '.'); + } + if (capacity() <= fromOffset) { + throw new IllegalArgumentException("The fromOffset is beyond the end of the buffer: " + fromOffset + '.'); + } + if (fromOffset - length < -1) { + throw new IllegalArgumentException("The fromOffset - length would underflow the buffer: " + + "fromOffset = " + fromOffset + ", length = " + length + '.'); + } + return new ByteCursor() { + final UnsafeMemory memory = UnsafeBuffer.this.memory; // Keep memory alive. 
+ final Object baseObj = base; + final long baseAddress = address; + int index = fromOffset; + final int end = index - length; + byte byteValue = -1; + + @Override + public boolean readByte() { + if (index > end) { + try { + byteValue = PlatformDependent.getByte(baseObj, baseAddress + index); + } finally { + Reference.reachabilityFence(memory); + } + index--; + return true; + } + return false; + } + + @Override + public byte getByte() { + return byteValue; + } + + @Override + public int currentOffset() { + return index; + } + + @Override + public int bytesLeft() { + return index - end; + } + }; + } + + @Override + public Buffer ensureWritable(int size, int minimumGrowth, boolean allowCompaction) { + if (!isAccessible()) { + throw bufferIsClosed(this); + } + if (!isOwned()) { + throw attachTrace(new IllegalStateException( + "Buffer is not owned. Only owned buffers can call ensureWritable.")); + } + if (size < 0) { + throw new IllegalArgumentException("Cannot ensure writable for a negative size: " + size + '.'); + } + if (minimumGrowth < 0) { + throw new IllegalArgumentException("The minimum growth cannot be negative: " + minimumGrowth + '.'); + } + if (rsize != wsize) { + throw bufferIsReadOnly(this); + } + if (writableBytes() >= size) { + // We already have enough space. + return this; + } + + if (allowCompaction && writableBytes() + readerOffset() >= size) { + // We can solve this with compaction. + return compact(); + } + + // Allocate a bigger buffer. + long newSize = capacity() + (long) Math.max(size - writableBytes(), minimumGrowth); + Statics.assertValidBufferSize(newSize); + var untethered = control.allocateUntethered(this, (int) newSize); + UnsafeMemory memory = untethered.memory(); + + // Copy contents. 
+ try { + PlatformDependent.copyMemory(base, address, memory.base, memory.address, rsize); + } finally { + Reference.reachabilityFence(this.memory); + Reference.reachabilityFence(memory); + } + + // Release the old memory, and install the new memory: + Drop drop = untethered.drop(); + disconnectDrop(drop); + attachNewMemory(memory, drop); + return this; + } + + private Drop disconnectDrop(Drop newDrop) { + var drop = (Drop) unsafeGetDrop(); + int roff = this.roff; + int woff = this.woff; + drop.drop(this); + unsafeSetDrop(new ArcDrop<>(newDrop)); + this.roff = roff; + this.woff = woff; + return drop; + } + + private void attachNewMemory(UnsafeMemory memory, Drop drop) { + this.memory = memory; + base = memory.base; + baseOffset = 0; + address = memory.address; + rsize = memory.size; + wsize = memory.size; + constBuffer = false; + drop.attach(this); + } + + @Override + public Buffer split(int splitOffset) { + if (splitOffset < 0) { + throw new IllegalArgumentException("The split offset cannot be negative: " + splitOffset + '.'); + } + if (capacity() < splitOffset) { + throw new IllegalArgumentException("The split offset cannot be greater than the buffer capacity, " + + "but the split offset was " + splitOffset + ", and capacity is " + capacity() + '.'); + } + if (!isAccessible()) { + throw attachTrace(bufferIsClosed(this)); + } + if (!isOwned()) { + throw attachTrace(new IllegalStateException("Cannot split a buffer that is not owned.")); + } + var drop = (ArcDrop) unsafeGetDrop(); + unsafeSetDrop(new ArcDrop<>(drop)); + // TODO maybe incrementing the existing ArcDrop is enough; maybe we don't need to wrap it in another ArcDrop. + var splitBuffer = new UnsafeBuffer(memory, baseOffset, splitOffset, control, new ArcDrop<>(drop.increment())); + splitBuffer.woff = Math.min(woff, splitOffset); + splitBuffer.roff = Math.min(roff, splitOffset); + boolean readOnly = readOnly(); + if (readOnly) { + splitBuffer.makeReadOnly(); + } + // Split preserves const-state. 
+ splitBuffer.constBuffer = constBuffer; + rsize -= splitOffset; + baseOffset += splitOffset; + address += splitOffset; + if (!readOnly) { + wsize = rsize; + } + woff = Math.max(woff, splitOffset) - splitOffset; + roff = Math.max(roff, splitOffset) - splitOffset; + return splitBuffer; + } + + @Override + public Buffer compact() { + if (!isOwned()) { + throw attachTrace(new IllegalStateException("Buffer must be owned in order to compact.")); + } + if (readOnly()) { + throw new BufferReadOnlyException("Buffer must be writable in order to compact, but was read-only."); + } + if (roff == 0) { + return this; + } + try { + PlatformDependent.copyMemory(base, address + roff, base, address, woff - roff); + } finally { + Reference.reachabilityFence(memory); + } + woff -= roff; + roff = 0; + return this; + } + + @Override + public int countComponents() { + return 1; + } + + @Override + public int countReadableComponents() { + return readableBytes() > 0? 1 : 0; + } + + @Override + public int countWritableComponents() { + return writableBytes() > 0? 
1 : 0; + } + + // + @Override + public boolean hasReadableArray() { + return base instanceof byte[]; + } + + @Override + public byte[] readableArray() { + checkHasReadableArray(); + return (byte[]) base; + } + + @Override + public int readableArrayOffset() { + checkHasReadableArray(); + return Math.toIntExact(address + roff - PlatformDependent.byteArrayBaseOffset()); + } + + private void checkHasReadableArray() { + if (!hasReadableArray()) { + throw new UnsupportedOperationException("No readable array available."); + } + } + + @Override + public int readableArrayLength() { + return woff - roff; + } + + @Override + public long readableNativeAddress() { + return nativeAddress(); + } + + @Override + public ByteBuffer readableBuffer() { + final ByteBuffer buf; + if (hasReadableArray()) { + buf = bbslice(ByteBuffer.wrap(readableArray()), readableArrayOffset(), readableArrayLength()); + } else { + buf = PlatformDependent.directBuffer(address + roff, readableBytes()); + } + return buf.asReadOnlyBuffer(); + } + + @Override + public boolean hasWritableArray() { + return hasReadableArray(); + } + + @Override + public byte[] writableArray() { + checkHasWritableArray(); + return (byte[]) base; + } + + @Override + public int writableArrayOffset() { + checkHasWritableArray(); + return Math.toIntExact(address + woff - PlatformDependent.byteArrayBaseOffset()); + } + + private void checkHasWritableArray() { + if (!hasReadableArray()) { + throw new UnsupportedOperationException("No writable array available."); + } + } + + @Override + public int writableArrayLength() { + return capacity() - woff; + } + + @Override + public long writableNativeAddress() { + return nativeAddress(); + } + + @Override + public ByteBuffer writableBuffer() { + final ByteBuffer buf; + if (hasWritableArray()) { + buf = bbslice(ByteBuffer.wrap(writableArray()), writableArrayOffset(), writableArrayLength()); + } else { + buf = PlatformDependent.directBuffer(address + woff, writableBytes()); + } + return buf; + 
} + // + + @Override + public int forEachReadable(int initialIndex, ReadableComponentProcessor processor) + throws E { + checkRead(readerOffset(), Math.max(1, readableBytes())); + return processor.process(initialIndex, this)? 1 : -1; + } + + @Override + public int forEachWritable(int initialIndex, WritableComponentProcessor processor) + throws E { + checkWrite(writerOffset(), Math.max(1, writableBytes())); + return processor.process(initialIndex, this)? 1 : -1; + } + + // + @Override + public byte readByte() { + checkRead(roff, Byte.BYTES); + try { + var value = loadByte(address + roff); + roff += Byte.BYTES; + return value; + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public byte getByte(int roff) { + checkGet(roff, Byte.BYTES); + try { + return loadByte(address + roff); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public int readUnsignedByte() { + return readByte() & 0xFF; + } + + @Override + public int getUnsignedByte(int roff) { + return getByte(roff) & 0xFF; + } + + @Override + public Buffer writeByte(byte value) { + checkWrite(woff, Byte.BYTES); + long offset = address + woff; + woff += Byte.BYTES; + try { + storeByte(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setByte(int woff, byte value) { + checkSet(woff, Byte.BYTES); + long offset = address + woff; + try { + storeByte(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer writeUnsignedByte(int value) { + checkWrite(woff, Byte.BYTES); + long offset = address + woff; + woff += Byte.BYTES; + try { + storeByte(offset, (byte) (value & 0xFF)); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setUnsignedByte(int woff, int value) { + checkSet(woff, Byte.BYTES); + long offset = address + woff; + try { + storeByte(offset, (byte) (value & 0xFF)); + } 
finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public char readChar() { + checkRead(roff, Character.BYTES); + try { + long offset = address + roff; + roff += Character.BYTES; + return loadChar(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public char getChar(int roff) { + checkGet(roff, Character.BYTES); + try { + long offset = address + roff; + return loadChar(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public Buffer writeChar(char value) { + checkWrite(woff, Character.BYTES); + long offset = address + woff; + woff += Character.BYTES; + try { + storeChar(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setChar(int woff, char value) { + checkSet(woff, Character.BYTES); + long offset = address + woff; + try { + storeChar(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public short readShort() { + checkRead(roff, Short.BYTES); + try { + long offset = address + roff; + roff += Short.BYTES; + return loadShort(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public short getShort(int roff) { + checkGet(roff, Short.BYTES); + try { + long offset = address + roff; + return loadShort(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public int readUnsignedShort() { + return readShort() & 0xFFFF; + } + + @Override + public int getUnsignedShort(int roff) { + return getShort(roff) & 0xFFFF; + } + + @Override + public Buffer writeShort(short value) { + checkWrite(woff, Short.BYTES); + long offset = address + woff; + woff += Short.BYTES; + try { + storeShort(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setShort(int woff, short value) { + checkSet(woff, Short.BYTES); + long offset 
= address + woff; + try { + storeShort(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer writeUnsignedShort(int value) { + checkWrite(woff, Short.BYTES); + long offset = address + woff; + woff += Short.BYTES; + try { + storeShort(offset, (short) (value & 0xFFFF)); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setUnsignedShort(int woff, int value) { + checkSet(woff, Short.BYTES); + long offset = address + woff; + try { + storeShort(offset, (short) (value & 0xFFFF)); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public int readMedium() { + checkRead(roff, 3); + long offset = address + roff; + int value = loadByte(offset) << 16 | (loadByte(offset + 1) & 0xFF) << 8 | loadByte(offset + 2) & 0xFF; + roff += 3; + return value; + } + + @Override + public int getMedium(int roff) { + checkGet(roff, 3); + long offset = address + roff; + return loadByte(offset) << 16 | (loadByte(offset + 1) & 0xFF) << 8 | loadByte(offset + 2) & 0xFF; + } + + @Override + public int readUnsignedMedium() { + checkRead(roff, 3); + long offset = address + roff; + int value = + (loadByte(offset) << 16 | (loadByte(offset + 1) & 0xFF) << 8 | loadByte(offset + 2) & 0xFF) & 0xFFFFFF; + roff += 3; + return value; + } + + @Override + public int getUnsignedMedium(int roff) { + checkGet(roff, 3); + long offset = address + roff; + return (loadByte(offset) << 16 | (loadByte(offset + 1) & 0xFF) << 8 | loadByte(offset + 2) & 0xFF) & 0xFFFFFF; + } + + @Override + public Buffer writeMedium(int value) { + checkWrite(woff, 3); + long offset = address + woff; + storeByte(offset, (byte) (value >> 16)); + storeByte(offset + 1, (byte) (value >> 8 & 0xFF)); + storeByte(offset + 2, (byte) (value & 0xFF)); + woff += 3; + return this; + } + + @Override + public Buffer setMedium(int woff, int value) { + checkSet(woff, 3); + long offset = address + 
woff; + storeByte(offset, (byte) (value >> 16)); + storeByte(offset + 1, (byte) (value >> 8 & 0xFF)); + storeByte(offset + 2, (byte) (value & 0xFF)); + return this; + } + + @Override + public Buffer writeUnsignedMedium(int value) { + checkWrite(woff, 3); + long offset = address + woff; + storeByte(offset, (byte) (value >> 16)); + storeByte(offset + 1, (byte) (value >> 8 & 0xFF)); + storeByte(offset + 2, (byte) (value & 0xFF)); + woff += 3; + return this; + } + + @Override + public Buffer setUnsignedMedium(int woff, int value) { + checkSet(woff, 3); + long offset = address + woff; + storeByte(offset, (byte) (value >> 16)); + storeByte(offset + 1, (byte) (value >> 8 & 0xFF)); + storeByte(offset + 2, (byte) (value & 0xFF)); + return this; + } + + @Override + public int readInt() { + checkRead(roff, Integer.BYTES); + try { + long offset = address + roff; + roff += Integer.BYTES; + return loadInt(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public int getInt(int roff) { + checkGet(roff, Integer.BYTES); + try { + long offset = address + roff; + return loadInt(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public long readUnsignedInt() { + return readInt() & 0x0000_0000_FFFF_FFFFL; + } + + @Override + public long getUnsignedInt(int roff) { + return getInt(roff) & 0x0000_0000_FFFF_FFFFL; + } + + @Override + public Buffer writeInt(int value) { + checkWrite(woff, Integer.BYTES); + long offset = address + woff; + woff += Integer.BYTES; + try { + storeInt(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setInt(int woff, int value) { + checkSet(woff, Integer.BYTES); + long offset = address + woff; + try { + storeInt(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer writeUnsignedInt(long value) { + checkWrite(woff, Integer.BYTES); + long offset = address + 
woff; + woff += Integer.BYTES; + try { + storeInt(offset, (int) (value & 0xFFFF_FFFFL)); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setUnsignedInt(int woff, long value) { + checkSet(woff, Integer.BYTES); + long offset = address + woff; + try { + storeInt(offset, (int) (value & 0xFFFF_FFFFL)); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public float readFloat() { + checkRead(roff, Float.BYTES); + try { + long offset = address + roff; + roff += Float.BYTES; + return loadFloat(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public float getFloat(int roff) { + checkGet(roff, Float.BYTES); + try { + long offset = address + roff; + return loadFloat(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public Buffer writeFloat(float value) { + checkWrite(woff, Float.BYTES); + long offset = address + woff; + woff += Float.BYTES; + try { + storeFloat(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setFloat(int woff, float value) { + checkSet(woff, Float.BYTES); + long offset = address + woff; + try { + storeFloat(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public long readLong() { + checkRead(roff, Long.BYTES); + try { + long offset = address + roff; + roff += Long.BYTES; + return loadLong(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public long getLong(int roff) { + checkGet(roff, Long.BYTES); + try { + long offset = address + roff; + return loadLong(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public Buffer writeLong(long value) { + checkWrite(woff, Long.BYTES); + long offset = address + woff; + woff += Long.BYTES; + try { + storeLong(offset, value); + } finally { + 
Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setLong(int woff, long value) { + checkSet(woff, Long.BYTES); + long offset = address + woff; + try { + storeLong(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public double readDouble() { + checkRead(roff, Double.BYTES); + try { + long offset = address + roff; + roff += Double.BYTES; + return loadDouble(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public double getDouble(int roff) { + checkGet(roff, Double.BYTES); + try { + long offset = address + roff; + return loadDouble(offset); + } finally { + Reference.reachabilityFence(memory); + } + } + + @Override + public Buffer writeDouble(double value) { + checkWrite(woff, Double.BYTES); + long offset = address + woff; + woff += Double.BYTES; + try { + storeDouble(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + + @Override + public Buffer setDouble(int woff, double value) { + checkSet(woff, Double.BYTES); + long offset = address + woff; + try { + storeDouble(offset, value); + } finally { + Reference.reachabilityFence(memory); + } + return this; + } + // + + @Override + protected Owned prepareSend() { + var roff = this.roff; + var woff = this.woff; + var readOnly = readOnly(); + var isConst = constBuffer; + UnsafeMemory memory = this.memory; + AllocatorControl control = this.control; + long baseOffset = this.baseOffset; + int rsize = this.rsize; + makeInaccessible(); + return new Owned() { + @Override + public UnsafeBuffer transferOwnership(Drop drop) { + UnsafeBuffer copy = new UnsafeBuffer(memory, baseOffset, rsize, control, drop); + copy.roff = roff; + copy.woff = woff; + if (readOnly) { + copy.makeReadOnly(); + } + copy.constBuffer = isConst; + return copy; + } + }; + } + + @Override + protected Drop unsafeGetDrop() { + MakeInaccessibleOnDrop drop = (MakeInaccessibleOnDrop) 
super.unsafeGetDrop(); + return drop.delegate; + } + + @Override + protected void unsafeSetDrop(Drop replacement) { + super.unsafeSetDrop(new MakeInaccessibleOnDrop(replacement)); + } + + private static final class MakeInaccessibleOnDrop implements Drop { + final Drop delegate; + + private MakeInaccessibleOnDrop(Drop delegate) { + this.delegate = delegate; + } + + @Override + public void drop(UnsafeBuffer buf) { + try { + delegate.drop(buf); + } finally { + buf.makeInaccessible(); + } + } + + @Override + public void attach(UnsafeBuffer buf) { + delegate.attach(buf); + } + + @Override + public String toString() { + return "MakeInaccessibleOnDrop(" + delegate + ')'; + } + } + + void makeInaccessible() { + roff = 0; + woff = 0; + rsize = CLOSED_SIZE; + wsize = CLOSED_SIZE; + readOnly = false; + } + + @Override + public boolean isOwned() { + return super.isOwned() && ((ArcDrop) unsafeGetDrop()).isOwned(); + } + + @Override + public int countBorrows() { + return super.countBorrows() + ((ArcDrop) unsafeGetDrop()).countBorrows(); + } + + private void checkRead(int index, int size) { + if (index < 0 || woff < index + size) { + throw readAccessCheckException(index); + } + } + + private void checkGet(int index, int size) { + if (index < 0 || rsize < index + size) { + throw readAccessCheckException(index); + } + } + + private void checkWrite(int index, int size) { + if (index < roff || wsize < index + size) { + throw writeAccessCheckException(index); + } + } + + private void checkSet(int index, int size) { + if (index < 0 || wsize < index + size) { + throw writeAccessCheckException(index); + } + } + + private RuntimeException readAccessCheckException(int index) { + if (rsize == CLOSED_SIZE) { + throw bufferIsClosed(this); + } + return outOfBounds(index); + } + + private RuntimeException writeAccessCheckException(int index) { + if (rsize == CLOSED_SIZE) { + throw bufferIsClosed(this); + } + if (wsize != rsize) { + return bufferIsReadOnly(this); + } + return outOfBounds(index); 
+ } + + private IndexOutOfBoundsException outOfBounds(int index) { + return new IndexOutOfBoundsException( + "Index " + index + " is out of bounds: [read 0 to " + woff + ", write 0 to " + + rsize + "]."); + } + + private byte loadByte(long off) { + return PlatformDependent.getByte(base, off); + } + + private char loadChar(long offset) { + if (ACCESS_UNALIGNED) { + var value = PlatformDependent.getChar(base, offset); + return FLIP_BYTES? Character.reverseBytes(value) : value; + } + return loadCharUnaligned(offset); + } + + private char loadCharUnaligned(long offset) { + final char value; + Object b = base; + if ((offset & 1) == 0) { + value = PlatformDependent.getChar(b, offset); + } else { + value = (char) (PlatformDependent.getByte(b, offset) << 8 | + PlatformDependent.getByte(b, offset + 1)); + } + return FLIP_BYTES? Character.reverseBytes(value) : value; + } + + private short loadShort(long offset) { + if (ACCESS_UNALIGNED) { + var value = PlatformDependent.getShort(base, offset); + return FLIP_BYTES? Short.reverseBytes(value) : value; + } + return loadShortUnaligned(offset); + } + + private short loadShortUnaligned(long offset) { + final short value; + Object b = base; + if ((offset & 1) == 0) { + value = PlatformDependent.getShort(b, offset); + } else { + value = (short) (PlatformDependent.getByte(b, offset) << 8 | + PlatformDependent.getByte(b, offset + 1)); + } + return FLIP_BYTES? Short.reverseBytes(value) : value; + } + + private int loadInt(long offset) { + if (ACCESS_UNALIGNED) { + var value = PlatformDependent.getInt(base, offset); + return FLIP_BYTES? 
Integer.reverseBytes(value) : value; + } + return loadIntUnaligned(offset); + } + + private int loadIntUnaligned(long offset) { + final int value; + Object b = base; + if ((offset & 3) == 0) { + value = PlatformDependent.getInt(b, offset); + } else if ((offset & 1) == 0) { + value = PlatformDependent.getShort(b, offset) << 16 | + PlatformDependent.getShort(b, offset + 2); + } else { + value = PlatformDependent.getByte(b, offset) << 24 | + PlatformDependent.getByte(b, offset + 1) << 16 | + PlatformDependent.getByte(b, offset + 2) << 8 | + PlatformDependent.getByte(b, offset + 3); + } + return FLIP_BYTES? Integer.reverseBytes(value) : value; + } + + private float loadFloat(long offset) { + if (ACCESS_UNALIGNED) { + if (FLIP_BYTES) { + var value = PlatformDependent.getInt(base, offset); + return Float.intBitsToFloat(Integer.reverseBytes(value)); + } + return PlatformDependent.getFloat(base, offset); + } + return loadFloatUnaligned(offset); + } + + private float loadFloatUnaligned(long offset) { + return Float.intBitsToFloat(loadIntUnaligned(offset)); + } + + private long loadLong(long offset) { + if (ACCESS_UNALIGNED) { + var value = PlatformDependent.getLong(base, offset); + return FLIP_BYTES? 
Long.reverseBytes(value) : value; + } + return loadLongUnaligned(offset); + } + + private long loadLongUnaligned(long offset) { + final long value; + Object b = base; + if ((offset & 7) == 0) { + value = PlatformDependent.getLong(b, offset); + } else if ((offset & 3) == 0) { + value = (long) PlatformDependent.getInt(b, offset) << 32 | + PlatformDependent.getInt(b, offset + 4); + } else if ((offset & 1) == 0) { + value = (long) PlatformDependent.getShort(b, offset) << 48 | + (long) PlatformDependent.getShort(b, offset + 2) << 32 | + (long) PlatformDependent.getShort(b, offset + 4) << 16 | + PlatformDependent.getShort(b, offset + 6); + } else { + value = (long) PlatformDependent.getByte(b, offset) << 54 | + (long) PlatformDependent.getByte(b, offset + 1) << 48 | + (long) PlatformDependent.getByte(b, offset + 2) << 40 | + (long) PlatformDependent.getByte(b, offset + 3) << 32 | + (long) PlatformDependent.getByte(b, offset + 4) << 24 | + (long) PlatformDependent.getByte(b, offset + 5) << 16 | + (long) PlatformDependent.getByte(b, offset + 6) << 8 | + PlatformDependent.getByte(b, offset + 7); + } + return FLIP_BYTES? 
Long.reverseBytes(value) : value; + } + + private double loadDouble(long offset) { + if (ACCESS_UNALIGNED) { + if (FLIP_BYTES) { + var value = PlatformDependent.getLong(base, offset); + return Double.longBitsToDouble(Long.reverseBytes(value)); + } + return PlatformDependent.getDouble(base, offset); + } + return loadDoubleUnaligned(offset); + } + + private double loadDoubleUnaligned(long offset) { + return Double.longBitsToDouble(loadLongUnaligned(offset)); + } + + private void storeByte(long offset, byte value) { + PlatformDependent.putByte(base, offset, value); + } + + private void storeChar(long offset, char value) { + if (FLIP_BYTES) { + value = Character.reverseBytes(value); + } + if (ACCESS_UNALIGNED) { + PlatformDependent.putChar(base, offset, value); + } else { + storeCharUnaligned(offset, value); + } + } + + private void storeCharUnaligned(long offset, char value) { + Object b = base; + if ((offset & 1) == 0) { + PlatformDependent.putChar(b, offset, value); + } else { + PlatformDependent.putByte(b, offset, (byte) (value >> 8)); + PlatformDependent.putByte(b, offset + 1, (byte) value); + } + } + + private void storeShort(long offset, short value) { + if (FLIP_BYTES) { + value = Short.reverseBytes(value); + } + if (ACCESS_UNALIGNED) { + PlatformDependent.putShort(base, offset, value); + } else { + storeShortUnaligned(offset, value); + } + } + + private void storeShortUnaligned(long offset, short value) { + Object b = base; + if ((offset & 1) == 0) { + PlatformDependent.putShort(b, offset, value); + } else { + PlatformDependent.putByte(b, offset, (byte) (value >> 8)); + PlatformDependent.putByte(b, offset + 1, (byte) value); + } + } + + private void storeInt(long offset, int value) { + if (FLIP_BYTES) { + value = Integer.reverseBytes(value); + } + if (ACCESS_UNALIGNED) { + PlatformDependent.putInt(base, offset, value); + } else { + storeIntUnaligned(offset, value); + } + } + + private void storeIntUnaligned(long offset, int value) { + Object b = base; + if 
((offset & 3) == 0) { + PlatformDependent.putInt(b, offset, value); + } else if ((offset & 1) == 0) { + PlatformDependent.putShort(b, offset, (short) (value >> 16)); + PlatformDependent.putShort(b, offset + 2, (short) value); + } else { + PlatformDependent.putByte(b, offset, (byte) (value >> 24)); + PlatformDependent.putByte(b, offset + 1, (byte) (value >> 16)); + PlatformDependent.putByte(b, offset + 2, (byte) (value >> 8)); + PlatformDependent.putByte(b, offset + 3, (byte) value); + } + } + + private void storeFloat(long offset, float value) { + storeInt(offset, Float.floatToRawIntBits(value)); + } + + private void storeLong(long offset, long value) { + if (FLIP_BYTES) { + value = Long.reverseBytes(value); + } + if (ACCESS_UNALIGNED) { + PlatformDependent.putLong(base, offset, value); + } else { + storeLongUnaligned(offset, value); + } + } + + private void storeLongUnaligned(long offset, long value) { + Object b = base; + if ((offset & 7) == 0) { + PlatformDependent.putLong(b, offset, value); + } else if ((offset & 3) == 0) { + PlatformDependent.putInt(b, offset, (int) (value >> 32)); + PlatformDependent.putInt(b, offset + 4, (int) value); + } else if ((offset & 1) == 0) { + PlatformDependent.putShort(b, offset, (short) (value >> 48)); + PlatformDependent.putShort(b, offset + 16, (short) (value >> 32)); + PlatformDependent.putShort(b, offset + 32, (short) (value >> 16)); + PlatformDependent.putShort(b, offset + 48, (short) value); + } else { + PlatformDependent.putByte(b, offset, (byte) (value >> 56)); + PlatformDependent.putByte(b, offset + 1, (byte) (value >> 48)); + PlatformDependent.putByte(b, offset + 2, (byte) (value >> 40)); + PlatformDependent.putByte(b, offset + 3, (byte) (value >> 32)); + PlatformDependent.putByte(b, offset + 4, (byte) (value >> 24)); + PlatformDependent.putByte(b, offset + 5, (byte) (value >> 16)); + PlatformDependent.putByte(b, offset + 6, (byte) (value >> 8)); + PlatformDependent.putByte(b, offset + 7, (byte) value); + } + } + + 
private void storeDouble(long offset, double value) { + storeLong(offset, Double.doubleToRawLongBits(value)); + } + + Object recover() { + return memory; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeCleanerDrop.java b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeCleanerDrop.java new file mode 100644 index 00000000000..a19d4b4572a --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeCleanerDrop.java @@ -0,0 +1,60 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.unsafe; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.internal.Statics; +import io.netty.util.internal.PlatformDependent; + +import java.lang.ref.Cleaner; + +public class UnsafeCleanerDrop implements Drop { + private final Drop drop; + + public UnsafeCleanerDrop(UnsafeMemory memory, Drop drop, Cleaner cleaner) { + this.drop = drop; + long address = memory.address; + int size = memory.size; + cleaner.register(memory, new FreeAddress(address, size)); + } + + @Override + public void drop(Buffer obj) { + drop.drop(obj); + } + + @Override + public void attach(Buffer obj) { + drop.attach(obj); + } + + private static class FreeAddress implements Runnable { + private final long address; + private final int size; + + FreeAddress(long address, int size) { + this.address = address; + this.size = size; + } + + @Override + public void run() { + PlatformDependent.freeMemory(address); + Statics.MEM_USAGE_NATIVE.add(-size); + } + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeMemory.java b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeMemory.java new file mode 100644 index 00000000000..27de302d386 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeMemory.java @@ -0,0 +1,32 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.unsafe; + +class UnsafeMemory { + final Object base; + final long address; + final int size; + + UnsafeMemory(Object base, long address, int size) { + this.base = base; + this.address = address; + this.size = size; + } + + public UnsafeMemory slice(int offset, int length) { + return new UnsafeMemory(base, address + offset, length); + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeMemoryManager.java b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeMemoryManager.java new file mode 100644 index 00000000000..861c65534b7 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/unsafe/UnsafeMemoryManager.java @@ -0,0 +1,101 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.unsafe; + +import io.netty.buffer.api.AllocationType; +import io.netty.buffer.api.AllocatorControl; +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.Drop; +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.StandardAllocationTypes; +import io.netty.buffer.api.internal.Statics; +import io.netty.util.internal.PlatformDependent; + +import java.lang.ref.Cleaner; + +import static io.netty.buffer.api.internal.Statics.convert; + +public class UnsafeMemoryManager implements MemoryManager { + public UnsafeMemoryManager() { + if (!PlatformDependent.hasUnsafe()) { + throw new UnsupportedOperationException("Unsafe is not available."); + } + if (!PlatformDependent.hasDirectBufferNoCleanerConstructor()) { + throw new UnsupportedOperationException("DirectByteBuffer internal constructor is not available."); + } + } + + @Override + public Buffer allocateShared(AllocatorControl allocatorControl, long size, Drop drop, Cleaner cleaner, + AllocationType allocationType) { + final Object base; + final long address; + final UnsafeMemory memory; + final int size32 = Math.toIntExact(size); + if (cleaner == null) { + cleaner = Statics.CLEANER; + } + if (allocationType == StandardAllocationTypes.OFF_HEAP) { + base = null; + address = PlatformDependent.allocateMemory(size); + Statics.MEM_USAGE_NATIVE.add(size); + PlatformDependent.setMemory(address, size, (byte) 0); + memory = new UnsafeMemory(base, address, size32); + drop = new UnsafeCleanerDrop(memory, drop, cleaner); + } else if (allocationType == StandardAllocationTypes.ON_HEAP) { + base = new byte[size32]; + address = PlatformDependent.byteArrayBaseOffset(); + memory = new UnsafeMemory(base, address, size32); + } else { + throw new IllegalArgumentException("Unknown allocation type: " + allocationType); + } + return new UnsafeBuffer(memory, 0, size32, allocatorControl, convert(drop)); + } + + @Override + public Buffer allocateConstChild(Buffer readOnlyConstParent) { + 
assert readOnlyConstParent.readOnly(); + UnsafeBuffer buf = (UnsafeBuffer) readOnlyConstParent; + return new UnsafeBuffer(buf); + } + + @Override + public Drop drop() { + // We cannot reliably drop unsafe memory. We have to rely on the cleaner to do that. + return Statics.NO_OP_DROP; + } + + @Override + public Object unwrapRecoverableMemory(Buffer buf) { + return ((UnsafeBuffer) buf).recover(); + } + + @Override + public Buffer recoverMemory(AllocatorControl allocatorControl, Object recoverableMemory, Drop drop) { + UnsafeMemory memory = (UnsafeMemory) recoverableMemory; + return new UnsafeBuffer(memory, 0, memory.size, allocatorControl, convert(drop)); + } + + @Override + public Object sliceMemory(Object memory, int offset, int length) { + return ((UnsafeMemory) memory).slice(offset, length); + } + + @Override + public String implementationName() { + return "Unsafe"; + } +} diff --git a/buffer/src/main/java/io/netty/buffer/api/unsafe/package-info.java b/buffer/src/main/java/io/netty/buffer/api/unsafe/package-info.java new file mode 100644 index 00000000000..3fd081ad154 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/api/unsafe/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * A {@link io.netty.buffer.api.Buffer} implementation that is based on {@code sun.misc.Unsafe}. 
+ */ +package io.netty.buffer.api.unsafe; diff --git a/buffer/src/main/java/io/netty/buffer/package-info.java b/buffer/src/main/java/io/netty/buffer/package-info.java index cc0d28851a7..4ed7939e509 100644 --- a/buffer/src/main/java/io/netty/buffer/package-info.java +++ b/buffer/src/main/java/io/netty/buffer/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/main/java/io/netty/buffer/search/AbstractMultiSearchProcessorFactory.java b/buffer/src/main/java/io/netty/buffer/search/AbstractMultiSearchProcessorFactory.java new file mode 100644 index 00000000000..e05a497d5c5 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/AbstractMultiSearchProcessorFactory.java @@ -0,0 +1,94 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +/** + * Base class for precomputed factories that create {@link MultiSearchProcessor}s. + *
    + * The purpose of {@link MultiSearchProcessor} is to perform efficient simultaneous search for multiple {@code needles} + * in the {@code haystack}, while scanning every byte of the input sequentially, only once. While it can also be used + * to search for just a single {@code needle}, using a {@link SearchProcessorFactory} would be more efficient for + * doing that. + *
    + * See the documentation of {@link AbstractSearchProcessorFactory} for a comprehensive description of common usage. + * In addition to the functionality provided by {@link SearchProcessor}, {@link MultiSearchProcessor} adds + * a method to get the index of the {@code needle} found at the current position of the {@link MultiSearchProcessor} - + * {@link MultiSearchProcessor#getFoundNeedleId()}. + *
    + * Note: in some cases one {@code needle} can be a suffix of another {@code needle}, eg. {@code {"BC", "ABC"}}, + * and there can potentially be multiple {@code needles} found ending at the same position of the {@code haystack}. + * In such case {@link MultiSearchProcessor#getFoundNeedleId()} returns the index of the longest matching {@code needle} + * in the array of {@code needles}. + *
    + * Usage example (given that the {@code haystack} is a {@link io.netty.buffer.ByteBuf} containing "ABCD" and the + * {@code needles} are "AB", "BC" and "CD"): + *

    + *      MultiSearchProcessorFactory factory = MultiSearchProcessorFactory.newAhoCorasicSearchProcessorFactory(
    + *          "AB".getBytes(CharsetUtil.UTF_8), "BC".getBytes(CharsetUtil.UTF_8), "CD".getBytes(CharsetUtil.UTF_8));
    + *      MultiSearchProcessor processor = factory.newSearchProcessor();
    + *
    + *      int idx1 = haystack.forEachByte(processor);
    + *      // idx1 is 1 (index of the last character of the occurrence of "AB" in the haystack)
    + *      // processor.getFoundNeedleId() is 0 (index of "AB" in needles[])
    + *
    + *      int continueFrom1 = idx1 + 1;
    + *      // continue the search starting from the next character
    + *
    + *      int idx2 = haystack.forEachByte(continueFrom1, haystack.readableBytes() - continueFrom1, processor);
    + *      // idx2 is 2 (index of the last character of the occurrence of "BC" in the haystack)
    + *      // processor.getFoundNeedleId() is 1 (index of "BC" in needles[])
    + *
    + *      int continueFrom2 = idx2 + 1;
    + *
    + *      int idx3 = haystack.forEachByte(continueFrom2, haystack.readableBytes() - continueFrom2, processor);
    + *      // idx3 is 3 (index of the last character of the occurrence of "CD" in the haystack)
    + *      // processor.getFoundNeedleId() is 2 (index of "CD" in needles[])
    + *
    + *      int continueFrom3 = idx3 + 1;
    + *
    + *      int idx4 = haystack.forEachByte(continueFrom3, haystack.readableBytes() - continueFrom3, processor);
    + *      // idx4 is -1 (no more occurrences of any of the needles)
    + *
    + *      // This search session is complete, processor should be discarded.
    + *      // To search for the same needles again, reuse the same {@link AbstractMultiSearchProcessorFactory}
    + *      // to get a new MultiSearchProcessor.
    + * 
    + */ +public abstract class AbstractMultiSearchProcessorFactory implements MultiSearchProcessorFactory { + + /** + * Creates a {@link MultiSearchProcessorFactory} based on + * Aho–Corasick + * string search algorithm. + *
+ * Precomputation (this method) time is linear in the size of input ({@code O(Σ|needles|)}). + *
    + * The factory allocates and retains an array of 256 * X ints plus another array of X ints, where X + * is the sum of lengths of each entry of {@code needles} minus the sum of lengths of repeated + * prefixes of the {@code needles}. + *
+ * Search (the actual application of {@link MultiSearchProcessor}) time is linear in the size of + * {@link io.netty.buffer.ByteBuf} on which the search is performed ({@code O(|haystack|)}). + * Every byte of {@link io.netty.buffer.ByteBuf} is processed only once, sequentially, regardless of + * the number of {@code needles} being searched for. + * + * @param needles a varargs array of arrays of bytes to search for + * @return a new instance of {@link AhoCorasicSearchProcessorFactory} precomputed for the given {@code needles} + */ + public static AhoCorasicSearchProcessorFactory newAhoCorasicSearchProcessorFactory(byte[] ...needles) { + return new AhoCorasicSearchProcessorFactory(needles); + } + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/AbstractSearchProcessorFactory.java b/buffer/src/main/java/io/netty/buffer/search/AbstractSearchProcessorFactory.java new file mode 100644 index 00000000000..f76b2d3e53e --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/AbstractSearchProcessorFactory.java @@ -0,0 +1,115 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +/** + * Base class for precomputed factories that create {@link SearchProcessor}s. + *
    + * Different factories implement different search algorithms with performance characteristics that + * depend on a use case, so it is advisable to benchmark a concrete use case with different algorithms + * before choosing one of them. + *
    + * A concrete instance of {@link AbstractSearchProcessorFactory} is built for searching for a concrete sequence of bytes + * (the {@code needle}), it contains precomputed data needed to perform the search, and is meant to be reused + * whenever searching for the same {@code needle}. + *
    + * Note: implementations of {@link SearchProcessor} scan the {@link io.netty.buffer.ByteBuf} sequentially, + * one byte after another, without doing any random access. As a result, when using {@link SearchProcessor} + * with such methods as {@link io.netty.buffer.ByteBuf#forEachByte}, these methods return the index of the last byte + * of the found byte sequence within the {@link io.netty.buffer.ByteBuf} (which might feel counterintuitive, + * and different from {@link io.netty.buffer.ByteBufUtil#indexOf} which returns the index of the first byte + * of found sequence). + *
    + * A {@link SearchProcessor} is implemented as a + * Finite State Automaton that contains a + * small internal state which is updated with every byte processed. As a result, an instance of {@link SearchProcessor} + * should not be reused across independent search sessions (eg. for searching in different + * {@link io.netty.buffer.ByteBuf}s). A new instance should be created with {@link AbstractSearchProcessorFactory} for + * every search session. However, a {@link SearchProcessor} can (and should) be reused within the search session, + * eg. when searching for all occurrences of the {@code needle} within the same {@code haystack}. That way, it can + * also detect overlapping occurrences of the {@code needle} (eg. a string "ABABAB" contains two occurrences of "BAB" + * that overlap by one character "B"). For this to work correctly, after an occurrence of the {@code needle} is + * found ending at index {@code idx}, the search should continue starting from the index {@code idx + 1}. + *
    + * Example (given that the {@code haystack} is a {@link io.netty.buffer.ByteBuf} containing "ABABAB" and + * the {@code needle} is "BAB"): + *
    + *     SearchProcessorFactory factory =
    + *         SearchProcessorFactory.newKmpSearchProcessorFactory(needle.getBytes(CharsetUtil.UTF_8));
    + *     SearchProcessor processor = factory.newSearchProcessor();
    + *
    + *     int idx1 = haystack.forEachByte(processor);
    + *     // idx1 is 3 (index of the last character of the first occurrence of the needle in the haystack)
    + *
    + *     int continueFrom1 = idx1 + 1;
    + *     // continue the search starting from the next character
    + *
    + *     int idx2 = haystack.forEachByte(continueFrom1, haystack.readableBytes() - continueFrom1, processor);
    + *     // idx2 is 5 (index of the last character of the second occurrence of the needle in the haystack)
    + *
    + *     int continueFrom2 = idx2 + 1;
    + *     // continue the search starting from the next character
    + *
    + *     int idx3 = haystack.forEachByte(continueFrom2, haystack.readableBytes() - continueFrom2, processor);
    + *     // idx3 is -1 (no more occurrences of the needle)
    + *
    + *     // After this search session is complete, processor should be discarded.
    + *     // To search for the same needle again, reuse the same factory to get a new SearchProcessor.
    + * 
    + */ +public abstract class AbstractSearchProcessorFactory implements SearchProcessorFactory { + + /** + * Creates a {@link SearchProcessorFactory} based on + * Knuth-Morris-Pratt + * string search algorithm. It is a reasonable default choice among the provided algorithms. + *
    + * Precomputation (this method) time is linear in the size of input ({@code O(|needle|)}). + *
    + * The factory allocates and retains an int array of size {@code needle.length + 1}, and retains a reference + * to the {@code needle} itself. + *
    + * Search (the actual application of {@link SearchProcessor}) time is linear in the size of + * {@link io.netty.buffer.ByteBuf} on which the search is performed ({@code O(|haystack|)}). + * Every byte of {@link io.netty.buffer.ByteBuf} is processed only once, sequentially. + * + * @param needle an array of bytes to search for + * @return a new instance of {@link KmpSearchProcessorFactory} precomputed for the given {@code needle} + */ + public static KmpSearchProcessorFactory newKmpSearchProcessorFactory(byte[] needle) { + return new KmpSearchProcessorFactory(needle); + } + + /** + * Creates a {@link SearchProcessorFactory} based on Bitap string search algorithm. + * It is a jump free algorithm that has very stable performance (the contents of the inputs have a minimal + * effect on it). The limitation is that the {@code needle} can be no more than 64 bytes long. + *
    + * Precomputation (this method) time is linear in the size of the input ({@code O(|needle|)}). + *
    + * The factory allocates and retains a long[256] array. + *
    + * Search (the actual application of {@link SearchProcessor}) time is linear in the size of + * {@link io.netty.buffer.ByteBuf} on which the search is performed ({@code O(|haystack|)}). + * Every byte of {@link io.netty.buffer.ByteBuf} is processed only once, sequentially. + * + * @param needle an array of no more than 64 bytes to search for + * @return a new instance of {@link BitapSearchProcessorFactory} precomputed for the given {@code needle} + */ + public static BitapSearchProcessorFactory newBitapSearchProcessorFactory(byte[] needle) { + return new BitapSearchProcessorFactory(needle); + } + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/AhoCorasicSearchProcessorFactory.java b/buffer/src/main/java/io/netty/buffer/search/AhoCorasicSearchProcessorFactory.java new file mode 100644 index 00000000000..5ee27c140c3 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/AhoCorasicSearchProcessorFactory.java @@ -0,0 +1,191 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +import io.netty.util.internal.PlatformDependent; + +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Queue; + +/** + * Implements Aho–Corasick + * string search algorithm. 
+ * Use static {@link AbstractMultiSearchProcessorFactory#newAhoCorasicSearchProcessorFactory} + * to create an instance of this factory. + * Use {@link AhoCorasicSearchProcessorFactory#newSearchProcessor} to get an instance of + * {@link io.netty.util.ByteProcessor} implementation for performing the actual search. + * @see AbstractMultiSearchProcessorFactory + */ +public class AhoCorasicSearchProcessorFactory extends AbstractMultiSearchProcessorFactory { + + private final int[] jumpTable; + private final int[] matchForNeedleId; + + static final int BITS_PER_SYMBOL = 8; + static final int ALPHABET_SIZE = 1 << BITS_PER_SYMBOL; + + private static class Context { + int[] jumpTable; + int[] matchForNeedleId; + } + + public static class Processor implements MultiSearchProcessor { + + private final int[] jumpTable; + private final int[] matchForNeedleId; + private long currentPosition; + + Processor(int[] jumpTable, int[] matchForNeedleId) { + this.jumpTable = jumpTable; + this.matchForNeedleId = matchForNeedleId; + } + + @Override + public boolean process(byte value) { + currentPosition = PlatformDependent.getInt(jumpTable, currentPosition | (value & 0xffL)); + if (currentPosition < 0) { + currentPosition = -currentPosition; + return false; + } + return true; + } + + @Override + public int getFoundNeedleId() { + return matchForNeedleId[(int) currentPosition >> AhoCorasicSearchProcessorFactory.BITS_PER_SYMBOL]; + } + + @Override + public void reset() { + currentPosition = 0; + } + } + + AhoCorasicSearchProcessorFactory(byte[] ...needles) { + + for (byte[] needle: needles) { + if (needle.length == 0) { + throw new IllegalArgumentException("Needle must be non empty"); + } + } + + Context context = buildTrie(needles); + jumpTable = context.jumpTable; + matchForNeedleId = context.matchForNeedleId; + + linkSuffixes(); + + for (int i = 0; i < jumpTable.length; i++) { + if (matchForNeedleId[jumpTable[i] >> BITS_PER_SYMBOL] >= 0) { + jumpTable[i] = -jumpTable[i]; + } + } + } + + 
private static Context buildTrie(byte[][] needles) { + + ArrayList jumpTableBuilder = new ArrayList(ALPHABET_SIZE); + for (int i = 0; i < ALPHABET_SIZE; i++) { + jumpTableBuilder.add(-1); + } + + ArrayList matchForBuilder = new ArrayList(); + matchForBuilder.add(-1); + + for (int needleId = 0; needleId < needles.length; needleId++) { + byte[] needle = needles[needleId]; + int currentPosition = 0; + + for (byte ch0: needle) { + + final int ch = ch0 & 0xff; + final int next = currentPosition + ch; + + if (jumpTableBuilder.get(next) == -1) { + jumpTableBuilder.set(next, jumpTableBuilder.size()); + for (int i = 0; i < ALPHABET_SIZE; i++) { + jumpTableBuilder.add(-1); + } + matchForBuilder.add(-1); + } + + currentPosition = jumpTableBuilder.get(next); + } + + matchForBuilder.set(currentPosition >> BITS_PER_SYMBOL, needleId); + } + + Context context = new Context(); + + context.jumpTable = new int[jumpTableBuilder.size()]; + for (int i = 0; i < jumpTableBuilder.size(); i++) { + context.jumpTable[i] = jumpTableBuilder.get(i); + } + + context.matchForNeedleId = new int[matchForBuilder.size()]; + for (int i = 0; i < matchForBuilder.size(); i++) { + context.matchForNeedleId[i] = matchForBuilder.get(i); + } + + return context; + } + + private void linkSuffixes() { + + Queue queue = new ArrayDeque(); + queue.add(0); + + int[] suffixLinks = new int[matchForNeedleId.length]; + Arrays.fill(suffixLinks, -1); + + while (!queue.isEmpty()) { + + final int v = queue.remove(); + int vPosition = v >> BITS_PER_SYMBOL; + final int u = suffixLinks[vPosition] == -1 ? 0 : suffixLinks[vPosition]; + + if (matchForNeedleId[vPosition] == -1) { + matchForNeedleId[vPosition] = matchForNeedleId[u >> BITS_PER_SYMBOL]; + } + + for (int ch = 0; ch < ALPHABET_SIZE; ch++) { + + final int vIndex = v | ch; + final int uIndex = u | ch; + + final int jumpV = jumpTable[vIndex]; + final int jumpU = jumpTable[uIndex]; + + if (jumpV != -1) { + suffixLinks[jumpV >> BITS_PER_SYMBOL] = v > 0 && jumpU != -1 ? 
jumpU : 0; + queue.add(jumpV); + } else { + jumpTable[vIndex] = jumpU != -1 ? jumpU : 0; + } + } + } + } + + /** + * Returns a new {@link Processor}. + */ + @Override + public Processor newSearchProcessor() { + return new Processor(jumpTable, matchForNeedleId); + } + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/BitapSearchProcessorFactory.java b/buffer/src/main/java/io/netty/buffer/search/BitapSearchProcessorFactory.java new file mode 100644 index 00000000000..bb4a7c531e8 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/BitapSearchProcessorFactory.java @@ -0,0 +1,77 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +import io.netty.util.internal.PlatformDependent; + +/** + * Implements Bitap string search algorithm. + * Use static {@link AbstractSearchProcessorFactory#newBitapSearchProcessorFactory} + * to create an instance of this factory. + * Use {@link BitapSearchProcessorFactory#newSearchProcessor} to get an instance of {@link io.netty.util.ByteProcessor} + * implementation for performing the actual search. 
+ * @see AbstractSearchProcessorFactory + */ +public class BitapSearchProcessorFactory extends AbstractSearchProcessorFactory { + + private final long[] bitMasks = new long[256]; + private final long successBit; + + public static class Processor implements SearchProcessor { + + private final long[] bitMasks; + private final long successBit; + private long currentMask; + + Processor(long[] bitMasks, long successBit) { + this.bitMasks = bitMasks; + this.successBit = successBit; + } + + @Override + public boolean process(byte value) { + currentMask = ((currentMask << 1) | 1) & PlatformDependent.getLong(bitMasks, value & 0xffL); + return (currentMask & successBit) == 0; + } + + @Override + public void reset() { + currentMask = 0; + } + } + + BitapSearchProcessorFactory(byte[] needle) { + if (needle.length > 64) { + throw new IllegalArgumentException("Maximum supported search pattern length is 64, got " + needle.length); + } + + long bit = 1L; + for (byte c: needle) { + bitMasks[c & 0xff] |= bit; + bit <<= 1; + } + + successBit = 1L << (needle.length - 1); + } + + /** + * Returns a new {@link Processor}. + */ + @Override + public Processor newSearchProcessor() { + return new Processor(bitMasks, successBit); + } + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/KmpSearchProcessorFactory.java b/buffer/src/main/java/io/netty/buffer/search/KmpSearchProcessorFactory.java new file mode 100644 index 00000000000..5b16b7f8429 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/KmpSearchProcessorFactory.java @@ -0,0 +1,91 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +import io.netty.util.internal.PlatformDependent; + +/** + * Implements + * Knuth-Morris-Pratt + * string search algorithm. + * Use static {@link AbstractSearchProcessorFactory#newKmpSearchProcessorFactory} + * to create an instance of this factory. + * Use {@link KmpSearchProcessorFactory#newSearchProcessor} to get an instance of {@link io.netty.util.ByteProcessor} + * implementation for performing the actual search. + * @see AbstractSearchProcessorFactory + */ +public class KmpSearchProcessorFactory extends AbstractSearchProcessorFactory { + + private final int[] jumpTable; + private final byte[] needle; + + public static class Processor implements SearchProcessor { + + private final byte[] needle; + private final int[] jumpTable; + private long currentPosition; + + Processor(byte[] needle, int[] jumpTable) { + this.needle = needle; + this.jumpTable = jumpTable; + } + + @Override + public boolean process(byte value) { + while (currentPosition > 0 && PlatformDependent.getByte(needle, currentPosition) != value) { + currentPosition = PlatformDependent.getInt(jumpTable, currentPosition); + } + if (PlatformDependent.getByte(needle, currentPosition) == value) { + currentPosition++; + } + if (currentPosition == needle.length) { + currentPosition = PlatformDependent.getInt(jumpTable, currentPosition); + return false; + } + + return true; + } + + @Override + public void reset() { + currentPosition = 0; + } + } + + KmpSearchProcessorFactory(byte[] needle) { + this.needle = needle.clone(); + this.jumpTable = new 
int[needle.length + 1]; + + int j = 0; + for (int i = 1; i < needle.length; i++) { + while (j > 0 && needle[j] != needle[i]) { + j = jumpTable[j]; + } + if (needle[j] == needle[i]) { + j++; + } + jumpTable[i + 1] = j; + } + } + + /** + * Returns a new {@link Processor}. + */ + @Override + public Processor newSearchProcessor() { + return new Processor(needle, jumpTable); + } + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/MultiSearchProcessor.java b/buffer/src/main/java/io/netty/buffer/search/MultiSearchProcessor.java new file mode 100644 index 00000000000..f7e99876f23 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/MultiSearchProcessor.java @@ -0,0 +1,28 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +/** + * Interface for {@link SearchProcessor} that implements simultaneous search for multiple strings. 
+ * @see MultiSearchProcessorFactory + */ +public interface MultiSearchProcessor extends SearchProcessor { + + /** + * @return the index of found search string (if any, or -1 if none) at current position of this MultiSearchProcessor + */ + int getFoundNeedleId(); + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/MultiSearchProcessorFactory.java b/buffer/src/main/java/io/netty/buffer/search/MultiSearchProcessorFactory.java new file mode 100644 index 00000000000..176ea8a12e9 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/MultiSearchProcessorFactory.java @@ -0,0 +1,25 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +public interface MultiSearchProcessorFactory extends SearchProcessorFactory { + + /** + * Returns a new {@link MultiSearchProcessor}. 
+ */ + @Override + MultiSearchProcessor newSearchProcessor(); + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/SearchProcessor.java b/buffer/src/main/java/io/netty/buffer/search/SearchProcessor.java new file mode 100644 index 00000000000..baefd251508 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/SearchProcessor.java @@ -0,0 +1,30 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +import io.netty.util.ByteProcessor; + +/** + * Interface for {@link ByteProcessor} that implements string search. + * @see SearchProcessorFactory + */ +public interface SearchProcessor extends ByteProcessor { + + /** + * Resets the state of SearchProcessor. + */ + void reset(); + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/SearchProcessorFactory.java b/buffer/src/main/java/io/netty/buffer/search/SearchProcessorFactory.java new file mode 100644 index 00000000000..17679d74b7d --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/SearchProcessorFactory.java @@ -0,0 +1,24 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the + * "License"); you may not use this file except in compliance with the License. 
You may obtain a + * copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License + * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express + * or implied. See the License for the specific language governing permissions and limitations under + * the License. + */ +package io.netty.buffer.search; + +public interface SearchProcessorFactory { + + /** + * Returns a new {@link SearchProcessor}. + */ + SearchProcessor newSearchProcessor(); + +} diff --git a/buffer/src/main/java/io/netty/buffer/search/package-info.java b/buffer/src/main/java/io/netty/buffer/search/package-info.java new file mode 100644 index 00000000000..630e341b2d9 --- /dev/null +++ b/buffer/src/main/java/io/netty/buffer/search/package-info.java @@ -0,0 +1,20 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +/** + * Utility classes for performing efficient substring search within {@link io.netty.buffer.ByteBuf}. 
+ */ +package io.netty.buffer.search; diff --git a/buffer/src/main/resources/META-INF/native-image/io.netty/buffer/native-image.properties b/buffer/src/main/resources/META-INF/native-image/io.netty/buffer/native-image.properties new file mode 100644 index 00000000000..4a422fde1ac --- /dev/null +++ b/buffer/src/main/resources/META-INF/native-image/io.netty/buffer/native-image.properties @@ -0,0 +1,15 @@ +# Copyright 2019 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +Args = --initialize-at-run-time=io.netty.buffer.PooledByteBufAllocator,io.netty.buffer.ByteBufAllocator,io.netty.buffer.ByteBufUtil,io.netty.buffer.AbstractReferenceCountedByteBuf diff --git a/buffer/src/main/resources/META-INF/services/io.netty.buffer.api.MemoryManager b/buffer/src/main/resources/META-INF/services/io.netty.buffer.api.MemoryManager new file mode 100644 index 00000000000..b885a62c29d --- /dev/null +++ b/buffer/src/main/resources/META-INF/services/io.netty.buffer.api.MemoryManager @@ -0,0 +1,2 @@ +io.netty.buffer.api.bytebuffer.ByteBufferMemoryManager +io.netty.buffer.api.unsafe.UnsafeMemoryManager diff --git a/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java index 8e0f21b0201..e2f3ab72ef3 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractByteBufAllocatorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,11 +16,11 @@ package io.netty.buffer; import io.netty.util.internal.PlatformDependent; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public abstract class AbstractByteBufAllocatorTest extends ByteBufAllocatorTest { @@ -108,7 +108,7 @@ public void testUsedDirectMemory() { // Double the size of the buffer buffer.capacity(capacity << 1); capacity = buffer.capacity(); - assertEquals(buffer.toString(), expectedUsedMemory(allocator, capacity), metric.usedDirectMemory()); + assertEquals(expectedUsedMemory(allocator, capacity), metric.usedDirectMemory(), buffer.toString()); buffer.release(); assertEquals(expectedUsedMemoryAfterRelease(allocator, capacity), metric.usedDirectMemory()); diff --git a/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java index e307f25baf9..dcac9b3c4eb 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,9 +19,10 @@ import io.netty.util.CharsetUtil; import io.netty.util.IllegalReferenceCountException; import io.netty.util.internal.PlatformDependent; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.io.ByteArrayInputStream; import java.io.ByteArrayOutputStream; @@ -30,6 +31,7 @@ import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.ByteOrder; +import java.nio.CharBuffer; import java.nio.ReadOnlyBufferException; import java.nio.channels.Channels; import java.nio.channels.FileChannel; @@ -45,6 +47,7 @@ import java.util.Set; import java.util.concurrent.CountDownLatch; import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ThreadLocalRandom; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; @@ -57,16 +60,17 @@ import static io.netty.buffer.Unpooled.wrappedBuffer; import static io.netty.util.internal.EmptyArrays.EMPTY_BYTES; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.junit.Assume.assumeFalse; -import static org.junit.Assume.assumeTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import 
static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; /** * An abstract test class for channel buffers @@ -91,14 +95,14 @@ protected boolean discardReadBytesDoesNotMoveWritableBytes() { return true; } - @Before + @BeforeEach public void init() { buffer = newBuffer(CAPACITY); seed = System.currentTimeMillis(); random = new Random(seed); } - @After + @AfterEach public void dispose() { if (buffer != null) { assertThat(buffer.release(), is(true)); @@ -137,34 +141,34 @@ public void initialState() { assertEquals(0, buffer.readerIndex()); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void readerIndexBoundaryCheck1() { try { buffer.writerIndex(0); } catch (IndexOutOfBoundsException e) { fail(); } - buffer.readerIndex(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.readerIndex(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void readerIndexBoundaryCheck2() { try { buffer.writerIndex(buffer.capacity()); } catch (IndexOutOfBoundsException e) { fail(); } - buffer.readerIndex(buffer.capacity() + 1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.readerIndex(buffer.capacity() + 1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void readerIndexBoundaryCheck3() { try { buffer.writerIndex(CAPACITY / 2); } catch (IndexOutOfBoundsException e) { fail(); } - buffer.readerIndex(CAPACITY * 3 / 2); + assertThrows(IndexOutOfBoundsException.class, () 
-> buffer.readerIndex(CAPACITY * 3 / 2)); } @Test @@ -175,12 +179,12 @@ public void readerIndexBoundaryCheck4() { buffer.readerIndex(buffer.capacity()); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void writerIndexBoundaryCheck1() { - buffer.writerIndex(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.writerIndex(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void writerIndexBoundaryCheck2() { try { buffer.writerIndex(CAPACITY); @@ -188,10 +192,10 @@ public void writerIndexBoundaryCheck2() { } catch (IndexOutOfBoundsException e) { fail(); } - buffer.writerIndex(buffer.capacity() + 1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.writerIndex(buffer.capacity() + 1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void writerIndexBoundaryCheck3() { try { buffer.writerIndex(CAPACITY); @@ -199,7 +203,7 @@ public void writerIndexBoundaryCheck3() { } catch (IndexOutOfBoundsException e) { fail(); } - buffer.writerIndex(CAPACITY / 4); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.writerIndex(CAPACITY / 4)); } @Test @@ -211,74 +215,74 @@ public void writerIndexBoundaryCheck4() { buffer.writeBytes(ByteBuffer.wrap(EMPTY_BYTES)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getBooleanBoundaryCheck1() { - buffer.getBoolean(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBoolean(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getBooleanBoundaryCheck2() { - buffer.getBoolean(buffer.capacity()); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBoolean(buffer.capacity())); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getByteBoundaryCheck1() { - buffer.getByte(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getByte(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getByteBoundaryCheck2() { - 
buffer.getByte(buffer.capacity()); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getByte(buffer.capacity())); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getShortBoundaryCheck1() { - buffer.getShort(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getShort(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getShortBoundaryCheck2() { - buffer.getShort(buffer.capacity() - 1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getShort(buffer.capacity() - 1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getMediumBoundaryCheck1() { - buffer.getMedium(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getMedium(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getMediumBoundaryCheck2() { - buffer.getMedium(buffer.capacity() - 2); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getMedium(buffer.capacity() - 2)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getIntBoundaryCheck1() { - buffer.getInt(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getInt(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getIntBoundaryCheck2() { - buffer.getInt(buffer.capacity() - 3); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getInt(buffer.capacity() - 3)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getLongBoundaryCheck1() { - buffer.getLong(-1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getLong(-1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getLongBoundaryCheck2() { - buffer.getLong(buffer.capacity() - 7); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getLong(buffer.capacity() - 7)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getByteArrayBoundaryCheck1() { - buffer.getBytes(-1, 
EMPTY_BYTES); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, EMPTY_BYTES)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getByteArrayBoundaryCheck2() { - buffer.getBytes(-1, EMPTY_BYTES, 0, 0); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, EMPTY_BYTES, 0, 0)); } @Test @@ -317,44 +321,44 @@ public void getByteArrayBoundaryCheck4() { assertEquals(0, dst[3]); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getByteBufferBoundaryCheck() { - buffer.getBytes(-1, ByteBuffer.allocate(0)); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, ByteBuffer.allocate(0))); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void copyBoundaryCheck1() { - buffer.copy(-1, 0); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(-1, 0)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void copyBoundaryCheck2() { - buffer.copy(0, buffer.capacity() + 1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(0, buffer.capacity() + 1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void copyBoundaryCheck3() { - buffer.copy(buffer.capacity() + 1, 0); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(buffer.capacity() + 1, 0)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void copyBoundaryCheck4() { - buffer.copy(buffer.capacity(), 1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(buffer.capacity(), 1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void setIndexBoundaryCheck1() { - buffer.setIndex(-1, CAPACITY); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.setIndex(-1, CAPACITY)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void setIndexBoundaryCheck2() { - buffer.setIndex(CAPACITY / 2, CAPACITY / 4); + assertThrows(IndexOutOfBoundsException.class, () -> 
buffer.setIndex(CAPACITY / 2, CAPACITY / 4)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void setIndexBoundaryCheck3() { - buffer.setIndex(0, CAPACITY + 1); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.setIndex(0, CAPACITY + 1)); } @Test @@ -379,9 +383,9 @@ public void getByteBufferState() { assertEquals(0, dst.get(3)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void getDirectByteBufferBoundaryCheck() { - buffer.getBytes(-1, ByteBuffer.allocateDirect(0)); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, ByteBuffer.allocateDirect(0))); } @Test @@ -1774,9 +1778,9 @@ public void testDiscardReadBytes() { // Make sure there's no effect if called when readerIndex is 0. buffer.readerIndex(CAPACITY / 4); - buffer.markReaderIndex(); + int readerIndex = buffer.readerIndex(); buffer.writerIndex(CAPACITY / 3); - buffer.markWriterIndex(); + int writerIndex = buffer.writerIndex(); buffer.readerIndex(0); buffer.writerIndex(CAPACITY / 2); buffer.discardReadBytes(); @@ -1784,9 +1788,9 @@ public void testDiscardReadBytes() { assertEquals(0, buffer.readerIndex()); assertEquals(CAPACITY / 2, buffer.writerIndex()); assertEquals(copy.slice(0, CAPACITY / 2), buffer.slice(0, CAPACITY / 2)); - buffer.resetReaderIndex(); + buffer.readerIndex(readerIndex); assertEquals(CAPACITY / 4, buffer.readerIndex()); - buffer.resetWriterIndex(); + buffer.writerIndex(writerIndex); assertEquals(CAPACITY / 3, buffer.writerIndex()); // Make sure bytes after writerIndex is not copied. @@ -1805,11 +1809,6 @@ public void testDiscardReadBytes() { assertEquals(copy.slice(CAPACITY / 2, CAPACITY / 2), buffer.slice(CAPACITY / 2 - 1, CAPACITY / 2)); } - // Marks also should be relocated. 
- buffer.resetReaderIndex(); - assertEquals(CAPACITY / 4 - 1, buffer.readerIndex()); - buffer.resetWriterIndex(); - assertEquals(CAPACITY / 3 - 1, buffer.writerIndex()); copy.release(); } @@ -2084,25 +2083,23 @@ public void testToString() { copied.release(); } - @Test(timeout = 10000) + @Test + @Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) public void testToStringMultipleThreads() throws Throwable { buffer.clear(); buffer.writeBytes("Hello, World!".getBytes(CharsetUtil.ISO_8859_1)); final AtomicInteger counter = new AtomicInteger(30000); - final AtomicReference errorRef = new AtomicReference(); - List threads = new ArrayList(); + final AtomicReference errorRef = new AtomicReference<>(); + List threads = new ArrayList<>(); for (int i = 0; i < 10; i++) { - Thread thread = new Thread(new Runnable() { - @Override - public void run() { - try { - while (errorRef.get() == null && counter.decrementAndGet() > 0) { - assertEquals("Hello, World!", buffer.toString(CharsetUtil.ISO_8859_1)); - } - } catch (Throwable cause) { - errorRef.compareAndSet(null, cause); + Thread thread = new Thread(() -> { + try { + while (errorRef.get() == null && counter.decrementAndGet() > 0) { + assertEquals("Hello, World!", buffer.toString(CharsetUtil.ISO_8859_1)); } + } catch (Throwable cause) { + errorRef.compareAndSet(null, cause); } }); threads.add(thread); @@ -2121,9 +2118,42 @@ public void run() { } } + @Test + public void testSWARIndexOf() { + ByteBuf buffer = newBuffer(16); + buffer.clear(); + // Ensure the buffer is completely zero'ed. 
+ buffer.setZero(0, buffer.capacity()); + buffer.writeByte((byte) 0); // 0 + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); // 7 + + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 1); // 11 + buffer.writeByte((byte) 2); + buffer.writeByte((byte) 3); + buffer.writeByte((byte) 4); + buffer.writeByte((byte) 1); + assertEquals(11, buffer.indexOf(0, 12, (byte) 1)); + assertEquals(12, buffer.indexOf(0, 16, (byte) 2)); + assertEquals(-1, buffer.indexOf(0, 11, (byte) 1)); + assertEquals(11, buffer.indexOf(0, 16, (byte) 1)); + buffer.release(); + } + @Test public void testIndexOf() { buffer.clear(); + // Ensure the buffer is completely zero'ed. + buffer.setZero(0, buffer.capacity()); + buffer.writeByte((byte) 1); buffer.writeByte((byte) 2); buffer.writeByte((byte) 3); @@ -2134,6 +2164,38 @@ public void testIndexOf() { assertEquals(-1, buffer.indexOf(4, 1, (byte) 1)); assertEquals(1, buffer.indexOf(1, 4, (byte) 2)); assertEquals(3, buffer.indexOf(4, 1, (byte) 2)); + + try { + buffer.indexOf(0, buffer.capacity() + 1, (byte) 0); + fail(); + } catch (IndexOutOfBoundsException expected) { + // expected + } + + try { + buffer.indexOf(buffer.capacity(), -1, (byte) 0); + fail(); + } catch (IndexOutOfBoundsException expected) { + // expected + } + + assertEquals(4, buffer.indexOf(buffer.capacity() + 1, 0, (byte) 1)); + assertEquals(0, buffer.indexOf(-1, buffer.capacity(), (byte) 1)); + } + + @Test + public void testIndexOfReleaseBuffer() { + ByteBuf buffer = releasedBuffer(); + if (buffer.capacity() != 0) { + try { + buffer.indexOf(0, 1, (byte) 1); + fail(); + } catch (IllegalReferenceCountException expected) { + // expected + } + } else { + assertEquals(-1, buffer.indexOf(0, 1, (byte) 1)); + } } @Test @@ -2206,7 +2268,7 @@ public void testHashCode() { 
elemA.writeBytes(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }); elemB.writeBytes(new byte[] { 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 }); - Set set = new HashSet(); + Set set = new HashSet<>(); set.add(elemA); set.add(elemB); @@ -2256,7 +2318,7 @@ public void testForEachByte() { int i = CAPACITY / 4; @Override - public boolean process(byte value) throws Exception { + public boolean process(byte value) { assertThat(value, is((byte) (i + 1))); lastIndex.set(i); i ++; @@ -2279,7 +2341,7 @@ public void testForEachByteAbort() { int i = CAPACITY / 3; @Override - public boolean process(byte value) throws Exception { + public boolean process(byte value) { assertThat(value, is((byte) (i + 1))); if (i == stop) { return false; @@ -2303,7 +2365,7 @@ public void testForEachByteDesc() { int i = CAPACITY * 3 / 4 - 1; @Override - public boolean process(byte value) throws Exception { + public boolean process(byte value) { assertThat(value, is((byte) (i + 1))); lastIndex.set(i); i --; @@ -2330,7 +2392,7 @@ private void testInternalNioBuffer(int a) { assertEquals(1, buf.remaining()); byte[] data = new byte[a]; - PlatformDependent.threadLocalRandom().nextBytes(data); + ThreadLocalRandom.current().nextBytes(data); buffer.writeBytes(data); buf = buffer.internalNioBuffer(buffer.readerIndex(), a); @@ -2362,34 +2424,31 @@ private void testReadGatheringByteChannelMultipleThreads(final boolean slice) th final CountDownLatch latch = new CountDownLatch(60000); final CyclicBarrier barrier = new CyclicBarrier(11); for (int i = 0; i < 10; i++) { - new Thread(new Runnable() { - @Override - public void run() { - while (latch.getCount() > 0) { - ByteBuf buf; - if (slice) { - buf = buffer.slice(); - } else { - buf = buffer.duplicate(); - } - TestGatheringByteChannel channel = new TestGatheringByteChannel(); - - while (buf.isReadable()) { - try { - buf.readBytes(channel, buf.readableBytes()); - } catch (IOException e) { - // Never happens - return; - } - } - assertArrayEquals(bytes, 
channel.writtenBytes()); - latch.countDown(); + new Thread(() -> { + while (latch.getCount() > 0) { + ByteBuf buf; + if (slice) { + buf = buffer.slice(); + } else { + buf = buffer.duplicate(); } - try { - barrier.await(); - } catch (Exception e) { - // ignore + TestGatheringByteChannel channel = new TestGatheringByteChannel(); + + while (buf.isReadable()) { + try { + buf.readBytes(channel, buf.readableBytes()); + } catch (IOException e) { + // Never happens + return; + } } + assertArrayEquals(bytes, channel.writtenBytes()); + latch.countDown(); + } + try { + barrier.await(); + } catch (Exception e) { + // ignore } }).start(); } @@ -2417,34 +2476,31 @@ private void testReadOutputStreamMultipleThreads(final boolean slice) throws Exc final CountDownLatch latch = new CountDownLatch(60000); final CyclicBarrier barrier = new CyclicBarrier(11); for (int i = 0; i < 10; i++) { - new Thread(new Runnable() { - @Override - public void run() { - while (latch.getCount() > 0) { - ByteBuf buf; - if (slice) { - buf = buffer.slice(); - } else { - buf = buffer.duplicate(); - } - ByteArrayOutputStream out = new ByteArrayOutputStream(); - - while (buf.isReadable()) { - try { - buf.readBytes(out, buf.readableBytes()); - } catch (IOException e) { - // Never happens - return; - } - } - assertArrayEquals(bytes, out.toByteArray()); - latch.countDown(); + new Thread(() -> { + while (latch.getCount() > 0) { + ByteBuf buf; + if (slice) { + buf = buffer.slice(); + } else { + buf = buffer.duplicate(); } - try { - barrier.await(); - } catch (Exception e) { - // ignore + ByteArrayOutputStream out = new ByteArrayOutputStream(); + + while (buf.isReadable()) { + try { + buf.readBytes(out, buf.readableBytes()); + } catch (IOException e) { + // Never happens + return; + } } + assertArrayEquals(bytes, out.toByteArray()); + latch.countDown(); + } + try { + barrier.await(); + } catch (Exception e) { + // ignore } }).start(); } @@ -2469,37 +2525,34 @@ private void testBytesInArrayMultipleThreads(final 
boolean slice) throws Excepti final ByteBuf buffer = newBuffer(8); buffer.writeBytes(bytes); - final AtomicReference cause = new AtomicReference(); + final AtomicReference cause = new AtomicReference<>(); final CountDownLatch latch = new CountDownLatch(60000); final CyclicBarrier barrier = new CyclicBarrier(11); for (int i = 0; i < 10; i++) { - new Thread(new Runnable() { - @Override - public void run() { - while (cause.get() == null && latch.getCount() > 0) { - ByteBuf buf; - if (slice) { - buf = buffer.slice(); - } else { - buf = buffer.duplicate(); - } + new Thread(() -> { + while (cause.get() == null && latch.getCount() > 0) { + ByteBuf buf; + if (slice) { + buf = buffer.slice(); + } else { + buf = buffer.duplicate(); + } - byte[] array = new byte[8]; - buf.readBytes(array); + byte[] array = new byte[8]; + buf.readBytes(array); - assertArrayEquals(bytes, array); + assertArrayEquals(bytes, array); - Arrays.fill(array, (byte) 0); - buf.getBytes(0, array); - assertArrayEquals(bytes, array); + Arrays.fill(array, (byte) 0); + buf.getBytes(0, array); + assertArrayEquals(bytes, array); - latch.countDown(); - } - try { - barrier.await(); - } catch (Exception e) { - // ignore - } + latch.countDown(); + } + try { + barrier.await(); + } catch (Exception e) { + // ignore } }).start(); } @@ -2509,13 +2562,15 @@ public void run() { buffer.release(); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void readByteThrowsIndexOutOfBoundsException() { final ByteBuf buffer = newBuffer(8); try { - buffer.writeByte(0); - assertEquals((byte) 0, buffer.readByte()); - buffer.readByte(); + assertThrows(IndexOutOfBoundsException.class, () -> { + buffer.writeByte(0); + assertEquals((byte) 0, buffer.readByte()); + buffer.readByte(); + }); } finally { buffer.release(); } @@ -2569,7 +2624,6 @@ public void testLittleEndianWithExpand() { private ByteBuf releasedBuffer() { ByteBuf buffer = newBuffer(8); - // Clear the buffer so we are sure the reader and writer indices are 0. 
// This is important as we may return a slice from newBuffer(...). buffer.clear(); @@ -2577,718 +2631,742 @@ private ByteBuf releasedBuffer() { return buffer; } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testDiscardReadBytesAfterRelease() { - releasedBuffer().discardReadBytes(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().discardReadBytes()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testDiscardSomeReadBytesAfterRelease() { - releasedBuffer().discardSomeReadBytes(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().discardSomeReadBytes()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testEnsureWritableAfterRelease() { - releasedBuffer().ensureWritable(16); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().ensureWritable(16)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetBooleanAfterRelease() { - releasedBuffer().getBoolean(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBoolean(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetByteAfterRelease() { - releasedBuffer().getByte(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getByte(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetUnsignedByteAfterRelease() { - releasedBuffer().getUnsignedByte(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedByte(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetShortAfterRelease() { - releasedBuffer().getShort(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getShort(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetShortLEAfterRelease() { - 
releasedBuffer().getShortLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getShortLE(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetUnsignedShortAfterRelease() { - releasedBuffer().getUnsignedShort(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedShort(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetUnsignedShortLEAfterRelease() { - releasedBuffer().getUnsignedShortLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedShortLE(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetMediumAfterRelease() { - releasedBuffer().getMedium(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getMedium(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetMediumLEAfterRelease() { - releasedBuffer().getMediumLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getMediumLE(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetUnsignedMediumAfterRelease() { - releasedBuffer().getUnsignedMedium(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedMedium(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetIntAfterRelease() { - releasedBuffer().getInt(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getInt(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetIntLEAfterRelease() { - releasedBuffer().getIntLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getIntLE(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetUnsignedIntAfterRelease() { - releasedBuffer().getUnsignedInt(0); + 
assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedInt(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetUnsignedIntLEAfterRelease() { - releasedBuffer().getUnsignedIntLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedIntLE(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetLongAfterRelease() { - releasedBuffer().getLong(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getLong(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetLongLEAfterRelease() { - releasedBuffer().getLongLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getLongLE(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetCharAfterRelease() { - releasedBuffer().getChar(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getChar(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetFloatAfterRelease() { - releasedBuffer().getFloat(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getFloat(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetFloatLEAfterRelease() { - releasedBuffer().getFloatLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getFloatLE(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetDoubleAfterRelease() { - releasedBuffer().getDouble(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getDouble(0)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetDoubleLEAfterRelease() { - releasedBuffer().getDoubleLE(0); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getDoubleLE(0)); } - @Test(expected = 
IllegalReferenceCountException.class) + @Test public void testGetBytesAfterRelease() { ByteBuf buffer = buffer(8); try { - releasedBuffer().getBytes(0, buffer); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, buffer)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetBytesAfterRelease2() { ByteBuf buffer = buffer(); try { - releasedBuffer().getBytes(0, buffer, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, buffer, 1)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetBytesAfterRelease3() { ByteBuf buffer = buffer(); try { - releasedBuffer().getBytes(0, buffer, 0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, buffer, 0, 1)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetBytesAfterRelease4() { - releasedBuffer().getBytes(0, new byte[8]); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, new byte[8])); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetBytesAfterRelease5() { - releasedBuffer().getBytes(0, new byte[8], 0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, new byte[8], 0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testGetBytesAfterRelease6() { - releasedBuffer().getBytes(0, ByteBuffer.allocate(8)); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, ByteBuffer.allocate(8))); } - @Test(expected = IllegalReferenceCountException.class) - public void testGetBytesAfterRelease7() throws IOException { - releasedBuffer().getBytes(0, new ByteArrayOutputStream(), 1); + @Test + public void testGetBytesAfterRelease7() { + 
assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().getBytes(0, new ByteArrayOutputStream(), 1)); } - @Test(expected = IllegalReferenceCountException.class) - public void testGetBytesAfterRelease8() throws IOException { - releasedBuffer().getBytes(0, new DevNullGatheringByteChannel(), 1); + @Test + public void testGetBytesAfterRelease8() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().getBytes(0, new DevNullGatheringByteChannel(), 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetBooleanAfterRelease() { - releasedBuffer().setBoolean(0, true); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBoolean(0, true)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetByteAfterRelease() { - releasedBuffer().setByte(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setByte(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetShortAfterRelease() { - releasedBuffer().setShort(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setShort(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetShortLEAfterRelease() { - releasedBuffer().setShortLE(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setShortLE(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetMediumAfterRelease() { - releasedBuffer().setMedium(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setMedium(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetMediumLEAfterRelease() { - releasedBuffer().setMediumLE(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setMediumLE(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + 
@Test public void testSetIntAfterRelease() { - releasedBuffer().setInt(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setInt(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetIntLEAfterRelease() { - releasedBuffer().setIntLE(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setIntLE(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetLongAfterRelease() { - releasedBuffer().setLong(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setLong(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetLongLEAfterRelease() { - releasedBuffer().setLongLE(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setLongLE(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetCharAfterRelease() { - releasedBuffer().setChar(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setChar(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetFloatAfterRelease() { - releasedBuffer().setFloat(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setFloat(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetDoubleAfterRelease() { - releasedBuffer().setDouble(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setDouble(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetBytesAfterRelease() { ByteBuf buffer = buffer(); try { - releasedBuffer().setBytes(0, buffer); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, buffer)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void 
testSetBytesAfterRelease2() { ByteBuf buffer = buffer(); try { - releasedBuffer().setBytes(0, buffer, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, buffer, 1)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetBytesAfterRelease3() { ByteBuf buffer = buffer(); try { - releasedBuffer().setBytes(0, buffer, 0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, buffer, 0, 1)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetUsAsciiCharSequenceAfterRelease() { - testSetCharSequenceAfterRelease0(CharsetUtil.US_ASCII); + assertThrows(IllegalReferenceCountException.class, + () -> testSetCharSequenceAfterRelease0(CharsetUtil.US_ASCII)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetIso88591CharSequenceAfterRelease() { - testSetCharSequenceAfterRelease0(CharsetUtil.ISO_8859_1); + assertThrows(IllegalReferenceCountException.class, + () -> testSetCharSequenceAfterRelease0(CharsetUtil.ISO_8859_1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetUtf8CharSequenceAfterRelease() { - testSetCharSequenceAfterRelease0(CharsetUtil.UTF_8); + assertThrows(IllegalReferenceCountException.class, + () -> testSetCharSequenceAfterRelease0(CharsetUtil.UTF_8)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetUtf16CharSequenceAfterRelease() { - testSetCharSequenceAfterRelease0(CharsetUtil.UTF_16); + assertThrows(IllegalReferenceCountException.class, + () -> testSetCharSequenceAfterRelease0(CharsetUtil.UTF_16)); } private void testSetCharSequenceAfterRelease0(Charset charset) { releasedBuffer().setCharSequence(0, "x", charset); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetBytesAfterRelease4() { - 
releasedBuffer().setBytes(0, new byte[8]); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, new byte[8])); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetBytesAfterRelease5() { - releasedBuffer().setBytes(0, new byte[8], 0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, new byte[8], 0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetBytesAfterRelease6() { - releasedBuffer().setBytes(0, ByteBuffer.allocate(8)); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, ByteBuffer.allocate(8))); } - @Test(expected = IllegalReferenceCountException.class) - public void testSetBytesAfterRelease7() throws IOException { - releasedBuffer().setBytes(0, new ByteArrayInputStream(new byte[8]), 1); + @Test + public void testSetBytesAfterRelease7() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().setBytes(0, new ByteArrayInputStream(new byte[8]), 1)); } - @Test(expected = IllegalReferenceCountException.class) - public void testSetBytesAfterRelease8() throws IOException { - releasedBuffer().setBytes(0, new TestScatteringByteChannel(), 1); + @Test + public void testSetBytesAfterRelease8() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().setBytes(0, new TestScatteringByteChannel(), 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testSetZeroAfterRelease() { - releasedBuffer().setZero(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setZero(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBooleanAfterRelease() { - releasedBuffer().readBoolean(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBoolean()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void 
testReadByteAfterRelease() { - releasedBuffer().readByte(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readByte()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadUnsignedByteAfterRelease() { - releasedBuffer().readUnsignedByte(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedByte()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadShortAfterRelease() { - releasedBuffer().readShort(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readShort()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadShortLEAfterRelease() { - releasedBuffer().readShortLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readShortLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadUnsignedShortAfterRelease() { - releasedBuffer().readUnsignedShort(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedShort()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadUnsignedShortLEAfterRelease() { - releasedBuffer().readUnsignedShortLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedShortLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadMediumAfterRelease() { - releasedBuffer().readMedium(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readMedium()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadMediumLEAfterRelease() { - releasedBuffer().readMediumLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readMediumLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadUnsignedMediumAfterRelease() { - 
releasedBuffer().readUnsignedMedium(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedMedium()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadUnsignedMediumLEAfterRelease() { - releasedBuffer().readUnsignedMediumLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedMediumLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadIntAfterRelease() { - releasedBuffer().readInt(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readInt()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadIntLEAfterRelease() { - releasedBuffer().readIntLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readIntLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadUnsignedIntAfterRelease() { - releasedBuffer().readUnsignedInt(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedInt()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadUnsignedIntLEAfterRelease() { - releasedBuffer().readUnsignedIntLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedIntLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadLongAfterRelease() { - releasedBuffer().readLong(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readLong()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadLongLEAfterRelease() { - releasedBuffer().readLongLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readLongLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadCharAfterRelease() { - releasedBuffer().readChar(); + 
assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readChar()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadFloatAfterRelease() { - releasedBuffer().readFloat(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readFloat()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadFloatLEAfterRelease() { - releasedBuffer().readFloatLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readFloatLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadDoubleAfterRelease() { - releasedBuffer().readDouble(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readDouble()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadDoubleLEAfterRelease() { - releasedBuffer().readDoubleLE(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readDoubleLE()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBytesAfterRelease() { - releasedBuffer().readBytes(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBytesAfterRelease2() { ByteBuf buffer = buffer(8); try { - releasedBuffer().readBytes(buffer); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(buffer)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBytesAfterRelease3() { ByteBuf buffer = buffer(8); try { - releasedBuffer().readBytes(buffer); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(buffer)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBytesAfterRelease4() { ByteBuf 
buffer = buffer(8); try { - releasedBuffer().readBytes(buffer, 0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(buffer, 0, 1)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBytesAfterRelease5() { - releasedBuffer().readBytes(new byte[8]); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(new byte[8])); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBytesAfterRelease6() { - releasedBuffer().readBytes(new byte[8], 0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(new byte[8], 0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReadBytesAfterRelease7() { - releasedBuffer().readBytes(ByteBuffer.allocate(8)); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(ByteBuffer.allocate(8))); } - @Test(expected = IllegalReferenceCountException.class) - public void testReadBytesAfterRelease8() throws IOException { - releasedBuffer().readBytes(new ByteArrayOutputStream(), 1); + @Test + public void testReadBytesAfterRelease8() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().readBytes(new ByteArrayOutputStream(), 1)); } - @Test(expected = IllegalReferenceCountException.class) - public void testReadBytesAfterRelease9() throws IOException { - releasedBuffer().readBytes(new ByteArrayOutputStream(), 1); + @Test + public void testReadBytesAfterRelease9() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().readBytes(new ByteArrayOutputStream(), 1)); } - @Test(expected = IllegalReferenceCountException.class) - public void testReadBytesAfterRelease10() throws IOException { - releasedBuffer().readBytes(new DevNullGatheringByteChannel(), 1); + @Test + public void testReadBytesAfterRelease10() { + 
assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().readBytes(new DevNullGatheringByteChannel(), 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteBooleanAfterRelease() { - releasedBuffer().writeBoolean(true); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBoolean(true)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteByteAfterRelease() { - releasedBuffer().writeByte(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeByte(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteShortAfterRelease() { - releasedBuffer().writeShort(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeShort(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteShortLEAfterRelease() { - releasedBuffer().writeShortLE(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeShortLE(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteMediumAfterRelease() { - releasedBuffer().writeMedium(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeMedium(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteMediumLEAfterRelease() { - releasedBuffer().writeMediumLE(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeMediumLE(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteIntAfterRelease() { - releasedBuffer().writeInt(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeInt(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteIntLEAfterRelease() { - releasedBuffer().writeIntLE(1); + assertThrows(IllegalReferenceCountException.class, () 
-> releasedBuffer().writeIntLE(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteLongAfterRelease() { - releasedBuffer().writeLong(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeLong(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteLongLEAfterRelease() { - releasedBuffer().writeLongLE(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeLongLE(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteCharAfterRelease() { - releasedBuffer().writeChar(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeChar(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteFloatAfterRelease() { - releasedBuffer().writeFloat(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeFloat(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteFloatLEAfterRelease() { - releasedBuffer().writeFloatLE(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeFloatLE(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteDoubleAfterRelease() { - releasedBuffer().writeDouble(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeDouble(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteDoubleLEAfterRelease() { - releasedBuffer().writeDoubleLE(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeDoubleLE(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteBytesAfterRelease() { ByteBuf buffer = buffer(8); try { - releasedBuffer().writeBytes(buffer); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(buffer)); } finally { 
buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteBytesAfterRelease2() { ByteBuf buffer = copiedBuffer(new byte[8]); try { - releasedBuffer().writeBytes(buffer, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(buffer, 1)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteBytesAfterRelease3() { ByteBuf buffer = buffer(8); try { - releasedBuffer().writeBytes(buffer, 0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(buffer, 0, 1)); } finally { buffer.release(); } } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteBytesAfterRelease4() { - releasedBuffer().writeBytes(new byte[8]); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(new byte[8])); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteBytesAfterRelease5() { - releasedBuffer().writeBytes(new byte[8], 0 , 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(new byte[8], 0 , 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteBytesAfterRelease6() { - releasedBuffer().writeBytes(ByteBuffer.allocate(8)); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(ByteBuffer.allocate(8))); } - @Test(expected = IllegalReferenceCountException.class) - public void testWriteBytesAfterRelease7() throws IOException { - releasedBuffer().writeBytes(new ByteArrayInputStream(new byte[8]), 1); + @Test + public void testWriteBytesAfterRelease7() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().writeBytes(new ByteArrayInputStream(new byte[8]), 1)); } - @Test(expected = IllegalReferenceCountException.class) - public void testWriteBytesAfterRelease8() throws IOException { - 
releasedBuffer().writeBytes(new TestScatteringByteChannel(), 1); + @Test + public void testWriteBytesAfterRelease8() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().writeBytes(new TestScatteringByteChannel(), 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteZeroAfterRelease() throws IOException { - releasedBuffer().writeZero(1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeZero(1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteUsAsciiCharSequenceAfterRelease() { - testWriteCharSequenceAfterRelease0(CharsetUtil.US_ASCII); + assertThrows(IllegalReferenceCountException.class, + () -> testWriteCharSequenceAfterRelease0(CharsetUtil.US_ASCII)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteIso88591CharSequenceAfterRelease() { - testWriteCharSequenceAfterRelease0(CharsetUtil.ISO_8859_1); + assertThrows(IllegalReferenceCountException.class, + () -> testWriteCharSequenceAfterRelease0(CharsetUtil.ISO_8859_1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteUtf8CharSequenceAfterRelease() { - testWriteCharSequenceAfterRelease0(CharsetUtil.UTF_8); + assertThrows(IllegalReferenceCountException.class, () -> testWriteCharSequenceAfterRelease0(CharsetUtil.UTF_8)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testWriteUtf16CharSequenceAfterRelease() { - testWriteCharSequenceAfterRelease0(CharsetUtil.UTF_16); + assertThrows(IllegalReferenceCountException.class, + () -> testWriteCharSequenceAfterRelease0(CharsetUtil.UTF_16)); } private void testWriteCharSequenceAfterRelease0(Charset charset) { releasedBuffer().writeCharSequence("x", charset); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testForEachByteAfterRelease() { - releasedBuffer().forEachByte(new TestByteProcessor()); + 
assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByte(new TestByteProcessor())); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testForEachByteAfterRelease1() { - releasedBuffer().forEachByte(0, 1, new TestByteProcessor()); + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByte(0, 1, new TestByteProcessor())); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testForEachByteDescAfterRelease() { - releasedBuffer().forEachByteDesc(new TestByteProcessor()); + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByteDesc(new TestByteProcessor())); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testForEachByteDescAfterRelease1() { - releasedBuffer().forEachByteDesc(0, 1, new TestByteProcessor()); + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByteDesc(0, 1, new TestByteProcessor())); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testCopyAfterRelease() { - releasedBuffer().copy(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().copy()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testCopyAfterRelease1() { - releasedBuffer().copy(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().copy()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testNioBufferAfterRelease() { - releasedBuffer().nioBuffer(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffer()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testNioBufferAfterRelease1() { - releasedBuffer().nioBuffer(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffer(0, 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public 
void testInternalNioBufferAfterRelease() { + testInternalNioBufferAfterRelease0(IllegalReferenceCountException.class); + } + + protected void testInternalNioBufferAfterRelease0(final Class expectedException) { ByteBuf releasedBuffer = releasedBuffer(); - releasedBuffer.internalNioBuffer(releasedBuffer.readerIndex(), 1); + assertThrows(expectedException, () -> releasedBuffer.internalNioBuffer(releasedBuffer.readerIndex(), 1)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testNioBuffersAfterRelease() { - releasedBuffer().nioBuffers(); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffers()); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testNioBuffersAfterRelease2() { - releasedBuffer().nioBuffers(0, 1); + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffers(0, 1)); } @Test @@ -3317,6 +3395,224 @@ public void testMemoryAddressAfterRelease() { } } + @Test + public void testSliceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().slice()); + } + + @Test + public void testSliceAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().slice(0, 1)); + } + + private static void assertSliceFailAfterRelease(ByteBuf... 
bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + try { + assertEquals(0, buf.refCnt()); + buf.slice(); + fail(); + } catch (IllegalReferenceCountException ignored) { + // as expected + } + } + } + + @Test + public void testSliceAfterReleaseRetainedSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + assertSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testSliceAfterReleaseRetainedSliceDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.duplicate(); + assertSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testSliceAfterReleaseRetainedSliceRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.retainedDuplicate(); + assertSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testSliceAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testSliceAfterReleaseRetainedDuplicateSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + ByteBuf buf3 = buf2.slice(0, 1); + assertSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testRetainedSliceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().retainedSlice()); + } + + @Test + public void testRetainedSliceAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().retainedSlice(0, 1)); + } + + private static void assertRetainedSliceFailAfterRelease(ByteBuf... 
bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + try { + assertEquals(0, buf.refCnt()); + buf.retainedSlice(); + fail(); + } catch (IllegalReferenceCountException ignored) { + // as expected + } + } + } + + @Test + public void testRetainedSliceAfterReleaseRetainedSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + assertRetainedSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedSliceDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.duplicate(); + assertRetainedSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedSliceRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.retainedDuplicate(); + assertRetainedSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertRetainedSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedDuplicateSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + ByteBuf buf3 = buf2.slice(0, 1); + assertRetainedSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testDuplicateAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().duplicate()); + } + + @Test + public void testRetainedDuplicateAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().retainedDuplicate()); + } + + private static void assertDuplicateFailAfterRelease(ByteBuf... 
bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + try { + assertEquals(0, buf.refCnt()); + buf.duplicate(); + fail(); + } catch (IllegalReferenceCountException ignored) { + // as expected + } + } + } + + @Test + public void testDuplicateAfterReleaseRetainedSliceDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.duplicate(); + assertDuplicateFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testDuplicateAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertDuplicateFailAfterRelease(buf, buf2); + } + + @Test + public void testDuplicateAfterReleaseRetainedDuplicateSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + ByteBuf buf3 = buf2.slice(0, 1); + assertDuplicateFailAfterRelease(buf, buf2, buf3); + } + + private static void assertRetainedDuplicateFailAfterRelease(ByteBuf... 
bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + try { + assertEquals(0, buf.refCnt()); + buf.retainedDuplicate(); + fail(); + } catch (IllegalReferenceCountException ignored) { + // as expected + } + } + } + + @Test + public void testRetainedDuplicateAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertRetainedDuplicateFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedDuplicateAfterReleaseDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.duplicate(); + assertRetainedDuplicateFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedDuplicateAfterReleaseRetainedSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + assertRetainedDuplicateFailAfterRelease(buf, buf2); + } + @Test public void testSliceRelease() { ByteBuf buf = newBuffer(8); @@ -3325,14 +3621,14 @@ public void testSliceRelease() { assertEquals(0, buf.refCnt()); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testReadSliceOutOfBounds() { - testReadSliceOutOfBounds(false); + assertThrows(IndexOutOfBoundsException.class, () -> testReadSliceOutOfBounds(false)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testReadRetainedSliceOutOfBounds() { - testReadSliceOutOfBounds(true); + assertThrows(IndexOutOfBoundsException.class, () -> testReadSliceOutOfBounds(true)); } private void testReadSliceOutOfBounds(boolean retainedSlice) { @@ -3381,24 +3677,24 @@ private void testWriteCharSequenceExpand(Charset charset) { } } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testSetUsAsciiCharSequenceNoExpand() { - testSetCharSequenceNoExpand(CharsetUtil.US_ASCII); + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.US_ASCII)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void 
testSetUtf8CharSequenceNoExpand() { - testSetCharSequenceNoExpand(CharsetUtil.UTF_8); + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.UTF_8)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testSetIso88591CharSequenceNoExpand() { - testSetCharSequenceNoExpand(CharsetUtil.ISO_8859_1); + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.ISO_8859_1)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testSetUtf16CharSequenceNoExpand() { - testSetCharSequenceNoExpand(CharsetUtil.UTF_16); + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.UTF_16)); } private void testSetCharSequenceNoExpand(Charset charset) { @@ -3430,11 +3726,23 @@ public void testSetUtf16CharSequence() { testSetGetCharSequence(CharsetUtil.UTF_16); } + private static final CharBuffer EXTENDED_ASCII_CHARS, ASCII_CHARS; + + static { + char[] chars = new char[256]; + for (char c = 0; c < chars.length; c++) { + chars[c] = c; + } + EXTENDED_ASCII_CHARS = CharBuffer.wrap(chars); + ASCII_CHARS = CharBuffer.wrap(chars, 0, 128); + } + private void testSetGetCharSequence(Charset charset) { - ByteBuf buf = newBuffer(16); - String sequence = "AB"; + ByteBuf buf = newBuffer(1024); + CharBuffer sequence = CharsetUtil.US_ASCII.equals(charset) + ? ASCII_CHARS : EXTENDED_ASCII_CHARS; int bytes = buf.setCharSequence(1, sequence, charset); - assertEquals(sequence, buf.getCharSequence(1, bytes, charset)); + assertEquals(sequence, CharBuffer.wrap(buf.getCharSequence(1, bytes, charset))); buf.release(); } @@ -3459,62 +3767,63 @@ public void testWriteReadUtf16CharSequence() { } private void testWriteReadCharSequence(Charset charset) { - ByteBuf buf = newBuffer(16); - String sequence = "AB"; + ByteBuf buf = newBuffer(1024); + CharBuffer sequence = CharsetUtil.US_ASCII.equals(charset) + ? 
ASCII_CHARS : EXTENDED_ASCII_CHARS; buf.writerIndex(1); int bytes = buf.writeCharSequence(sequence, charset); buf.readerIndex(1); - assertEquals(sequence, buf.readCharSequence(bytes, charset)); + assertEquals(sequence, CharBuffer.wrap(buf.readCharSequence(bytes, charset))); buf.release(); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testRetainedSliceIndexOutOfBounds() { - testSliceOutOfBounds(true, true, true); + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, true, true)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testRetainedSliceLengthOutOfBounds() { - testSliceOutOfBounds(true, true, false); + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, true, false)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testMixedSliceAIndexOutOfBounds() { - testSliceOutOfBounds(true, false, true); + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, false, true)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testMixedSliceALengthOutOfBounds() { - testSliceOutOfBounds(true, false, false); + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, false, false)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testMixedSliceBIndexOutOfBounds() { - testSliceOutOfBounds(false, true, true); + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(false, true, true)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testMixedSliceBLengthOutOfBounds() { - testSliceOutOfBounds(false, true, false); + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(false, true, false)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testSliceIndexOutOfBounds() { - testSliceOutOfBounds(false, false, true); + assertThrows(IndexOutOfBoundsException.class, () -> 
testSliceOutOfBounds(false, false, true)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testSliceLengthOutOfBounds() { - testSliceOutOfBounds(false, false, false); + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(false, false, false)); } @Test public void testRetainedSliceAndRetainedDuplicateContentIsExpected() { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected1 = newBuffer(6).resetWriterIndex(); - ByteBuf expected2 = newBuffer(5).resetWriterIndex(); - ByteBuf expected3 = newBuffer(4).resetWriterIndex(); - ByteBuf expected4 = newBuffer(3).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(6).writerIndex(0); + ByteBuf expected2 = newBuffer(5).writerIndex(0); + ByteBuf expected3 = newBuffer(4).writerIndex(0); + ByteBuf expected4 = newBuffer(3).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected1.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}); expected2.writeBytes(new byte[] {3, 4, 5, 6, 7}); @@ -3573,10 +3882,10 @@ public void testRetainedSliceAndRetainedDuplicateContentIsExpected() { @Test public void testRetainedDuplicateAndRetainedSliceContentIsExpected() { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected1 = newBuffer(6).resetWriterIndex(); - ByteBuf expected2 = newBuffer(5).resetWriterIndex(); - ByteBuf expected3 = newBuffer(4).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(6).writerIndex(0); + ByteBuf expected2 = newBuffer(5).writerIndex(0); + ByteBuf expected3 = newBuffer(4).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected1.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}); expected2.writeBytes(new byte[] {3, 4, 5, 6, 7}); @@ -3761,14 +4070,14 @@ public void testRetainedDuplicateCapacityChange() { testDuplicateCapacityChange(true); } - @Test(expected = UnsupportedOperationException.class) + @Test public void testSliceCapacityChange() 
{ - testSliceCapacityChange(false); + assertThrows(UnsupportedOperationException.class, () -> testSliceCapacityChange(false)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void testRetainedSliceCapacityChange() { - testSliceCapacityChange(true); + assertThrows(UnsupportedOperationException.class, () -> testSliceCapacityChange(true)); } @Test @@ -3925,8 +4234,8 @@ private void testSliceOutOfBounds(boolean initRetainedSlice, boolean finalRetain } private void testSliceContents(boolean retainedSlice) { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected = newBuffer(3).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected = newBuffer(3).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected.writeBytes(new byte[] {4, 5, 6}); ByteBuf slice = retainedSlice ? buf.retainedSlice(buf.readerIndex() + 3, 3) @@ -3948,9 +4257,9 @@ private void testSliceContents(boolean retainedSlice) { } private void testSliceReleaseOriginal(boolean retainedSlice1, boolean retainedSlice2) { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected1 = newBuffer(3).resetWriterIndex(); - ByteBuf expected2 = newBuffer(2).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(3).writerIndex(0); + ByteBuf expected2 = newBuffer(2).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected1.writeBytes(new byte[] {6, 7, 8}); expected2.writeBytes(new byte[] {7, 8}); @@ -3982,12 +4291,12 @@ private void testSliceReleaseOriginal(boolean retainedSlice1, boolean retainedSl } private void testMultipleLevelRetainedSliceWithNonRetained(boolean doSlice1, boolean doSlice2) { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected1 = newBuffer(6).resetWriterIndex(); - ByteBuf expected2 = newBuffer(4).resetWriterIndex(); - ByteBuf expected3 = newBuffer(2).resetWriterIndex(); - ByteBuf expected4SliceSlice = newBuffer(1).resetWriterIndex(); - 
ByteBuf expected4DupSlice = newBuffer(1).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(6).writerIndex(0); + ByteBuf expected2 = newBuffer(4).writerIndex(0); + ByteBuf expected3 = newBuffer(2).writerIndex(0); + ByteBuf expected4SliceSlice = newBuffer(1).writerIndex(0); + ByteBuf expected4DupSlice = newBuffer(1).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected1.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}); expected2.writeBytes(new byte[] {3, 4, 5, 6}); @@ -4052,8 +4361,8 @@ private void testMultipleLevelRetainedSliceWithNonRetained(boolean doSlice1, boo } private void testDuplicateReleaseOriginal(boolean retainedDuplicate1, boolean retainedDuplicate2) { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected = newBuffer(8).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected = newBuffer(8).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected.writeBytes(buf, buf.readerIndex(), buf.readableBytes()); ByteBuf dup1 = retainedDuplicate1 ? 
buf.retainedDuplicate() @@ -4083,10 +4392,10 @@ private void testDuplicateReleaseOriginal(boolean retainedDuplicate1, boolean re } private void testMultipleRetainedSliceReleaseOriginal(boolean retainedSlice1, boolean retainedSlice2) { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected1 = newBuffer(3).resetWriterIndex(); - ByteBuf expected2 = newBuffer(2).resetWriterIndex(); - ByteBuf expected3 = newBuffer(2).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(3).writerIndex(0); + ByteBuf expected2 = newBuffer(2).writerIndex(0); + ByteBuf expected3 = newBuffer(2).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected1.writeBytes(new byte[] {6, 7, 8}); expected2.writeBytes(new byte[] {7, 8}); @@ -4127,8 +4436,8 @@ private void testMultipleRetainedSliceReleaseOriginal(boolean retainedSlice1, bo } private void testMultipleRetainedDuplicateReleaseOriginal(boolean retainedDuplicate1, boolean retainedDuplicate2) { - ByteBuf buf = newBuffer(8).resetWriterIndex(); - ByteBuf expected = newBuffer(8).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected = newBuffer(8).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); expected.writeBytes(buf, buf.readerIndex(), buf.readableBytes()); ByteBuf dup1 = retainedDuplicate1 ? buf.retainedDuplicate() @@ -4174,7 +4483,7 @@ private void testMultipleRetainedDuplicateReleaseOriginal(boolean retainedDuplic } private void testDuplicateContents(boolean retainedDuplicate) { - ByteBuf buf = newBuffer(8).resetWriterIndex(); + ByteBuf buf = newBuffer(8).writerIndex(0); buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); ByteBuf dup = retainedDuplicate ? 
buf.retainedDuplicate() : buf.duplicate(); try { @@ -4255,7 +4564,7 @@ private void testGetReadOnlyDst(boolean direct) { @Test public void testReadBytesAndWriteBytesWithFileChannel() throws IOException { - File file = File.createTempFile("file-channel", ".tmp"); + File file = PlatformDependent.createTempFile("file-channel", ".tmp", null); RandomAccessFile randomAccessFile = null; try { randomAccessFile = new RandomAccessFile(file, "rw"); @@ -4265,9 +4574,7 @@ public void testReadBytesAndWriteBytesWithFileChannel() throws IOException { byte[] bytes = {'a', 'b', 'c', 'd'}; int len = bytes.length; - ByteBuf buffer = newBuffer(len); - buffer.resetReaderIndex(); - buffer.resetWriterIndex(); + ByteBuf buffer = newBuffer(len).writerIndex(0); buffer.writeBytes(bytes); int oldReaderIndex = buffer.readerIndex(); @@ -4275,9 +4582,7 @@ public void testReadBytesAndWriteBytesWithFileChannel() throws IOException { assertEquals(oldReaderIndex + len, buffer.readerIndex()); assertEquals(channelPosition, channel.position()); - ByteBuf buffer2 = newBuffer(len); - buffer2.resetReaderIndex(); - buffer2.resetWriterIndex(); + ByteBuf buffer2 = newBuffer(len).writerIndex(0); int oldWriterIndex = buffer2.writerIndex(); assertEquals(len, buffer2.writeBytes(channel, 10, len)); assertEquals(channelPosition, channel.position()); @@ -4298,7 +4603,7 @@ public void testReadBytesAndWriteBytesWithFileChannel() throws IOException { @Test public void testGetBytesAndSetBytesWithFileChannel() throws IOException { - File file = File.createTempFile("file-channel", ".tmp"); + File file = PlatformDependent.createTempFile("file-channel", ".tmp", null); RandomAccessFile randomAccessFile = null; try { randomAccessFile = new RandomAccessFile(file, "rw"); @@ -4308,9 +4613,7 @@ public void testGetBytesAndSetBytesWithFileChannel() throws IOException { byte[] bytes = {'a', 'b', 'c', 'd'}; int len = bytes.length; - ByteBuf buffer = newBuffer(len); - buffer.resetReaderIndex(); - buffer.resetWriterIndex(); + ByteBuf 
buffer = newBuffer(len).writerIndex(0); buffer.writeBytes(bytes); int oldReaderIndex = buffer.readerIndex(); @@ -4318,9 +4621,7 @@ public void testGetBytesAndSetBytesWithFileChannel() throws IOException { assertEquals(oldReaderIndex, buffer.readerIndex()); assertEquals(channelPosition, channel.position()); - ByteBuf buffer2 = newBuffer(len); - buffer2.resetReaderIndex(); - buffer2.resetWriterIndex(); + ByteBuf buffer2 = newBuffer(len).writerIndex(0); int oldWriterIndex = buffer2.writerIndex(); assertEquals(buffer2.setBytes(oldWriterIndex, channel, 10, len), len); assertEquals(channelPosition, channel.position()); @@ -4367,7 +4668,7 @@ public void testForEachByteDesc2() { private int index = bytes.length - 1; @Override - public boolean process(byte value) throws Exception { + public boolean process(byte value) { bytes[index--] = value; return true; } @@ -4390,7 +4691,7 @@ public void testForEachByte2() { private int index; @Override - public boolean process(byte value) throws Exception { + public boolean process(byte value) { bytes[index++] = value; return true; } @@ -4402,7 +4703,7 @@ public boolean process(byte value) throws Exception { } } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testGetBytesByteBuffer() { byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}; // Ensure destination buffer is bigger then what is in the ByteBuf. 
@@ -4410,7 +4711,7 @@ public void testGetBytesByteBuffer() { ByteBuf buffer = newBuffer(bytes.length); try { buffer.writeBytes(bytes); - buffer.getBytes(buffer.readerIndex(), nioBuffer); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(buffer.readerIndex(), nioBuffer)); } finally { buffer.release(); } @@ -4424,30 +4725,24 @@ private void testRefCnt0(final boolean parameter) throws Exception { final ByteBuf buffer = newBuffer(4); assertEquals(1, buffer.refCnt()); final AtomicInteger cnt = new AtomicInteger(Integer.MAX_VALUE); - Thread t1 = new Thread(new Runnable() { - @Override - public void run() { - boolean released; - if (parameter) { - released = buffer.release(buffer.refCnt()); - } else { - released = buffer.release(); - } - assertTrue(released); - Thread t2 = new Thread(new Runnable() { - @Override - public void run() { - cnt.set(buffer.refCnt()); - latch.countDown(); - } - }); - t2.start(); - try { - // Keep Thread alive a bit so the ThreadLocal caches are not freed - innerLatch.await(); - } catch (InterruptedException ignore) { - // ignore - } + Thread t1 = new Thread(() -> { + boolean released; + if (parameter) { + released = buffer.release(buffer.refCnt()); + } else { + released = buffer.release(); + } + assertTrue(released); + Thread t2 = new Thread(() -> { + cnt.set(buffer.refCnt()); + latch.countDown(); + }); + t2.start(); + try { + // Keep Thread alive a bit so the ThreadLocal caches are not freed + innerLatch.await(); + } catch (InterruptedException ignore) { + // ignore } }); t1.start(); @@ -4569,30 +4864,30 @@ public void close() { private static final class TestByteProcessor implements ByteProcessor { @Override - public boolean process(byte value) throws Exception { + public boolean process(byte value) { return true; } } - @Test(expected = IllegalArgumentException.class) + @Test public void testCapacityEnforceMaxCapacity() { ByteBuf buffer = newBuffer(3, 13); assertEquals(13, buffer.maxCapacity()); assertEquals(3, 
buffer.capacity()); try { - buffer.capacity(14); + assertThrows(IllegalArgumentException.class, () -> buffer.capacity(14)); } finally { buffer.release(); } } - @Test(expected = IllegalArgumentException.class) + @Test public void testCapacityNegative() { ByteBuf buffer = newBuffer(3, 13); assertEquals(13, buffer.maxCapacity()); assertEquals(3, buffer.capacity()); try { - buffer.capacity(-1); + assertThrows(IllegalArgumentException.class, () -> buffer.capacity(-1)); } finally { buffer.release(); } @@ -4626,7 +4921,7 @@ public void testCapacityIncrease() { } } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testReaderIndexLargerThanWriterIndex() { String content1 = "hello"; String content2 = "world"; @@ -4634,16 +4929,101 @@ public void testReaderIndexLargerThanWriterIndex() { ByteBuf buffer = newBuffer(length); buffer.setIndex(0, 0); buffer.writeCharSequence(content1, CharsetUtil.US_ASCII); - buffer.markWriterIndex(); buffer.skipBytes(content1.length()); buffer.writeCharSequence(content2, CharsetUtil.US_ASCII); buffer.skipBytes(content2.length()); assertTrue(buffer.readerIndex() <= buffer.writerIndex()); try { - buffer.resetWriterIndex(); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.readerIndex(buffer.writerIndex() + 1)); } finally { buffer.release(); } } + + @Test + public void testMaxFastWritableBytes() { + ByteBuf buffer = newBuffer(150, 500).writerIndex(100); + assertEquals(50, buffer.writableBytes()); + assertEquals(150, buffer.capacity()); + assertEquals(500, buffer.maxCapacity()); + assertEquals(400, buffer.maxWritableBytes()); + // Default implementation has fast writable == writable + assertEquals(50, buffer.maxFastWritableBytes()); + buffer.release(); + } + + @Test + public void testEnsureWritableIntegerOverflow() { + ByteBuf buffer = newBuffer(CAPACITY); + buffer.writerIndex(buffer.readerIndex()); + buffer.writeByte(1); + try { + buffer.ensureWritable(Integer.MAX_VALUE); + fail(); + } catch (IndexOutOfBoundsException 
e) { + // expected + } finally { + buffer.release(); + } + } + + @Test + public void testEndiannessIndexOf() { + buffer.clear(); + final int v = 0x02030201; + buffer.writeIntLE(v); + buffer.writeByte(0x01); + + assertEquals(-1, buffer.indexOf(1, 4, (byte) 1)); + assertEquals(-1, buffer.indexOf(4, 1, (byte) 1)); + assertEquals(1, buffer.indexOf(1, 4, (byte) 2)); + assertEquals(3, buffer.indexOf(4, 1, (byte) 2)); + } + + @Test + public void explicitLittleEndianReadMethodsMustAlwaysUseLittleEndianByteOrder() { + buffer.clear(); + buffer.writeBytes(new byte[] {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}); + assertEquals(0x0201, buffer.readShortLE()); + buffer.readerIndex(0); + assertEquals(0x0201, buffer.readUnsignedShortLE()); + buffer.readerIndex(0); + assertEquals(0x030201, buffer.readMediumLE()); + buffer.readerIndex(0); + assertEquals(0x030201, buffer.readUnsignedMediumLE()); + buffer.readerIndex(0); + assertEquals(0x04030201, buffer.readIntLE()); + buffer.readerIndex(0); + assertEquals(0x04030201, buffer.readUnsignedIntLE()); + buffer.readerIndex(0); + assertEquals(0x04030201, Float.floatToRawIntBits(buffer.readFloatLE())); + buffer.readerIndex(0); + assertEquals(0x0807060504030201L, buffer.readLongLE()); + buffer.readerIndex(0); + assertEquals(0x0807060504030201L, Double.doubleToRawLongBits(buffer.readDoubleLE())); + buffer.readerIndex(0); + } + + @Test + public void explicitLittleEndianWriteMethodsMustAlwaysUseLittleEndianByteOrder() { + buffer.clear(); + buffer.writeShortLE(0x0102); + assertEquals(0x0102, buffer.readShortLE()); + buffer.clear(); + buffer.writeMediumLE(0x010203); + assertEquals(0x010203, buffer.readMediumLE()); + buffer.clear(); + buffer.writeIntLE(0x01020304); + assertEquals(0x01020304, buffer.readIntLE()); + buffer.clear(); + buffer.writeFloatLE(Float.intBitsToFloat(0x01020304)); + assertEquals(0x01020304, Float.floatToRawIntBits(buffer.readFloatLE())); + buffer.clear(); + buffer.writeLongLE(0x0102030405060708L); + 
assertEquals(0x0102030405060708L, buffer.readLongLE()); + buffer.clear(); + buffer.writeDoubleLE(Double.longBitsToDouble(0x0102030405060708L)); + assertEquals(0x0102030405060708L, Double.doubleToRawLongBits(buffer.readDoubleLE())); + } } diff --git a/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java index f51475bf9ae..8d84d12b774 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractCompositeByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,8 +16,8 @@ package io.netty.buffer; import io.netty.util.ReferenceCountUtil; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; import java.nio.ByteOrder; @@ -27,6 +27,8 @@ import java.util.Iterator; import java.util.List; import java.util.NoSuchElementException; +import java.util.Objects; +import java.util.concurrent.ThreadLocalRandom; import static io.netty.buffer.Unpooled.EMPTY_BUFFER; import static io.netty.buffer.Unpooled.buffer; @@ -36,35 +38,35 @@ import static io.netty.util.internal.EmptyArrays.EMPTY_BYTES; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNotSame; -import static org.junit.Assert.assertSame; 
-import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * An abstract test class for composite channel buffers */ public abstract class AbstractCompositeByteBufTest extends AbstractByteBufTest { + private static final ByteBufAllocator ALLOC = UnpooledByteBufAllocator.DEFAULT; + private final ByteOrder order; protected AbstractCompositeByteBufTest(ByteOrder order) { - if (order == null) { - throw new NullPointerException("order"); - } - this.order = order; + this.order = Objects.requireNonNull(order, "order"); } @Override protected ByteBuf newBuffer(int length, int maxCapacity) { - Assume.assumeTrue(maxCapacity == Integer.MAX_VALUE); + Assumptions.assumeTrue(maxCapacity == Integer.MAX_VALUE); - List buffers = new ArrayList(); + List buffers = new ArrayList<>(); for (int i = 0; i < length + 45; i += 45) { buffers.add(EMPTY_BUFFER); buffers.add(wrappedBuffer(new byte[1])); @@ -87,7 +89,20 @@ protected ByteBuf newBuffer(int length, int maxCapacity) { buffers.add(EMPTY_BUFFER); } - ByteBuf buffer = wrappedBuffer(Integer.MAX_VALUE, buffers.toArray(new ByteBuf[buffers.size()])).order(order); + ByteBuf buffer; + // Ensure that we are really testing a CompositeByteBuf + switch (buffers.size()) { + case 0: + buffer = compositeBuffer(Integer.MAX_VALUE); + break; + case 1: + buffer = 
compositeBuffer(Integer.MAX_VALUE).addComponent(buffers.get(0)); + break; + default: + buffer = wrappedBuffer(Integer.MAX_VALUE, buffers.toArray(new ByteBuf[0])); + break; + } + buffer = buffer.order(order); // Truncate to the requested capacity. buffer.capacity(length); @@ -99,6 +114,10 @@ protected ByteBuf newBuffer(int length, int maxCapacity) { return buffer; } + protected CompositeByteBuf newCompositeBuffer() { + return compositeBuffer(); + } + // Composite buffer does not waste bandwidth on discardReadBytes, but // the test will fail in strict mode. @Override @@ -106,6 +125,13 @@ protected boolean discardReadBytesDoesNotMoveWritableBytes() { return false; } + @Test + public void testIsContiguous() { + ByteBuf buf = newBuffer(4); + assertFalse(buf.isContiguous()); + buf.release(); + } + /** * Tests the "getBufferFor" method */ @@ -132,6 +158,41 @@ public void testComponentAtOffset() { buf.release(); } + @Test + public void testToComponentIndex() { + CompositeByteBuf buf = (CompositeByteBuf) wrappedBuffer(new byte[]{1, 2, 3, 4, 5}, + new byte[]{4, 5, 6, 7, 8, 9, 26}, new byte[]{10, 9, 8, 7, 6, 5, 33}); + + // spot checks + assertEquals(0, buf.toComponentIndex(4)); + assertEquals(1, buf.toComponentIndex(5)); + assertEquals(2, buf.toComponentIndex(15)); + + //Loop through each byte + + byte index = 0; + + while (index < buf.capacity()) { + int cindex = buf.toComponentIndex(index++); + assertTrue(cindex >= 0 && cindex < buf.numComponents()); + } + + buf.release(); + } + + @Test + public void testToByteIndex() { + CompositeByteBuf buf = (CompositeByteBuf) wrappedBuffer(new byte[]{1, 2, 3, 4, 5}, + new byte[]{4, 5, 6, 7, 8, 9, 26}, new byte[]{10, 9, 8, 7, 6, 5, 33}); + + // spot checks + assertEquals(0, buf.toByteIndex(0)); + assertEquals(5, buf.toByteIndex(1)); + assertEquals(12, buf.toByteIndex(2)); + + buf.release(); + } + @Test public void testDiscardReadBytes3() { ByteBuf a, b; @@ -140,17 +201,13 @@ public void testDiscardReadBytes3() { wrappedBuffer(new byte[] 
{ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, 0, 5).order(order), wrappedBuffer(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 }, 5, 5).order(order)); a.skipBytes(6); - a.markReaderIndex(); b.skipBytes(6); - b.markReaderIndex(); assertEquals(a.readerIndex(), b.readerIndex()); a.readerIndex(a.readerIndex() - 1); b.readerIndex(b.readerIndex() - 1); assertEquals(a.readerIndex(), b.readerIndex()); a.writerIndex(a.writerIndex() - 1); - a.markWriterIndex(); b.writerIndex(b.writerIndex() - 1); - b.markWriterIndex(); assertEquals(a.writerIndex(), b.writerIndex()); a.writerIndex(a.writerIndex() + 1); b.writerIndex(b.writerIndex() + 1); @@ -162,12 +219,6 @@ public void testDiscardReadBytes3() { assertEquals(a.readerIndex(), b.readerIndex()); assertEquals(a.writerIndex(), b.writerIndex()); assertTrue(ByteBufUtil.equals(a, b)); - a.resetReaderIndex(); - b.resetReaderIndex(); - assertEquals(a.readerIndex(), b.readerIndex()); - a.resetWriterIndex(); - b.resetWriterIndex(); - assertEquals(a.writerIndex(), b.writerIndex()); assertTrue(ByteBufUtil.equals(a, b)); a.release(); @@ -666,7 +717,7 @@ public void testReferenceCounts3() { CompositeByteBuf buf = compositeBuffer(); assertThat(buf.refCnt(), is(1)); - List components = new ArrayList(); + List components = new ArrayList<>(); Collections.addAll(components, c1, c2, c3); buf.addComponents(components); @@ -744,6 +795,20 @@ public void testRemoveLastComponentWithOthersLeft() { buf.release(); } + @Test + public void testRemoveComponents() { + CompositeByteBuf buf = compositeBuffer(); + for (int i = 0; i < 10; i++) { + buf.addComponent(wrappedBuffer(new byte[]{1, 2})); + } + assertEquals(10, buf.numComponents()); + assertEquals(20, buf.capacity()); + buf.removeComponents(4, 3); + assertEquals(7, buf.numComponents()); + assertEquals(14, buf.capacity()); + buf.release(); + } + @Test public void testGatheringWritesHeap() throws Exception { testGatheringWrites(buffer().order(order), buffer().order(order)); @@ -925,7 +990,27 @@ private static void 
testGatheringWritesSingleBuf(ByteBuf buf1) throws Exception @Override @Test public void testInternalNioBuffer() { - // ignore + CompositeByteBuf buf = compositeBuffer(); + assertEquals(0, buf.internalNioBuffer(0, 0).remaining()); + + // If non-derived buffer is added, its internal buffer should be returned + ByteBuf concreteBuffer = directBuffer().writeByte(1); + buf.addComponent(concreteBuffer); + assertSame(concreteBuffer.internalNioBuffer(0, 1), buf.internalNioBuffer(0, 1)); + buf.release(); + + // In derived cases, the original internal buffer must not be used + buf = compositeBuffer(); + concreteBuffer = directBuffer().writeByte(1); + buf.addComponent(concreteBuffer.slice()); + assertNotSame(concreteBuffer.internalNioBuffer(0, 1), buf.internalNioBuffer(0, 1)); + buf.release(); + + buf = compositeBuffer(); + concreteBuffer = directBuffer().writeByte(1); + buf.addComponent(concreteBuffer.duplicate()); + assertNotSame(concreteBuffer.internalNioBuffer(0, 1), buf.internalNioBuffer(0, 1)); + buf.release(); } @Test @@ -1018,8 +1103,112 @@ public void testAddEmptyBufferInMiddle() { } @Test - public void testIterator() { + public void testInsertEmptyBufferInMiddle() { CompositeByteBuf cbuf = compositeBuffer(); + ByteBuf buf1 = buffer().writeByte((byte) 1); + cbuf.addComponent(true, buf1); + ByteBuf buf2 = buffer().writeByte((byte) 2); + cbuf.addComponent(true, buf2); + + // insert empty one between the first two + cbuf.addComponent(true, 1, EMPTY_BUFFER); + + assertEquals(2, cbuf.readableBytes()); + assertEquals((byte) 1, cbuf.readByte()); + assertEquals((byte) 2, cbuf.readByte()); + + assertEquals(2, cbuf.capacity()); + assertEquals(3, cbuf.numComponents()); + + byte[] dest = new byte[2]; + // should skip over the empty one, not throw a java.lang.Error :) + cbuf.getBytes(0, dest); + + assertArrayEquals(new byte[] {1, 2}, dest); + + cbuf.release(); + } + + @Test + public void testAddFlattenedComponents() { + testAddFlattenedComponents(false); + } + + @Test + public 
void testAddFlattenedComponentsWithWrappedComposite() { + testAddFlattenedComponents(true); + } + + private void testAddFlattenedComponents(boolean addWrapped) { + ByteBuf b1 = Unpooled.wrappedBuffer(new byte[] { 1, 2, 3 }); + CompositeByteBuf newComposite = newCompositeBuffer() + .addComponent(true, b1) + .addFlattenedComponents(true, b1.retain()) + .addFlattenedComponents(true, Unpooled.EMPTY_BUFFER); + + assertEquals(2, newComposite.numComponents()); + assertEquals(6, newComposite.capacity()); + assertEquals(6, newComposite.writerIndex()); + + // It is important to use a pooled allocator here to ensure + // the slices returned by readRetainedSlice are of type + // PooledSlicedByteBuf, which maintains an independent refcount + // (so that we can be sure to cover this case) + ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer() + .writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}); + + // use mixture of slice and retained slice + ByteBuf s1 = buffer.readRetainedSlice(2); + ByteBuf s2 = s1.retainedSlice(0, 2); + ByteBuf s3 = buffer.slice(0, 2).retain(); + ByteBuf s4 = s2.retainedSlice(0, 2); + buffer.release(); + + CompositeByteBuf compositeToAdd = compositeBuffer() + .addComponent(s1) + .addComponent(Unpooled.EMPTY_BUFFER) + .addComponents(s2, s3, s4); + // set readable range to be from middle of first component + // to middle of penultimate component + compositeToAdd.setIndex(1, 5); + + assertEquals(1, compositeToAdd.refCnt()); + assertEquals(1, s4.refCnt()); + + ByteBuf compositeCopy = compositeToAdd.copy(); + + if (addWrapped) { + compositeToAdd = new WrappedCompositeByteBuf(compositeToAdd); + } + newComposite.addFlattenedComponents(true, compositeToAdd); + + // verify that added range matches + ByteBufUtil.equals(compositeCopy, 0, + newComposite, 6, compositeCopy.readableBytes()); + + // should not include empty component or last component + // (latter outside of the readable range) + assertEquals(5, newComposite.numComponents()); + assertEquals(10, 
newComposite.capacity()); + assertEquals(10, newComposite.writerIndex()); + + assertEquals(0, compositeToAdd.refCnt()); + // s4 wasn't in added range so should have been jettisoned + assertEquals(0, s4.refCnt()); + assertEquals(1, newComposite.refCnt()); + + // releasing composite should release the remaining components + newComposite.release(); + assertEquals(0, newComposite.refCnt()); + assertEquals(0, s1.refCnt()); + assertEquals(0, s2.refCnt()); + assertEquals(0, s3.refCnt()); + assertEquals(0, b1.refCnt()); + } + + @Test + public void testIterator() { + CompositeByteBuf cbuf = newCompositeBuffer(); cbuf.addComponent(EMPTY_BUFFER); cbuf.addComponent(EMPTY_BUFFER); @@ -1041,7 +1230,7 @@ public void testIterator() { @Test public void testEmptyIterator() { - CompositeByteBuf cbuf = compositeBuffer(); + CompositeByteBuf cbuf = newCompositeBuffer(); Iterator it = cbuf.iterator(); assertFalse(it.hasNext()); @@ -1055,9 +1244,9 @@ public void testEmptyIterator() { cbuf.release(); } - @Test(expected = ConcurrentModificationException.class) + @Test public void testIteratorConcurrentModificationAdd() { - CompositeByteBuf cbuf = compositeBuffer(); + CompositeByteBuf cbuf = newCompositeBuffer(); cbuf.addComponent(EMPTY_BUFFER); Iterator it = cbuf.iterator(); @@ -1065,15 +1254,15 @@ public void testIteratorConcurrentModificationAdd() { assertTrue(it.hasNext()); try { - it.next(); + assertThrows(ConcurrentModificationException.class, it::next); } finally { cbuf.release(); } } - @Test(expected = ConcurrentModificationException.class) + @Test public void testIteratorConcurrentModificationRemove() { - CompositeByteBuf cbuf = compositeBuffer(); + CompositeByteBuf cbuf = newCompositeBuffer(); cbuf.addComponent(EMPTY_BUFFER); Iterator it = cbuf.iterator(); @@ -1081,7 +1270,7 @@ public void testIteratorConcurrentModificationRemove() { assertTrue(it.hasNext()); try { - it.next(); + assertThrows(ConcurrentModificationException.class, it::next); } finally { cbuf.release(); } @@ -1116,6 
+1305,97 @@ public void testReleasesItsComponents() { assertEquals(0, buffer.refCnt()); } + @Test + public void testReleasesItsComponents2() { + // It is important to use a pooled allocator here to ensure + // the slices returned by readRetainedSlice are of type + // PooledSlicedByteBuf, which maintains an independent refcount + // (so that we can be sure to cover this case) + ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer(); // 1 + + buffer.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}); + + // use readRetainedSlice this time - produces different kind of slices + ByteBuf s1 = buffer.readRetainedSlice(2); // 2 + ByteBuf s2 = s1.readRetainedSlice(2); // 3 + ByteBuf s3 = s2.readRetainedSlice(2); // 4 + ByteBuf s4 = s3.readRetainedSlice(2); // 5 + + ByteBuf composite = newCompositeBuffer() + .addComponent(s1) + .addComponents(s2, s3, s4) + .order(ByteOrder.LITTLE_ENDIAN); + + assertEquals(1, composite.refCnt()); + assertEquals(2, buffer.refCnt()); + + // releasing composite should release the 4 components + composite.release(); + assertEquals(0, composite.refCnt()); + assertEquals(1, buffer.refCnt()); + + // last remaining ref to buffer + buffer.release(); + assertEquals(0, buffer.refCnt()); + } + + @Test + public void testReleasesOnShrink() { + + ByteBuf b1 = Unpooled.buffer(2).writeShort(1); + ByteBuf b2 = Unpooled.buffer(2).writeShort(2); + + // composite takes ownership of s1 and s2 + ByteBuf composite = newCompositeBuffer() + .addComponents(b1, b2); + + assertEquals(4, composite.capacity()); + + // reduce capacity down to two, will drop the second component + composite.capacity(2); + assertEquals(2, composite.capacity()); + + // releasing composite should release the components + composite.release(); + assertEquals(0, composite.refCnt()); + assertEquals(0, b1.refCnt()); + assertEquals(0, b2.refCnt()); + } + + @Test + public void testReleasesOnShrink2() { + // It is important to use a pooled allocator here to ensure + // the slices returned by 
readRetainedSlice are of type + // PooledSlicedByteBuf, which maintains an independent refcount + // (so that we can be sure to cover this case) + ByteBuf buffer = PooledByteBufAllocator.DEFAULT.buffer(); + + buffer.writeShort(1).writeShort(2); + + ByteBuf b1 = buffer.readRetainedSlice(2); + ByteBuf b2 = b1.retainedSlice(b1.readerIndex(), 2); + + // composite takes ownership of b1 and b2 + ByteBuf composite = newCompositeBuffer() + .addComponents(b1, b2); + + assertEquals(4, composite.capacity()); + + // reduce capacity down to two, will drop the second component + composite.capacity(2); + assertEquals(2, composite.capacity()); + + // releasing composite should release the components + composite.release(); + assertEquals(0, composite.refCnt()); + assertEquals(0, b1.refCnt()); + assertEquals(0, b2.refCnt()); + + // release last remaining ref to buffer + buffer.release(); + assertEquals(0, buffer.refCnt()); + } + @Test public void testAllocatorIsSameWhenCopy() { testAllocatorIsSameWhenCopy(false); @@ -1136,4 +1416,271 @@ private void testAllocatorIsSameWhenCopy(boolean withIndexAndLength) { buffer.release(); copy.release(); } + + @Test + public void testDecomposeMultiple() { + testDecompose(150, 500, 3); + } + + @Test + public void testDecomposeOne() { + testDecompose(310, 50, 1); + } + + @Test + public void testDecomposeNone() { + testDecompose(310, 0, 0); + } + + private void testDecompose(int offset, int length, int expectedListSize) { + byte[] bytes = new byte[1024]; + ThreadLocalRandom.current().nextBytes(bytes); + ByteBuf buf = wrappedBuffer(bytes); + + CompositeByteBuf composite = newCompositeBuffer(); + composite.addComponents(true, + buf.retainedSlice(100, 200), + buf.retainedSlice(300, 400), + buf.retainedSlice(700, 100)); + + ByteBuf slice = composite.slice(offset, length); + List bufferList = composite.decompose(offset, length); + assertEquals(expectedListSize, bufferList.size()); + ByteBuf wrapped = wrappedBuffer(bufferList.toArray(new ByteBuf[0])); + + 
assertEquals(slice, wrapped); + composite.release(); + buf.release(); + + for (ByteBuf buffer: bufferList) { + assertEquals(0, buffer.refCnt()); + } + } + + @Test + public void testComponentsLessThanLowerBound() { + try { + new CompositeByteBuf(ALLOC, true, 0); + fail(); + } catch (IllegalArgumentException e) { + assertEquals("maxNumComponents: 0 (expected: >= 1)", e.getMessage()); + } + } + + @Test + public void testComponentsEqualToLowerBound() { + assertCompositeBufCreated(1); + } + + @Test + public void testComponentsGreaterThanLowerBound() { + assertCompositeBufCreated(5); + } + + /** + * Assert that a new {@linkplain CompositeByteBuf} was created successfully with the desired number of max + * components. + */ + private static void assertCompositeBufCreated(int expectedMaxComponents) { + CompositeByteBuf buf = new CompositeByteBuf(ALLOC, true, expectedMaxComponents); + + assertEquals(expectedMaxComponents, buf.maxNumComponents()); + assertTrue(buf.release()); + } + + @Test + public void testDiscardSomeReadBytesCorrectlyUpdatesLastAccessed() { + testDiscardCorrectlyUpdatesLastAccessed(true); + } + + @Test + public void testDiscardReadBytesCorrectlyUpdatesLastAccessed() { + testDiscardCorrectlyUpdatesLastAccessed(false); + } + + private void testDiscardCorrectlyUpdatesLastAccessed(boolean discardSome) { + CompositeByteBuf cbuf = newCompositeBuffer(); + List buffers = new ArrayList(4); + for (int i = 0; i < 4; i++) { + ByteBuf buf = buffer().writeInt(i); + cbuf.addComponent(true, buf); + buffers.add(buf); + } + + // Skip the first 2 bytes which means even if we call discard*ReadBytes() later we can no drop the first + // component as it is still used. + cbuf.skipBytes(2); + if (discardSome) { + cbuf.discardSomeReadBytes(); + } else { + cbuf.discardReadBytes(); + } + assertEquals(4, cbuf.numComponents()); + + // Now skip 3 bytes which means we should be able to drop the first component on the next discard*ReadBytes() + // call. 
+ cbuf.skipBytes(3); + + if (discardSome) { + cbuf.discardSomeReadBytes(); + } else { + cbuf.discardReadBytes(); + } + assertEquals(3, cbuf.numComponents()); + // Now skip again 3 bytes which should bring our readerIndex == start of the 3 component. + cbuf.skipBytes(3); + + // Read one int (4 bytes) which should bring our readerIndex == start of the 4 component. + assertEquals(2, cbuf.readInt()); + if (discardSome) { + cbuf.discardSomeReadBytes(); + } else { + cbuf.discardReadBytes(); + } + + // Now all except the last component should have been dropped / released. + assertEquals(1, cbuf.numComponents()); + assertEquals(3, cbuf.readInt()); + if (discardSome) { + cbuf.discardSomeReadBytes(); + } else { + cbuf.discardReadBytes(); + } + assertEquals(0, cbuf.numComponents()); + + // These should have been released already. + for (ByteBuf buffer: buffers) { + assertEquals(0, buffer.refCnt()); + } + assertTrue(cbuf.release()); + } + + // See https://github.com/netty/netty/issues/11612 + @Test + public void testAddComponentWithNullEntry() { + final ByteBuf buffer = Unpooled.buffer(8).writeZero(8); + final CompositeByteBuf compositeByteBuf = compositeBuffer(Integer.MAX_VALUE); + try { + compositeByteBuf.addComponents(true, new ByteBuf[] { buffer, null }); + assertEquals(8, compositeByteBuf.readableBytes()); + assertEquals(1, compositeByteBuf.numComponents()); + } finally { + compositeByteBuf.release(); + } + } + + @Test + public void testOverflowWhileAddingComponent() { + int capacity = 1024 * 1024; // 1MB + ByteBuf buffer = Unpooled.buffer(capacity).writeZero(capacity); + CompositeByteBuf compositeByteBuf = compositeBuffer(Integer.MAX_VALUE); + + try { + assertThrows(IllegalArgumentException.class, () -> { + for (int i = 0; i >= 0; i += buffer.readableBytes()) { + ByteBuf duplicate = buffer.duplicate(); + compositeByteBuf.addComponent(duplicate); + duplicate.retain(); + } + }); + } finally { + compositeByteBuf.release(); + } + } + + @Test + public void 
testOverflowWhileAddingComponentsViaVarargs() { + int capacity = 1024 * 1024; // 1MB + ByteBuf buffer = Unpooled.buffer(capacity).writeZero(capacity); + CompositeByteBuf compositeByteBuf = compositeBuffer(Integer.MAX_VALUE); + + try { + assertThrows(IllegalArgumentException.class, () -> { + for (int i = 0; i >= 0; i += buffer.readableBytes()) { + ByteBuf duplicate = buffer.duplicate(); + compositeByteBuf.addComponents(duplicate); + duplicate.retain(); + } + }); + } finally { + compositeByteBuf.release(); + } + } + + @Test + public void testOverflowWhileAddingComponentsViaIterable() { + int capacity = 1024 * 1024; // 1MB + ByteBuf buffer = Unpooled.buffer(capacity).writeZero(capacity); + CompositeByteBuf compositeByteBuf = compositeBuffer(Integer.MAX_VALUE); + + try { + assertThrows(IllegalArgumentException.class, () -> { + for (int i = 0; i >= 0; i += buffer.readableBytes()) { + ByteBuf duplicate = buffer.duplicate(); + compositeByteBuf.addComponents(Collections.singletonList(duplicate)); + duplicate.retain(); + } + }); + } finally { + compositeByteBuf.release(); + } + } + + @Test + public void testOverflowWhileUseConstructorWithOffset() { + int capacity = 1024 * 1024; // 1MB + final ByteBuf buffer = Unpooled.buffer(capacity).writeZero(capacity); + final List buffers = new ArrayList(); + for (long i = 0; i <= Integer.MAX_VALUE; i += capacity) { + buffers.add(buffer.duplicate()); + } + // Add one more + buffers.add(buffer.duplicate()); + + try { + assertThrows(IllegalArgumentException.class, () -> { + ByteBuf[] bufferArray = buffers.toArray(new ByteBuf[0]); + new CompositeByteBuf(ALLOC, false, Integer.MAX_VALUE, bufferArray, 0); + }); + } finally { + buffer.release(); + } + } + + @Test + public void testNotOverflowWhileUseConstructorWithOffset() { + int capacity = 1024 * 1024; // 1MB + final ByteBuf buffer = Unpooled.buffer(capacity).writeZero(capacity); + final List buffers = new ArrayList(); + for (long i = 0; i <= Integer.MAX_VALUE; i += capacity) { + 
buffers.add(buffer.duplicate()); + } + // Add one more + buffers.add(buffer.duplicate()); + + ByteBuf[] bufferArray = buffers.toArray(new ByteBuf[0]); + CompositeByteBuf compositeByteBuf = + new CompositeByteBuf(ALLOC, false, Integer.MAX_VALUE, bufferArray, bufferArray.length - 1); + compositeByteBuf.release(); + } + + @Test + public void sliceOfCompositeBufferMustThrowISEAfterDiscardBytes() { + CompositeByteBuf composite = compositeBuffer(); + composite.addComponent(true, buffer(8).writeZero(8)); + + ByteBuf slice = composite.retainedSlice(); + composite.skipBytes(slice.readableBytes()); + composite.discardSomeReadBytes(); + + try { + slice.readByte(); + fail("Expected readByte of discarded slice to throw."); + } catch (IllegalStateException ignore) { + // Good. + } finally { + slice.release(); + composite.release(); + } + } } diff --git a/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java index eb76157ab1e..6b75a1e42fe 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractPooledByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,13 +15,15 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public abstract class AbstractPooledByteBufTest extends AbstractByteBufTest { @@ -31,11 +33,7 @@ public abstract class AbstractPooledByteBufTest extends AbstractByteBufTest { protected ByteBuf newBuffer(int length, int maxCapacity) { ByteBuf buffer = alloc(length, maxCapacity); - // Testing if the writerIndex and readerIndex are correct when allocate and also after we reset the mark. - assertEquals(0, buffer.writerIndex()); - assertEquals(0, buffer.readerIndex()); - buffer.resetReaderIndex(); - buffer.resetWriterIndex(); + // Testing if the writerIndex and readerIndex are correct when allocate. 
assertEquals(0, buffer.writerIndex()); assertEquals(0, buffer.readerIndex()); return buffer; @@ -49,14 +47,91 @@ public void ensureWritableWithEnoughSpaceShouldNotThrow() { buf.release(); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void ensureWritableWithNotEnoughSpaceShouldThrow() { ByteBuf buf = newBuffer(1, 10); try { - buf.ensureWritable(11); - fail(); + assertThrows(IndexOutOfBoundsException.class, () -> buf.ensureWritable(11)); } finally { buf.release(); } } + + @Override + @Test + public void testMaxFastWritableBytes() { + ByteBuf buffer = newBuffer(150, 500).writerIndex(100); + assertEquals(50, buffer.writableBytes()); + assertEquals(150, buffer.capacity()); + assertEquals(500, buffer.maxCapacity()); + assertEquals(400, buffer.maxWritableBytes()); + + int chunkSize = pooledByteBuf(buffer).maxLength; + assertTrue(chunkSize >= 150); + int remainingInAlloc = Math.min(chunkSize - 100, 400); + assertEquals(remainingInAlloc, buffer.maxFastWritableBytes()); + + // write up to max, chunk alloc should not change (same handle) + long handleBefore = pooledByteBuf(buffer).handle; + buffer.writeBytes(new byte[remainingInAlloc]); + assertEquals(handleBefore, pooledByteBuf(buffer).handle); + + assertEquals(0, buffer.maxFastWritableBytes()); + // writing one more should trigger a reallocation (new handle) + buffer.writeByte(7); + assertNotEquals(handleBefore, pooledByteBuf(buffer).handle); + + // should not exceed maxCapacity even if chunk alloc does + buffer.capacity(500); + assertEquals(500 - buffer.writerIndex(), buffer.maxFastWritableBytes()); + buffer.release(); + } + + private static PooledByteBuf pooledByteBuf(ByteBuf buffer) { + // might need to unwrap if swapped (LE) and/or leak-aware-wrapped + while (!(buffer instanceof PooledByteBuf)) { + buffer = buffer.unwrap(); + } + return (PooledByteBuf) buffer; + } + + @Test + public void testEnsureWritableDoesntGrowTooMuch() { + ByteBuf buffer = newBuffer(150, 500).writerIndex(100); + + 
assertEquals(50, buffer.writableBytes()); + int fastWritable = buffer.maxFastWritableBytes(); + assertTrue(fastWritable > 50); + + long handleBefore = pooledByteBuf(buffer).handle; + + // capacity expansion should not cause reallocation + // (should grow precisely the specified amount) + buffer.ensureWritable(fastWritable); + assertEquals(handleBefore, pooledByteBuf(buffer).handle); + assertEquals(100 + fastWritable, buffer.capacity()); + assertEquals(buffer.writableBytes(), buffer.maxFastWritableBytes()); + buffer.release(); + } + + @Test + public void testIsContiguous() { + ByteBuf buf = newBuffer(4); + assertTrue(buf.isContiguous()); + buf.release(); + } + + @Test + public void distinctBuffersMustNotOverlap() { + ByteBuf a = newBuffer(16384); + ByteBuf b = newBuffer(65536); + a.setByte(a.capacity() - 1, 1); + b.setByte(0, 2); + try { + assertEquals(1, a.getByte(a.capacity() - 1)); + } finally { + a.release(); + b.release(); + } + } } diff --git a/buffer/src/test/java/io/netty/buffer/AbstractReferenceCountedByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AbstractReferenceCountedByteBufTest.java index 48fe4153325..9207462cfb9 100644 --- a/buffer/src/test/java/io/netty/buffer/AbstractReferenceCountedByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AbstractReferenceCountedByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,7 +16,7 @@ package io.netty.buffer; import io.netty.util.IllegalReferenceCountException; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.InputStream; @@ -27,48 +27,62 @@ import java.nio.channels.GatheringByteChannel; import java.nio.channels.ScatteringByteChannel; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class AbstractReferenceCountedByteBufTest { - @Test(expected = IllegalReferenceCountException.class) + @Test public void testRetainOverflow() { AbstractReferenceCountedByteBuf referenceCounted = newReferenceCounted(); referenceCounted.setRefCnt(Integer.MAX_VALUE); assertEquals(Integer.MAX_VALUE, referenceCounted.refCnt()); - referenceCounted.retain(); + assertThrows(IllegalReferenceCountException.class, referenceCounted::retain); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testRetainOverflow2() { AbstractReferenceCountedByteBuf referenceCounted = newReferenceCounted(); assertEquals(1, referenceCounted.refCnt()); - referenceCounted.retain(Integer.MAX_VALUE); + assertThrows(IllegalReferenceCountException.class, () -> referenceCounted.retain(Integer.MAX_VALUE)); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testReleaseOverflow() { AbstractReferenceCountedByteBuf referenceCounted = newReferenceCounted(); referenceCounted.setRefCnt(0); assertEquals(0, 
referenceCounted.refCnt()); - referenceCounted.release(Integer.MAX_VALUE); + assertThrows(IllegalReferenceCountException.class, () -> referenceCounted.release(Integer.MAX_VALUE)); } - @Test(expected = IllegalReferenceCountException.class) + @Test + public void testReleaseErrorMessage() { + AbstractReferenceCountedByteBuf referenceCounted = newReferenceCounted(); + assertTrue(referenceCounted.release()); + try { + referenceCounted.release(1); + fail("IllegalReferenceCountException didn't occur"); + } catch (IllegalReferenceCountException e) { + assertEquals("refCnt: 0, decrement: 1", e.getMessage()); + } + } + + @Test public void testRetainResurrect() { AbstractReferenceCountedByteBuf referenceCounted = newReferenceCounted(); assertTrue(referenceCounted.release()); assertEquals(0, referenceCounted.refCnt()); - referenceCounted.retain(); + assertThrows(IllegalReferenceCountException.class, referenceCounted::retain); } - @Test(expected = IllegalReferenceCountException.class) + @Test public void testRetainResurrect2() { AbstractReferenceCountedByteBuf referenceCounted = newReferenceCounted(); assertTrue(referenceCounted.release()); assertEquals(0, referenceCounted.refCnt()); - referenceCounted.retain(2); + assertThrows(IllegalReferenceCountException.class, () -> referenceCounted.retain(2)); } private static AbstractReferenceCountedByteBuf newReferenceCounted() { diff --git a/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareByteBufTest.java index 4e7747b3b62..e79b35b277b 100644 --- a/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,12 @@ */ package io.netty.buffer; +import static io.netty.buffer.Unpooled.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; + +import org.junit.jupiter.api.Test; + +import io.netty.util.CharsetUtil; import io.netty.util.ResourceLeakTracker; public class AdvancedLeakAwareByteBufTest extends SimpleLeakAwareByteBufTest { @@ -28,4 +34,20 @@ protected Class leakClass() { protected SimpleLeakAwareByteBuf wrap(ByteBuf buffer, ResourceLeakTracker tracker) { return new AdvancedLeakAwareByteBuf(buffer, tracker); } + + @Test + public void testAddComponentWithLeakAwareByteBuf() { + NoopResourceLeakTracker tracker = new NoopResourceLeakTracker(); + + ByteBuf buffer = wrappedBuffer("hello world".getBytes(CharsetUtil.US_ASCII)).slice(6, 5); + ByteBuf leakAwareBuf = wrap(buffer, tracker); + + CompositeByteBuf composite = compositeBuffer(); + composite.addComponent(true, leakAwareBuf); + byte[] result = new byte[5]; + ByteBuf bb = composite.component(0); + bb.readBytes(result); + assertArrayEquals("world".getBytes(CharsetUtil.US_ASCII), result); + composite.release(); + } } diff --git a/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBufTest.java index fd56e723326..e5742f4d9d7 100644 --- a/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/AdvancedLeakAwareCompositeByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/test/java/io/netty/buffer/AlignedPooledByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/AlignedPooledByteBufAllocatorTest.java new file mode 100644 index 00000000000..59fcf77c0e0 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/AlignedPooledByteBufAllocatorTest.java @@ -0,0 +1,33 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer; + +public class AlignedPooledByteBufAllocatorTest extends PooledByteBufAllocatorTest { + @Override + protected PooledByteBufAllocator newAllocator(boolean preferDirect) { + int directMemoryCacheAlignment = 1; + return new PooledByteBufAllocator( + preferDirect, + PooledByteBufAllocator.defaultNumHeapArena(), + PooledByteBufAllocator.defaultNumDirectArena(), + PooledByteBufAllocator.defaultPageSize(), + 11, + PooledByteBufAllocator.defaultSmallCacheSize(), + 64, + PooledByteBufAllocator.defaultUseCacheForAllThreads(), + directMemoryCacheAlignment); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/BigEndianCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/BigEndianCompositeByteBufTest.java index 2ae0aaa6ab5..50d3e12d979 100644 --- a/buffer/src/test/java/io/netty/buffer/BigEndianCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/BigEndianCompositeByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,8 +15,7 @@ */ package io.netty.buffer; - -import org.junit.Test; +import org.junit.jupiter.api.Test; /** * Tests big-endian composite channel buffers @@ -27,9 +26,9 @@ public BigEndianCompositeByteBufTest() { } @Override - @Test(expected = UnsupportedOperationException.class) + @Test public void testInternalNioBufferAfterRelease() { - super.testInternalNioBufferAfterRelease(); + testInternalNioBufferAfterRelease0(UnsupportedOperationException.class); } } diff --git a/buffer/src/test/java/io/netty/buffer/BigEndianDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/BigEndianDirectByteBufTest.java index 6943c2f58e4..e7a14630744 100644 --- a/buffer/src/test/java/io/netty/buffer/BigEndianDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/BigEndianDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,10 +15,14 @@ */ package io.netty.buffer; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; import java.nio.ByteOrder; +import org.junit.jupiter.api.Test; + /** * Tests big-endian direct channel buffers */ @@ -35,4 +39,11 @@ protected ByteBuf newBuffer(int length, int maxCapacity) { protected ByteBuf newDirectBuffer(int length, int maxCapacity) { return new UnpooledDirectByteBuf(UnpooledByteBufAllocator.DEFAULT, length, maxCapacity); } + + @Test + public void testIsContiguous() { + ByteBuf buf = newBuffer(4); + assertTrue(buf.isContiguous()); + buf.release(); + } } diff --git a/buffer/src/test/java/io/netty/buffer/BigEndianHeapByteBufTest.java b/buffer/src/test/java/io/netty/buffer/BigEndianHeapByteBufTest.java index 0b14deb80c6..fd2ecb2c1da 100644 --- a/buffer/src/test/java/io/netty/buffer/BigEndianHeapByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/BigEndianHeapByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,9 +15,10 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests big-endian heap channel buffers @@ -31,13 +32,14 @@ protected ByteBuf newBuffer(int length, int maxCapacity) { return buffer; } - @Test(expected = NullPointerException.class) + @Test public void shouldNotAllowNullInConstructor1() { - new UnpooledHeapByteBuf(null, new byte[1], 0); + assertThrows(NullPointerException.class, () -> new UnpooledHeapByteBuf(null, new byte[1], 0)); } - @Test(expected = NullPointerException.class) + @Test public void shouldNotAllowNullInConstructor2() { - new UnpooledHeapByteBuf(UnpooledByteBufAllocator.DEFAULT, null, 0); + assertThrows(NullPointerException.class, + () -> new UnpooledHeapByteBuf(UnpooledByteBufAllocator.DEFAULT, null, 0)); } } diff --git a/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeDirectByteBufTest.java index 46cb651e0d5..a2de7f653bc 100644 --- a/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,15 +17,15 @@ import io.netty.util.internal.PlatformDependent; -import org.junit.Assume; -import org.junit.Before; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; public class BigEndianUnsafeDirectByteBufTest extends BigEndianDirectByteBufTest { - @Before + @BeforeEach @Override public void init() { - Assume.assumeTrue("sun.misc.Unsafe not found, skip tests", PlatformDependent.hasUnsafe()); + Assumptions.assumeTrue(PlatformDependent.hasUnsafe(), "sun.misc.Unsafe not found, skip tests"); super.init(); } diff --git a/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeNoCleanerDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeNoCleanerDirectByteBufTest.java index feb4df3a510..19c053e459d 100644 --- a/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeNoCleanerDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/BigEndianUnsafeNoCleanerDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,18 +15,17 @@ */ package io.netty.buffer; - import io.netty.util.internal.PlatformDependent; -import org.junit.Assume; -import org.junit.Before; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; public class BigEndianUnsafeNoCleanerDirectByteBufTest extends BigEndianDirectByteBufTest { - @Before + @BeforeEach @Override public void init() { - Assume.assumeTrue("java.nio.DirectByteBuffer.(long, int) not found, skip tests", - PlatformDependent.useDirectBufferNoCleaner()); + Assumptions.assumeTrue(PlatformDependent.useDirectBufferNoCleaner(), + "java.nio.DirectByteBuffer.(long, int) not found, skip tests"); super.init(); } diff --git a/buffer/src/test/java/io/netty/buffer/ByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/ByteBufAllocatorTest.java index 71ddd4a7743..b538953c769 100644 --- a/buffer/src/test/java/io/netty/buffer/ByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/ByteBufAllocatorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,9 +15,9 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public abstract class ByteBufAllocatorTest { diff --git a/buffer/src/test/java/io/netty/buffer/ByteBufDerivationTest.java b/buffer/src/test/java/io/netty/buffer/ByteBufDerivationTest.java index 2983ba906f6..ab4a59c3c71 100644 --- a/buffer/src/test/java/io/netty/buffer/ByteBufDerivationTest.java +++ b/buffer/src/test/java/io/netty/buffer/ByteBufDerivationTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,14 +16,13 @@ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.nio.ByteOrder; import java.util.Random; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.*; -import static org.hamcrest.Matchers.sameInstance; -import static org.junit.Assert.*; /** * Tests wrapping a wrapped buffer does not go way too deep chaining. 
diff --git a/buffer/src/test/java/io/netty/buffer/ByteBufStreamTest.java b/buffer/src/test/java/io/netty/buffer/ByteBufStreamTest.java index f05594d1c12..cd9d42e262e 100644 --- a/buffer/src/test/java/io/netty/buffer/ByteBufStreamTest.java +++ b/buffer/src/test/java/io/netty/buffer/ByteBufStreamTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,13 +15,20 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.EOFException; import java.nio.charset.Charset; import static io.netty.util.internal.EmptyArrays.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Tests channel buffer streams @@ -187,27 +194,110 @@ public void testReadLine() throws Exception { String s = in.readLine(); assertNull(s); + in.close(); + + ByteBuf buf2 = Unpooled.buffer(); + int charCount = 7; //total chars in the string below without new line characters + byte[] abc = "\na\n\nb\r\nc\nd\ne".getBytes(utf8); + buf2.writeBytes(abc); - int charCount = 5; //total chars in the string below without new line characters - byte[] abc = "a\nb\r\nc\nd\ne".getBytes(utf8); - buf.writeBytes(abc); - in.mark(charCount); - assertEquals("a", 
in.readLine()); - assertEquals("b", in.readLine()); - assertEquals("c", in.readLine()); - assertEquals("d", in.readLine()); - assertEquals("e", in.readLine()); + ByteBufInputStream in2 = new ByteBufInputStream(buf2, true); + in2.mark(charCount); + assertEquals("", in2.readLine()); + assertEquals("a", in2.readLine()); + assertEquals("", in2.readLine()); + assertEquals("b", in2.readLine()); + assertEquals("c", in2.readLine()); + assertEquals("d", in2.readLine()); + assertEquals("e", in2.readLine()); assertNull(in.readLine()); - in.reset(); + in2.reset(); int count = 0; - while (in.readLine() != null) { + while (in2.readLine() != null) { ++count; if (count > charCount) { fail("readLine() should have returned null"); } } assertEquals(charCount, count); + in2.close(); + } + + @Test + public void testRead() throws Exception { + // case1 + ByteBuf buf = Unpooled.buffer(16); + buf.writeBytes(new byte[]{1, 2, 3, 4, 5, 6}); + + ByteBufInputStream in = new ByteBufInputStream(buf, 3); + + assertEquals(1, in.read()); + assertEquals(2, in.read()); + assertEquals(3, in.read()); + assertEquals(-1, in.read()); + assertEquals(-1, in.read()); + assertEquals(-1, in.read()); + + buf.release(); + in.close(); + + // case2 + ByteBuf buf2 = Unpooled.buffer(16); + buf2.writeBytes(new byte[]{1, 2, 3, 4, 5, 6}); + + ByteBufInputStream in2 = new ByteBufInputStream(buf2, 4); + + assertEquals(1, in2.read()); + assertEquals(2, in2.read()); + assertEquals(3, in2.read()); + assertEquals(4, in2.read()); + assertNotEquals(5, in2.read()); + assertEquals(-1, in2.read()); + + buf2.release(); + in2.close(); + } + + @Test + public void testReadLineLengthRespected1() throws Exception { + // case1 + ByteBuf buf = Unpooled.buffer(16); + buf.writeBytes(new byte[] { 1, 2, 3, 4, 5, 6 }); + + ByteBufInputStream in = new ByteBufInputStream(buf, 0); + + assertNull(in.readLine()); + buf.release(); in.close(); } + + @Test + public void testReadLineLengthRespected2() throws Exception { + ByteBuf buf2 = 
Unpooled.buffer(16); + buf2.writeBytes(new byte[] { 'A', 'B', '\n', 'C', 'E', 'F'}); + + ByteBufInputStream in2 = new ByteBufInputStream(buf2, 4); + + assertEquals("AB", in2.readLine()); + assertEquals("C", in2.readLine()); + assertNull(in2.readLine()); + buf2.release(); + in2.close(); + } + + @Test + public void testReadByteLengthRespected() throws Exception { + // case1 + ByteBuf buf = Unpooled.buffer(16); + buf.writeBytes(new byte[] { 1, 2, 3, 4, 5, 6 }); + + ByteBufInputStream in = new ByteBufInputStream(buf, 0); + try { + assertThrows(EOFException.class, in::readByte); + } finally { + buf.release(); + in.close(); + } + } } diff --git a/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java b/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java index 38e36e20db3..9cd7a4af4fd 100644 --- a/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java +++ b/buffer/src/test/java/io/netty/buffer/ByteBufUtilTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,25 +17,64 @@ import io.netty.util.AsciiString; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import java.nio.ByteOrder; import java.nio.charset.Charset; import java.util.ArrayList; import java.util.Arrays; +import java.util.Collection; import java.util.List; import java.util.Random; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; import static io.netty.buffer.Unpooled.unreleasableBuffer; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.greaterThan; +import static org.hamcrest.Matchers.is; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeTrue; public class ByteBufUtilTest { + private static final String PARAMETERIZED_NAME = "bufferType = {0}"; + + private enum BufferType { + DIRECT_UNPOOLED, DIRECT_POOLED, HEAP_POOLED, HEAP_UNPOOLED + } + + private ByteBuf buffer(BufferType bufferType, int capacity) { + switch (bufferType) { + + case 
DIRECT_UNPOOLED: + return Unpooled.directBuffer(capacity); + case HEAP_UNPOOLED: + return Unpooled.buffer(capacity); + case DIRECT_POOLED: + return PooledByteBufAllocator.DEFAULT.directBuffer(capacity); + case HEAP_POOLED: + return PooledByteBufAllocator.DEFAULT.buffer(capacity); + default: + throw new AssertionError("unexpected buffer type: " + bufferType); + } + } + + public static Collection noUnsafe() { + return Arrays.asList(new Object[][] { + { BufferType.DIRECT_POOLED }, + { BufferType.DIRECT_UNPOOLED }, + { BufferType.HEAP_POOLED }, + { BufferType.HEAP_UNPOOLED } + }); + } + @Test public void decodeRandomHexBytesWithEvenLength() { decodeRandomHexBytes(256); @@ -58,14 +97,43 @@ private static void decodeRandomHexBytes(int len) { } } - @Test(expected = IllegalArgumentException.class) + @Test public void decodeHexDumpWithOddLength() { - ByteBufUtil.decodeHexDump("abc"); + assertThrows(IllegalArgumentException.class, () -> ByteBufUtil.decodeHexDump("abc")); } - @Test(expected = IllegalArgumentException.class) + @Test public void decodeHexDumpWithInvalidChar() { - ByteBufUtil.decodeHexDump("fg"); + assertThrows(IllegalArgumentException.class, () -> ByteBufUtil.decodeHexDump("fg")); + } + + @Test + public void testIndexOf() { + ByteBuf haystack = Unpooled.copiedBuffer("abc123", CharsetUtil.UTF_8); + assertEquals(0, ByteBufUtil.indexOf(Unpooled.copiedBuffer("a", CharsetUtil.UTF_8), haystack)); + assertEquals(1, ByteBufUtil.indexOf(Unpooled.copiedBuffer("bc".getBytes(CharsetUtil.UTF_8)), haystack)); + assertEquals(2, ByteBufUtil.indexOf(Unpooled.copiedBuffer("c".getBytes(CharsetUtil.UTF_8)), haystack)); + assertEquals(0, ByteBufUtil.indexOf(Unpooled.copiedBuffer("abc12".getBytes(CharsetUtil.UTF_8)), haystack)); + assertEquals(-1, ByteBufUtil.indexOf(Unpooled.copiedBuffer("abcdef".getBytes(CharsetUtil.UTF_8)), haystack)); + assertEquals(-1, ByteBufUtil.indexOf(Unpooled.copiedBuffer("abc12x".getBytes(CharsetUtil.UTF_8)), haystack)); + assertEquals(-1, 
ByteBufUtil.indexOf(Unpooled.copiedBuffer("abc123def".getBytes(CharsetUtil.UTF_8)), haystack)); + + final ByteBuf needle = Unpooled.copiedBuffer("abc12", CharsetUtil.UTF_8); + haystack.readerIndex(1); + needle.readerIndex(1); + assertEquals(0, ByteBufUtil.indexOf(needle, haystack)); + haystack.readerIndex(2); + needle.readerIndex(3); + assertEquals(1, ByteBufUtil.indexOf(needle, haystack)); + haystack.readerIndex(1); + needle.readerIndex(2); + assertEquals(1, ByteBufUtil.indexOf(needle, haystack)); + haystack.release(); + + haystack = Unpooled.copiedBuffer("123aab123", CharsetUtil.UTF_8); + assertEquals(3, ByteBufUtil.indexOf(Unpooled.copiedBuffer("aab", CharsetUtil.UTF_8), haystack)); + haystack.release(); + needle.release(); } @Test @@ -119,7 +187,7 @@ public void notEqualsBufferOverflow() { Math.max(b1.length, b2.length) * 2)); } - @Test (expected = IllegalArgumentException.class) + @Test public void notEqualsBufferUnderflow() { byte[] b1 = new byte[8]; byte[] b2 = new byte[16]; @@ -130,26 +198,27 @@ public void notEqualsBufferUnderflow() { final int iB2 = iB1 + b1.length; final int length = b1.length - iB1; System.arraycopy(b1, iB1, b2, iB2, length - 1); - assertFalse(ByteBufUtil.equals(Unpooled.wrappedBuffer(b1), iB1, Unpooled.wrappedBuffer(b2), iB2, - -1)); + assertThrows(IllegalArgumentException.class, + () -> ByteBufUtil.equals(Unpooled.wrappedBuffer(b1), iB1, Unpooled.wrappedBuffer(b2), iB2, -1)); } @SuppressWarnings("deprecation") - @Test - public void writeShortBE() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void writeShortBE(BufferType bufferType) { int expected = 0x1234; - ByteBuf buf = Unpooled.buffer(2).order(ByteOrder.BIG_ENDIAN); + ByteBuf buf = buffer(bufferType, 2).order(ByteOrder.BIG_ENDIAN); ByteBufUtil.writeShortBE(buf, expected); assertEquals(expected, buf.readShort()); - buf.resetReaderIndex(); + buf.readerIndex(0); assertEquals(ByteBufUtil.swapShort((short) expected), buf.readShortLE()); 
buf.release(); - buf = Unpooled.buffer(2).order(ByteOrder.LITTLE_ENDIAN); + buf = buffer(bufferType, 2).order(ByteOrder.LITTLE_ENDIAN); ByteBufUtil.writeShortBE(buf, expected); - assertEquals((short) expected, buf.readShortLE()); - buf.resetReaderIndex(); + assertEquals(ByteBufUtil.swapShort((short) expected), buf.readShortLE()); + buf.readerIndex(0); assertEquals(ByteBufUtil.swapShort((short) expected), buf.readShort()); buf.release(); } @@ -162,44 +231,46 @@ public void setShortBE() { ByteBuf buf = Unpooled.wrappedBuffer(new byte[2]).order(ByteOrder.BIG_ENDIAN); ByteBufUtil.setShortBE(buf, 0, shortValue); assertEquals(shortValue, buf.readShort()); - buf.resetReaderIndex(); + buf.readerIndex(0); assertEquals(ByteBufUtil.swapShort((short) shortValue), buf.readShortLE()); buf.release(); buf = Unpooled.wrappedBuffer(new byte[2]).order(ByteOrder.LITTLE_ENDIAN); ByteBufUtil.setShortBE(buf, 0, shortValue); - assertEquals((short) shortValue, buf.readShortLE()); - buf.resetReaderIndex(); + assertEquals(ByteBufUtil.swapShort((short) shortValue), buf.readShortLE()); + buf.readerIndex(0); assertEquals(ByteBufUtil.swapShort((short) shortValue), buf.readShort()); buf.release(); } @SuppressWarnings("deprecation") - @Test - public void writeMediumBE() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void writeMediumBE(BufferType bufferType) { int mediumValue = 0x123456; - ByteBuf buf = Unpooled.buffer(4).order(ByteOrder.BIG_ENDIAN); + ByteBuf buf = buffer(bufferType, 4).order(ByteOrder.BIG_ENDIAN); ByteBufUtil.writeMediumBE(buf, mediumValue); assertEquals(mediumValue, buf.readMedium()); - buf.resetReaderIndex(); + buf.readerIndex(0); assertEquals(ByteBufUtil.swapMedium(mediumValue), buf.readMediumLE()); buf.release(); - buf = Unpooled.buffer(4).order(ByteOrder.LITTLE_ENDIAN); + buf = buffer(bufferType, 4).order(ByteOrder.LITTLE_ENDIAN); ByteBufUtil.writeMediumBE(buf, mediumValue); - assertEquals(mediumValue, buf.readMediumLE()); - 
buf.resetReaderIndex(); + assertEquals(ByteBufUtil.swapMedium(mediumValue), buf.readMediumLE()); + buf.readerIndex(0); assertEquals(ByteBufUtil.swapMedium(mediumValue), buf.readMedium()); buf.release(); } - @Test - public void testWriteUsAscii() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUsAscii(BufferType bufferType) { String usAscii = "NettyRocks"; - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(usAscii.getBytes(CharsetUtil.US_ASCII)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeAscii(buf2, usAscii); assertEquals(buf, buf2); @@ -208,12 +279,13 @@ public void testWriteUsAscii() { buf2.release(); } - @Test - public void testWriteUsAsciiSwapped() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUsAsciiSwapped(BufferType bufferType) { String usAscii = "NettyRocks"; - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(usAscii.getBytes(CharsetUtil.US_ASCII)); - SwappedByteBuf buf2 = new SwappedByteBuf(Unpooled.buffer(16)); + SwappedByteBuf buf2 = new SwappedByteBuf(buffer(bufferType, 16)); ByteBufUtil.writeAscii(buf2, usAscii); assertEquals(buf, buf2); @@ -222,13 +294,14 @@ public void testWriteUsAsciiSwapped() { buf2.release(); } - @Test - public void testWriteUsAsciiWrapped() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUsAsciiWrapped(BufferType bufferType) { String usAscii = "NettyRocks"; - ByteBuf buf = unreleasableBuffer(Unpooled.buffer(16)); + ByteBuf buf = unreleasableBuffer(buffer(bufferType, 16)); assertWrapped(buf); buf.writeBytes(usAscii.getBytes(CharsetUtil.US_ASCII)); - ByteBuf buf2 = unreleasableBuffer(Unpooled.buffer(16)); + ByteBuf buf2 = unreleasableBuffer(buffer(bufferType, 16)); assertWrapped(buf2); ByteBufUtil.writeAscii(buf2, usAscii); @@ -238,12 
+311,51 @@ public void testWriteUsAsciiWrapped() { buf2.unwrap().release(); } - @Test - public void testWriteUtf8() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUsAsciiComposite(BufferType bufferType) { + String usAscii = "NettyRocks"; + ByteBuf buf = buffer(bufferType, 16); + buf.writeBytes(usAscii.getBytes(CharsetUtil.US_ASCII)); + ByteBuf buf2 = Unpooled.compositeBuffer().addComponent( + buffer(bufferType, 8)).addComponent(buffer(bufferType, 24)); + // write some byte so we start writing with an offset. + buf2.writeByte(1); + ByteBufUtil.writeAscii(buf2, usAscii); + + // Skip the previously written byte. + assertEquals(buf, buf2.skipBytes(1)); + + buf.release(); + buf2.release(); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUsAsciiCompositeWrapped(BufferType bufferType) { + String usAscii = "NettyRocks"; + ByteBuf buf = buffer(bufferType, 16); + buf.writeBytes(usAscii.getBytes(CharsetUtil.US_ASCII)); + ByteBuf buf2 = new WrappedCompositeByteBuf(Unpooled.compositeBuffer().addComponent( + buffer(bufferType, 8)).addComponent(buffer(bufferType, 24))); + // write some byte so we start writing with an offset. + buf2.writeByte(1); + ByteBufUtil.writeAscii(buf2, usAscii); + + // Skip the previously written byte. 
+ assertEquals(buf, buf2.skipBytes(1)); + + buf.release(); + buf2.release(); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8(BufferType bufferType) { String usAscii = "Some UTF-8 like äÄ∏ŒŒ"; - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(usAscii.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, usAscii); assertEquals(buf, buf2); @@ -252,8 +364,47 @@ public void testWriteUtf8() { buf2.release(); } - @Test - public void testWriteUtf8Surrogates() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8Composite(BufferType bufferType) { + String utf8 = "Some UTF-8 like äÄ∏ŒŒ"; + ByteBuf buf = buffer(bufferType, 16); + buf.writeBytes(utf8.getBytes(CharsetUtil.UTF_8)); + ByteBuf buf2 = Unpooled.compositeBuffer().addComponent( + buffer(bufferType, 8)).addComponent(buffer(bufferType, 24)); + // write some byte so we start writing with an offset. + buf2.writeByte(1); + ByteBufUtil.writeUtf8(buf2, utf8); + + // Skip the previously written byte. + assertEquals(buf, buf2.skipBytes(1)); + + buf.release(); + buf2.release(); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8CompositeWrapped(BufferType bufferType) { + String utf8 = "Some UTF-8 like äÄ∏ŒŒ"; + ByteBuf buf = buffer(bufferType, 16); + buf.writeBytes(utf8.getBytes(CharsetUtil.UTF_8)); + ByteBuf buf2 = new WrappedCompositeByteBuf(Unpooled.compositeBuffer().addComponent( + buffer(bufferType, 8)).addComponent(buffer(bufferType, 24))); + // write some byte so we start writing with an offset. + buf2.writeByte(1); + ByteBufUtil.writeUtf8(buf2, utf8); + + // Skip the previously written byte. 
+ assertEquals(buf, buf2.skipBytes(1)); + + buf.release(); + buf2.release(); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8Surrogates(BufferType bufferType) { // leading surrogate + trailing surrogate String surrogateString = new StringBuilder(2) .append('a') @@ -261,9 +412,9 @@ public void testWriteUtf8Surrogates() { .append('\uDC00') .append('b') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -273,16 +424,17 @@ public void testWriteUtf8Surrogates() { buf2.release(); } - @Test - public void testWriteUtf8InvalidOnlyTrailingSurrogate() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidOnlyTrailingSurrogate(BufferType bufferType) { String surrogateString = new StringBuilder(2) .append('a') .append('\uDC00') .append('b') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -292,16 +444,17 @@ public void testWriteUtf8InvalidOnlyTrailingSurrogate() { buf2.release(); } - @Test - public void testWriteUtf8InvalidOnlyLeadingSurrogate() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidOnlyLeadingSurrogate(BufferType bufferType) { String surrogateString = new StringBuilder(2) .append('a') .append('\uD800') .append('b') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = 
Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -311,17 +464,18 @@ public void testWriteUtf8InvalidOnlyLeadingSurrogate() { buf2.release(); } - @Test - public void testWriteUtf8InvalidSurrogatesSwitched() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidSurrogatesSwitched(BufferType bufferType) { String surrogateString = new StringBuilder(2) .append('a') .append('\uDC00') .append('\uD800') .append('b') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -331,17 +485,18 @@ public void testWriteUtf8InvalidSurrogatesSwitched() { buf2.release(); } - @Test - public void testWriteUtf8InvalidTwoLeadingSurrogates() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidTwoLeadingSurrogates(BufferType bufferType) { String surrogateString = new StringBuilder(2) .append('a') .append('\uD800') .append('\uD800') .append('b') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -350,17 +505,18 @@ public void testWriteUtf8InvalidTwoLeadingSurrogates() { buf2.release(); } - @Test - public void testWriteUtf8InvalidTwoTrailingSurrogates() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidTwoTrailingSurrogates(BufferType bufferType) { String surrogateString = new StringBuilder(2) .append('a') .append('\uDC00') 
.append('\uDC00') .append('b') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -370,14 +526,15 @@ public void testWriteUtf8InvalidTwoTrailingSurrogates() { buf2.release(); } - @Test - public void testWriteUtf8InvalidEndOnLeadingSurrogate() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidEndOnLeadingSurrogate(BufferType bufferType) { String surrogateString = new StringBuilder(2) .append('\uD800') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -387,14 +544,15 @@ public void testWriteUtf8InvalidEndOnLeadingSurrogate() { buf2.release(); } - @Test - public void testWriteUtf8InvalidEndOnTrailingSurrogate() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidEndOnTrailingSurrogate(BufferType bufferType) { String surrogateString = new StringBuilder(2) .append('\uDC00') .toString(); - ByteBuf buf = Unpooled.buffer(16); + ByteBuf buf = buffer(bufferType, 16); buf.writeBytes(surrogateString.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, 16); ByteBufUtil.writeUtf8(buf2, surrogateString); assertEquals(buf, buf2); @@ -404,12 +562,14 @@ public void testWriteUtf8InvalidEndOnTrailingSurrogate() { buf2.release(); } - @Test - public void testWriteUsAsciiString() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUsAsciiString(BufferType bufferType) { 
AsciiString usAscii = new AsciiString("NettyRocks"); - ByteBuf buf = Unpooled.buffer(16); + int expectedCapacity = usAscii.length(); + ByteBuf buf = buffer(bufferType, expectedCapacity); buf.writeBytes(usAscii.toString().getBytes(CharsetUtil.US_ASCII)); - ByteBuf buf2 = Unpooled.buffer(16); + ByteBuf buf2 = buffer(bufferType, expectedCapacity); ByteBufUtil.writeAscii(buf2, usAscii); assertEquals(buf, buf2); @@ -418,26 +578,124 @@ public void testWriteUsAsciiString() { buf2.release(); } - @Test - public void testWriteUtf8Wrapped() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8Wrapped(BufferType bufferType) { String usAscii = "Some UTF-8 like äÄ∏ŒŒ"; - ByteBuf buf = unreleasableBuffer(Unpooled.buffer(16)); + ByteBuf buf = unreleasableBuffer(buffer(bufferType, 16)); assertWrapped(buf); buf.writeBytes(usAscii.getBytes(CharsetUtil.UTF_8)); - ByteBuf buf2 = unreleasableBuffer(Unpooled.buffer(16)); + ByteBuf buf2 = unreleasableBuffer(buffer(bufferType, 16)); assertWrapped(buf2); ByteBufUtil.writeUtf8(buf2, usAscii); assertEquals(buf, buf2); - buf.release(); - buf2.release(); + buf.unwrap().release(); + buf2.unwrap().release(); } private static void assertWrapped(ByteBuf buf) { assertTrue(buf instanceof WrappedByteBuf); } + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8Subsequence(BufferType bufferType) { + String usAscii = "Some UTF-8 like äÄ∏ŒŒ"; + ByteBuf buf = buffer(bufferType, 16); + buf.writeBytes(usAscii.substring(5, 18).getBytes(CharsetUtil.UTF_8)); + ByteBuf buf2 = buffer(bufferType, 16); + ByteBufUtil.writeUtf8(buf2, usAscii, 5, 18); + + assertEquals(buf, buf2); + + buf.release(); + buf2.release(); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8SubsequenceSplitSurrogate(BufferType bufferType) { + String usAscii = "\uD800\uDC00"; // surrogate pair: one code point, two chars + ByteBuf buf = 
buffer(bufferType, 16); + buf.writeBytes(usAscii.substring(0, 1).getBytes(CharsetUtil.UTF_8)); + ByteBuf buf2 = buffer(bufferType, 16); + ByteBufUtil.writeUtf8(buf2, usAscii, 0, 1); + + assertEquals(buf, buf2); + + buf.release(); + buf2.release(); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testReserveAndWriteUtf8Subsequence(BufferType bufferType) { + String usAscii = "Some UTF-8 like äÄ∏ŒŒ"; + ByteBuf buf = buffer(bufferType, 16); + buf.writeBytes(usAscii.substring(5, 18).getBytes(CharsetUtil.UTF_8)); + ByteBuf buf2 = buffer(bufferType, 16); + int count = ByteBufUtil.reserveAndWriteUtf8(buf2, usAscii, 5, 18, 16); + + assertEquals(buf, buf2); + assertEquals(buf.readableBytes(), count); + + buf.release(); + buf2.release(); + } + + @Test + public void testUtf8BytesSubsequence() { + String usAscii = "Some UTF-8 like äÄ∏ŒŒ"; + assertEquals(usAscii.substring(5, 18).getBytes(CharsetUtil.UTF_8).length, + ByteBufUtil.utf8Bytes(usAscii, 5, 18)); + } + + private static final int[][] INVALID_RANGES = new int[][] { + { -1, 5 }, { 5, 30 }, { 10, 5 } + }; + + interface TestMethod { + int invoke(Object... 
args); + } + + private void testInvalidSubsequences(BufferType bufferType, TestMethod method) { + for (int [] range : INVALID_RANGES) { + ByteBuf buf = buffer(bufferType, 16); + try { + method.invoke(buf, "Some UTF-8 like äÄ∏ŒŒ", range[0], range[1]); + fail("Did not throw IndexOutOfBoundsException for range (" + range[0] + ", " + range[1] + ")"); + } catch (IndexOutOfBoundsException iiobe) { + // expected + } finally { + assertFalse(buf.isReadable()); + buf.release(); + } + } + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testWriteUtf8InvalidSubsequences(BufferType bufferType) { + testInvalidSubsequences(bufferType, args -> ByteBufUtil.writeUtf8((ByteBuf) args[0], (String) args[1], + (Integer) args[2], (Integer) args[3])); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testReserveAndWriteUtf8InvalidSubsequences(BufferType bufferType) { + testInvalidSubsequences(bufferType, args -> ByteBufUtil.reserveAndWriteUtf8((ByteBuf) args[0], (String) args[1], + (Integer) args[2], (Integer) args[3], 32)); + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testUtf8BytesInvalidSubsequences(BufferType bufferType) { + testInvalidSubsequences(bufferType, + args -> ByteBufUtil.utf8Bytes((String) args[1], (Integer) args[2], (Integer) args[3])); + } + @Test public void testDecodeUsAscii() { testDecodeString("This is a test", CharsetUtil.US_ASCII); @@ -454,21 +712,23 @@ private static void testDecodeString(String text, Charset charset) { buffer.release(); } - @Test - public void testToStringDoesNotThrowIndexOutOfBounds() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testToStringDoesNotThrowIndexOutOfBounds(BufferType bufferType) { CompositeByteBuf buffer = Unpooled.compositeBuffer(); try { byte[] bytes = "1234".getBytes(CharsetUtil.UTF_8); - 
buffer.addComponent(Unpooled.buffer(bytes.length).writeBytes(bytes)); - buffer.addComponent(Unpooled.buffer(bytes.length).writeBytes(bytes)); + buffer.addComponent(buffer(bufferType, bytes.length).writeBytes(bytes)); + buffer.addComponent(buffer(bufferType, bytes.length).writeBytes(bytes)); assertEquals("1234", buffer.toString(bytes.length, bytes.length, CharsetUtil.UTF_8)); } finally { buffer.release(); } } - @Test - public void testIsTextWithUtf8() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testIsTextWithUtf8(BufferType bufferType) { byte[][] validUtf8Bytes = { "netty".getBytes(CharsetUtil.UTF_8), {(byte) 0x24}, @@ -481,7 +741,7 @@ public void testIsTextWithUtf8() { (byte) 0xF0, (byte) 0x90, (byte) 0x8D, (byte) 0x88} // multiple characters }; for (byte[] bytes : validUtf8Bytes) { - assertIsText(bytes, true, CharsetUtil.UTF_8); + assertIsText(bufferType, bytes, true, CharsetUtil.UTF_8); } byte[][] invalidUtf8Bytes = { {(byte) 0x80}, @@ -497,31 +757,34 @@ public void testIsTextWithUtf8() { {(byte) 0xED, (byte) 0xAF, (byte) 0x80} // out of upper bound }; for (byte[] bytes : invalidUtf8Bytes) { - assertIsText(bytes, false, CharsetUtil.UTF_8); + assertIsText(bufferType, bytes, false, CharsetUtil.UTF_8); } } - @Test - public void testIsTextWithoutOptimization() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testIsTextWithoutOptimization(BufferType bufferType) { byte[] validBytes = {(byte) 0x01, (byte) 0xD8, (byte) 0x37, (byte) 0xDC}; byte[] invalidBytes = {(byte) 0x01, (byte) 0xD8}; - assertIsText(validBytes, true, CharsetUtil.UTF_16LE); - assertIsText(invalidBytes, false, CharsetUtil.UTF_16LE); + assertIsText(bufferType, validBytes, true, CharsetUtil.UTF_16LE); + assertIsText(bufferType, invalidBytes, false, CharsetUtil.UTF_16LE); } - @Test - public void testIsTextWithAscii() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void 
testIsTextWithAscii(BufferType bufferType) { byte[] validBytes = {(byte) 0x00, (byte) 0x01, (byte) 0x37, (byte) 0x7F}; byte[] invalidBytes = {(byte) 0x80, (byte) 0xFF}; - assertIsText(validBytes, true, CharsetUtil.US_ASCII); - assertIsText(invalidBytes, false, CharsetUtil.US_ASCII); + assertIsText(bufferType, validBytes, true, CharsetUtil.US_ASCII); + assertIsText(bufferType, invalidBytes, false, CharsetUtil.US_ASCII); } - @Test - public void testIsTextWithInvalidIndexAndLength() { - ByteBuf buffer = Unpooled.buffer(); + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testIsTextWithInvalidIndexAndLength(BufferType bufferType) { + ByteBuf buffer = buffer(bufferType, 4); try { buffer.writeBytes(new byte[4]); int[][] validIndexLengthPairs = { @@ -553,33 +816,37 @@ public void testIsTextWithInvalidIndexAndLength() { } } - @Test - public void testUtf8Bytes() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testUtf8Bytes(BufferType bufferType) { final String s = "Some UTF-8 like äÄ∏ŒŒ"; - checkUtf8Bytes(s); + checkUtf8Bytes(bufferType, s); } - @Test - public void testUtf8BytesWithSurrogates() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testUtf8BytesWithSurrogates(BufferType bufferType) { final String s = "a\uD800\uDC00b"; - checkUtf8Bytes(s); + checkUtf8Bytes(bufferType, s); } - @Test - public void testUtf8BytesWithNonSurrogates3Bytes() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testUtf8BytesWithNonSurrogates3Bytes(BufferType bufferType) { final String s = "a\uE000b"; - checkUtf8Bytes(s); + checkUtf8Bytes(bufferType, s); } - @Test - public void testUtf8BytesWithNonSurrogatesNonAscii() { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testUtf8BytesWithNonSurrogatesNonAscii(BufferType bufferType) { final char nonAscii = (char) 0x81; final String s = "a" + 
nonAscii + "b"; - checkUtf8Bytes(s); + checkUtf8Bytes(bufferType, s); } - private static void checkUtf8Bytes(final CharSequence charSequence) { - final ByteBuf buf = Unpooled.buffer(ByteBufUtil.utf8MaxBytes(charSequence)); + private void checkUtf8Bytes(BufferType bufferType, final CharSequence charSequence) { + final ByteBuf buf = buffer(bufferType, ByteBufUtil.utf8MaxBytes(charSequence)); try { final int writtenBytes = ByteBufUtil.writeUtf8(buf, charSequence); final int utf8Bytes = ByteBufUtil.utf8Bytes(charSequence); @@ -589,8 +856,8 @@ private static void checkUtf8Bytes(final CharSequence charSequence) { } } - private static void assertIsText(byte[] bytes, boolean expected, Charset charset) { - ByteBuf buffer = Unpooled.buffer(); + private void assertIsText(BufferType bufferType, byte[] bytes, boolean expected, Charset charset) { + ByteBuf buffer = buffer(bufferType, bytes.length); try { buffer.writeBytes(bytes); assertEquals(expected, ByteBufUtil.isText(buffer, charset)); @@ -599,25 +866,24 @@ private static void assertIsText(byte[] bytes, boolean expected, Charset charset } } - @Test - public void testIsTextMultiThreaded() throws Throwable { + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testIsTextMultiThreaded(BufferType bufferType) throws Throwable { + assumeTrue(bufferType == BufferType.HEAP_UNPOOLED); final ByteBuf buffer = Unpooled.copiedBuffer("Hello, World!", CharsetUtil.ISO_8859_1); try { final AtomicInteger counter = new AtomicInteger(60000); - final AtomicReference errorRef = new AtomicReference(); - List threads = new ArrayList(); + final AtomicReference errorRef = new AtomicReference<>(); + List threads = new ArrayList<>(); for (int i = 0; i < 10; i++) { - Thread thread = new Thread(new Runnable() { - @Override - public void run() { - try { - while (errorRef.get() == null && counter.decrementAndGet() > 0) { - assertTrue(ByteBufUtil.isText(buffer, CharsetUtil.ISO_8859_1)); - } - } catch (Throwable cause) { 
- errorRef.compareAndSet(null, cause); + Thread thread = new Thread(() -> { + try { + while (errorRef.get() == null && counter.decrementAndGet() > 0) { + assertTrue(ByteBufUtil.isText(buffer, CharsetUtil.ISO_8859_1)); } + } catch (Throwable cause) { + errorRef.compareAndSet(null, cause); } }); threads.add(thread); @@ -638,4 +904,77 @@ public void run() { buffer.release(); } } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testGetBytes(BufferType bufferType) { + final ByteBuf buf = buffer(bufferType, 4); + try { + checkGetBytes(buf); + } finally { + buf.release(); + } + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testGetBytesHeapWithNonZeroArrayOffset(BufferType bufferType) { + assumeTrue(bufferType == BufferType.HEAP_UNPOOLED); + final ByteBuf buf = buffer(bufferType, 5); + try { + buf.setByte(0, 0x05); + + final ByteBuf slice = buf.slice(1, 4); + slice.writerIndex(0); + + assertTrue(slice.hasArray()); + assertThat(slice.arrayOffset(), is(1)); + assertThat(slice.array().length, is(buf.capacity())); + + checkGetBytes(slice); + } finally { + buf.release(); + } + } + + @ParameterizedTest(name = PARAMETERIZED_NAME) + @MethodSource("noUnsafe") + public void testGetBytesHeapWithArrayLengthGreaterThanCapacity(BufferType bufferType) { + assumeTrue(bufferType == BufferType.HEAP_UNPOOLED); + final ByteBuf buf = buffer(bufferType, 5); + try { + buf.setByte(4, 0x05); + + final ByteBuf slice = buf.slice(0, 4); + slice.writerIndex(0); + + assertTrue(slice.hasArray()); + assertThat(slice.arrayOffset(), is(0)); + assertThat(slice.array().length, greaterThan(slice.capacity())); + + checkGetBytes(slice); + } finally { + buf.release(); + } + } + + private static void checkGetBytes(final ByteBuf buf) { + buf.writeInt(0x01020304); + + byte[] expected = { 0x01, 0x02, 0x03, 0x04 }; + assertArrayEquals(expected, ByteBufUtil.getBytes(buf)); + assertArrayEquals(expected, ByteBufUtil.getBytes(buf, 
0, buf.readableBytes(), false)); + + expected = new byte[] { 0x01, 0x02, 0x03 }; + assertArrayEquals(expected, ByteBufUtil.getBytes(buf, 0, 3)); + assertArrayEquals(expected, ByteBufUtil.getBytes(buf, 0, 3, false)); + + expected = new byte[] { 0x02, 0x03, 0x04 }; + assertArrayEquals(expected, ByteBufUtil.getBytes(buf, 1, 3)); + assertArrayEquals(expected, ByteBufUtil.getBytes(buf, 1, 3, false)); + + expected = new byte[] { 0x02, 0x03 }; + assertArrayEquals(expected, ByteBufUtil.getBytes(buf, 1, 2)); + assertArrayEquals(expected, ByteBufUtil.getBytes(buf, 1, 2, false)); + } } diff --git a/buffer/src/test/java/io/netty/buffer/ByteProcessorTest.java b/buffer/src/test/java/io/netty/buffer/ByteProcessorTest.java index b20f00715d4..01c7b7f82fe 100644 --- a/buffer/src/test/java/io/netty/buffer/ByteProcessorTest.java +++ b/buffer/src/test/java/io/netty/buffer/ByteProcessorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,11 +16,11 @@ package io.netty.buffer; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; import io.netty.util.ByteProcessor; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; public class ByteProcessorTest { @Test diff --git a/buffer/src/test/java/io/netty/buffer/ConsolidationTest.java b/buffer/src/test/java/io/netty/buffer/ConsolidationTest.java index e8dcec21020..6d304ccae70 100644 --- a/buffer/src/test/java/io/netty/buffer/ConsolidationTest.java +++ b/buffer/src/test/java/io/netty/buffer/ConsolidationTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in 
compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,10 +16,10 @@ package io.netty.buffer; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.buffer.Unpooled.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Tests buffer consolidation diff --git a/buffer/src/test/java/io/netty/buffer/DefaultByteBufHolderTest.java b/buffer/src/test/java/io/netty/buffer/DefaultByteBufHolderTest.java index 4c60d0e7b6d..2a623aa0ee3 100644 --- a/buffer/src/test/java/io/netty/buffer/DefaultByteBufHolderTest.java +++ b/buffer/src/test/java/io/netty/buffer/DefaultByteBufHolderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,9 +15,12 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class DefaultByteBufHolderTest { @@ -42,4 +45,64 @@ public void testEqualsAndHashCode() { copy.release(); } } + + @SuppressWarnings("SimplifiableJUnitAssertion") + @Test + public void testDifferentClassesAreNotEqual() { + // all objects here have EMPTY_BUFFER data but are instances of different classes + // so we want to check that none of them are equal to another. 
+ ByteBufHolder dflt = new DefaultByteBufHolder(Unpooled.EMPTY_BUFFER); + ByteBufHolder other = new OtherByteBufHolder(Unpooled.EMPTY_BUFFER, 123); + ByteBufHolder constant1 = new DefaultByteBufHolder(Unpooled.EMPTY_BUFFER) { + // intentionally empty + }; + ByteBufHolder constant2 = new DefaultByteBufHolder(Unpooled.EMPTY_BUFFER) { + // intentionally empty + }; + try { + // not using 'assertNotEquals' to be explicit about which object we are calling .equals() on + assertFalse(dflt.equals(other)); + assertFalse(dflt.equals(constant1)); + assertFalse(constant1.equals(dflt)); + assertFalse(constant1.equals(other)); + assertFalse(constant1.equals(constant2)); + } finally { + dflt.release(); + other.release(); + constant1.release(); + constant2.release(); + } + } + + private static class OtherByteBufHolder extends DefaultByteBufHolder { + + private final int extraField; + + OtherByteBufHolder(final ByteBuf data, final int extraField) { + super(data); + this.extraField = extraField; + } + + @Override + public boolean equals(final Object o) { + if (this == o) { + return true; + } + if (o == null || getClass() != o.getClass()) { + return false; + } + if (!super.equals(o)) { + return false; + } + final OtherByteBufHolder that = (OtherByteBufHolder) o; + return extraField == that.extraField; + } + + @Override + public int hashCode() { + int result = super.hashCode(); + result = 31 * result + extraField; + return result; + } + } } diff --git a/buffer/src/test/java/io/netty/buffer/DuplicatedByteBufTest.java b/buffer/src/test/java/io/netty/buffer/DuplicatedByteBufTest.java index 2e88657b664..cc1b8ade21e 100644 --- a/buffer/src/test/java/io/netty/buffer/DuplicatedByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/DuplicatedByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,9 +15,10 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; /** * Tests duplicated channel buffers @@ -33,9 +34,16 @@ protected ByteBuf newBuffer(int length, int maxCapacity) { return buffer; } - @Test(expected = NullPointerException.class) + @Test + public void testIsContiguous() { + ByteBuf buf = newBuffer(4); + assertEquals(buf.unwrap().isContiguous(), buf.isContiguous()); + buf.release(); + } + + @Test public void shouldNotAllowNullInConstructor() { - new DuplicatedByteBuf(null); + assertThrows(NullPointerException.class, () -> new DuplicatedByteBuf(null)); } // See https://github.com/netty/netty/issues/1800 @@ -50,26 +58,4 @@ public void testIncreaseCapacityWrapped() { assertEquals((byte) 0, buffer.readByte()); } - - @Test - public void testMarksInitialized() { - ByteBuf wrapped = Unpooled.buffer(8); - try { - wrapped.writerIndex(6); - wrapped.readerIndex(1); - ByteBuf duplicate = new DuplicatedByteBuf(wrapped); - - // Test writer mark - duplicate.writerIndex(duplicate.writerIndex() + 1); - duplicate.resetWriterIndex(); - assertEquals(wrapped.writerIndex(), duplicate.writerIndex()); - - // Test reader mark - duplicate.readerIndex(duplicate.readerIndex() + 1); - duplicate.resetReaderIndex(); - assertEquals(wrapped.readerIndex(), duplicate.readerIndex()); - } finally { - wrapped.release(); - } - } } diff --git a/buffer/src/test/java/io/netty/buffer/EmptyByteBufTest.java b/buffer/src/test/java/io/netty/buffer/EmptyByteBufTest.java index 92bdc8eb166..3ea80124bfd 100644 
--- a/buffer/src/test/java/io/netty/buffer/EmptyByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/EmptyByteBufTest.java @@ -5,22 +5,34 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. - */package io.netty.buffer; + */ +package io.netty.buffer; -import org.junit.Test; +import io.netty.util.CharsetUtil; +import org.junit.jupiter.api.Test; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class EmptyByteBufTest { + @Test + public void testIsContiguous() { + EmptyByteBuf empty = new EmptyByteBuf(UnpooledByteBufAllocator.DEFAULT); + assertTrue(empty.isContiguous()); + } + @Test public void testIsWritable() { EmptyByteBuf empty = new EmptyByteBuf(UnpooledByteBufAllocator.DEFAULT); @@ -82,4 +94,22 @@ public void testMemoryAddress() { } } } + + @Test + public void consistentEqualsAndHashCodeWithAbstractBytebuf() { + ByteBuf empty = new EmptyByteBuf(UnpooledByteBufAllocator.DEFAULT); + ByteBuf emptyAbstract = new UnpooledHeapByteBuf(UnpooledByteBufAllocator.DEFAULT, 0, 0); + assertEquals(emptyAbstract, empty); + assertEquals(emptyAbstract.hashCode(), empty.hashCode()); + assertEquals(EmptyByteBuf.EMPTY_BYTE_BUF_HASH_CODE, empty.hashCode()); + 
assertTrue(emptyAbstract.release()); + assertFalse(empty.release()); + } + + @Test + public void testGetCharSequence() { + EmptyByteBuf empty = new EmptyByteBuf(UnpooledByteBufAllocator.DEFAULT); + assertEquals("", empty.readCharSequence(0, CharsetUtil.US_ASCII)); + } + } diff --git a/buffer/src/test/java/io/netty/buffer/FixedCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/FixedCompositeByteBufTest.java index 3d3edc854d6..eab66930b94 100644 --- a/buffer/src/test/java/io/netty/buffer/FixedCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/FixedCompositeByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,8 +15,8 @@ */ package io.netty.buffer; - -import org.junit.Test; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; import java.io.IOException; @@ -27,7 +27,12 @@ import java.nio.charset.Charset; import static io.netty.buffer.Unpooled.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class FixedCompositeByteBufTest { @@ -35,63 +40,64 @@ private static ByteBuf newBuffer(ByteBuf... 
buffers) { return new FixedCompositeByteBuf(UnpooledByteBufAllocator.DEFAULT, buffers); } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetBoolean() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setBoolean(0, true); + assertThrows(ReadOnlyBufferException.class, () -> buf.setBoolean(0, true)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetByte() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setByte(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setByte(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetBytesWithByteBuf() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); ByteBuf src = wrappedBuffer(new byte[4]); try { - buf.setBytes(0, src); + assertThrows(ReadOnlyBufferException.class, () -> buf.setBytes(0, src)); } finally { buf.release(); src.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetBytesWithByteBuffer() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setBytes(0, ByteBuffer.wrap(new byte[4])); + assertThrows(ReadOnlyBufferException.class, () -> buf.setBytes(0, ByteBuffer.wrap(new byte[4]))); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) - public void testSetBytesWithInputStream() throws IOException { + @Test + public void testSetBytesWithInputStream() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setBytes(0, new ByteArrayInputStream(new byte[4]), 4); + assertThrows(ReadOnlyBufferException.class, + () -> buf.setBytes(0, new ByteArrayInputStream(new byte[4]), 4)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) - public void testSetBytesWithChannel() throws IOException { + @Test + public void testSetBytesWithChannel() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setBytes(0, 
new ScatteringByteChannel() { + assertThrows(ReadOnlyBufferException.class, () -> buf.setBytes(0, new ScatteringByteChannel() { @Override public long read(ByteBuffer[] dsts, int offset, int length) { return 0; @@ -115,67 +121,67 @@ public boolean isOpen() { @Override public void close() { } - }, 4); + }, 4)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) - public void testSetChar() throws IOException { + @Test + public void testSetChar() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setChar(0, 'b'); + assertThrows(ReadOnlyBufferException.class, () -> buf.setChar(0, 'b')); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) - public void testSetDouble() throws IOException { + @Test + public void testSetDouble() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setDouble(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setDouble(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) - public void testSetFloat() throws IOException { + @Test + public void testSetFloat() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setFloat(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setFloat(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetInt() throws IOException { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setInt(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setInt(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetLong() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setLong(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setLong(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) - public void testSetMedium() throws IOException { + @Test + public void 
testSetMedium() { ByteBuf buf = newBuffer(wrappedBuffer(new byte[8])); try { - buf.setMedium(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setMedium(0, 1)); } finally { buf.release(); } @@ -377,4 +383,79 @@ public void testEmptyArray() { ByteBuf buf = newBuffer(new ByteBuf[0]); buf.release(); } + + @Test + public void testHasMemoryAddressWithSingleBuffer() { + ByteBuf buf1 = directBuffer(10); + if (!buf1.hasMemoryAddress()) { + buf1.release(); + return; + } + ByteBuf buf = newBuffer(buf1); + assertTrue(buf.hasMemoryAddress()); + assertEquals(buf1.memoryAddress(), buf.memoryAddress()); + buf.release(); + } + + @Test + public void testHasMemoryAddressWhenEmpty() { + Assumptions.assumeTrue(EMPTY_BUFFER.hasMemoryAddress()); + ByteBuf buf = newBuffer(new ByteBuf[0]); + assertTrue(buf.hasMemoryAddress()); + assertEquals(EMPTY_BUFFER.memoryAddress(), buf.memoryAddress()); + buf.release(); + } + + @Test + public void testHasNoMemoryAddressWhenMultipleBuffers() { + ByteBuf buf1 = directBuffer(10); + if (!buf1.hasMemoryAddress()) { + buf1.release(); + return; + } + + ByteBuf buf2 = directBuffer(10); + ByteBuf buf = newBuffer(buf1, buf2); + assertFalse(buf.hasMemoryAddress()); + try { + buf.memoryAddress(); + fail(); + } catch (UnsupportedOperationException expected) { + // expected + } finally { + buf.release(); + } + } + + @Test + public void testHasArrayWithSingleBuffer() { + ByteBuf buf1 = buffer(10); + ByteBuf buf = newBuffer(buf1); + assertTrue(buf.hasArray()); + assertArrayEquals(buf1.array(), buf.array()); + buf.release(); + } + + @Test + public void testHasArrayWhenEmptyAndIsDirect() { + ByteBuf buf = newBuffer(new ByteBuf[0]); + assertTrue(buf.hasArray()); + assertArrayEquals(EMPTY_BUFFER.array(), buf.array()); + assertEquals(EMPTY_BUFFER.isDirect(), buf.isDirect()); + assertEquals(EMPTY_BUFFER.memoryAddress(), buf.memoryAddress()); + buf.release(); + } + + @Test + public void testHasNoArrayWhenMultipleBuffers() { + ByteBuf buf1 = buffer(10); + 
ByteBuf buf2 = buffer(10); + ByteBuf buf = newBuffer(buf1, buf2); + assertFalse(buf.hasArray()); + try { + assertThrows(UnsupportedOperationException.class, buf::array); + } finally { + buf.release(); + } + } } diff --git a/buffer/src/test/java/io/netty/buffer/LittleEndianCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/LittleEndianCompositeByteBufTest.java index 9a6db949d8d..b6b27de3360 100644 --- a/buffer/src/test/java/io/netty/buffer/LittleEndianCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/LittleEndianCompositeByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/test/java/io/netty/buffer/LittleEndianDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/LittleEndianDirectByteBufTest.java index 8c3bcff1264..6fa0ed01613 100644 --- a/buffer/src/test/java/io/netty/buffer/LittleEndianDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/LittleEndianDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,8 @@ */ package io.netty.buffer; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; import java.nio.ByteOrder; diff --git a/buffer/src/test/java/io/netty/buffer/LittleEndianHeapByteBufTest.java b/buffer/src/test/java/io/netty/buffer/LittleEndianHeapByteBufTest.java index 8f72b20cee5..077873425cd 100644 --- a/buffer/src/test/java/io/netty/buffer/LittleEndianHeapByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/LittleEndianHeapByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.buffer; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; import java.nio.ByteOrder; diff --git a/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeDirectByteBufTest.java index 4fcae564817..57eff37ba43 100644 --- a/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,15 +16,15 @@ package io.netty.buffer; import io.netty.util.internal.PlatformDependent; -import org.junit.Assume; -import org.junit.Before; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; public class LittleEndianUnsafeDirectByteBufTest extends LittleEndianDirectByteBufTest { - @Before + @BeforeEach @Override public void init() { - Assume.assumeTrue("sun.misc.Unsafe not found, skip tests", PlatformDependent.hasUnsafe()); + Assumptions.assumeTrue(PlatformDependent.hasUnsafe(), "sun.misc.Unsafe not found, skip tests"); super.init(); } diff --git a/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeNoCleanerDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeNoCleanerDirectByteBufTest.java index af865ab5217..6396148e073 100644 --- a/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeNoCleanerDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/LittleEndianUnsafeNoCleanerDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,16 +16,16 @@ package io.netty.buffer; import io.netty.util.internal.PlatformDependent; -import org.junit.Assume; -import org.junit.Before; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; public class LittleEndianUnsafeNoCleanerDirectByteBufTest extends LittleEndianDirectByteBufTest { - @Before + @BeforeEach @Override public void init() { - Assume.assumeTrue("java.nio.DirectByteBuffer.(long, int) not found, skip tests", - PlatformDependent.useDirectBufferNoCleaner()); + Assumptions.assumeTrue(PlatformDependent.useDirectBufferNoCleaner(), + "java.nio.DirectByteBuffer.(long, int) not found, skip tests"); super.init(); } diff --git a/buffer/src/test/java/io/netty/buffer/NoopResourceLeakTracker.java b/buffer/src/test/java/io/netty/buffer/NoopResourceLeakTracker.java index 860f135dc4c..8f31f80f93b 100644 --- a/buffer/src/test/java/io/netty/buffer/NoopResourceLeakTracker.java +++ b/buffer/src/test/java/io/netty/buffer/NoopResourceLeakTracker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java b/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java index 3fafef9c04e..0eb7a5e7e42 100644 --- a/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java +++ b/buffer/src/test/java/io/netty/buffer/PoolArenaTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,35 +16,94 @@ package io.netty.buffer; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + public class PoolArenaTest { + private static final int PAGE_SIZE = 8192; + private static final int PAGE_SHIFTS = 11; + //chunkSize = pageSize * (2 ^ pageShifts) + private static final int CHUNK_SIZE = 16777216; + @Test - public void testNormalizeCapacity() throws Exception { - PoolArena arena = new PoolArena.DirectArena(null, 0, 0, 9, 999999, 0); + public void testNormalizeCapacity() { + PoolArena arena = new PoolArena.DirectArena(null, PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 0); int[] reqCapacities = {0, 15, 510, 1024, 1023, 1025}; - int[] expectedResult = {0, 16, 512, 1024, 1024, 2048}; + int[] expectedResult = {16, 16, 512, 1024, 1024, 1280}; for (int i = 0; i < 
reqCapacities.length; i ++) { - Assert.assertEquals(expectedResult[i], arena.normalizeCapacity(reqCapacities[i])); + assertEquals(expectedResult[i], arena.sizeIdx2size(arena.size2SizeIdx(reqCapacities[i]))); } } @Test - public void testNormalizeAlignedCapacity() throws Exception { - PoolArena arena = new PoolArena.DirectArena(null, 0, 0, 9, 999999, 64); + public void testNormalizeAlignedCapacity() { + PoolArena arena = new PoolArena.DirectArena(null, PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 64); int[] reqCapacities = {0, 15, 510, 1024, 1023, 1025}; - int[] expectedResult = {0, 64, 512, 1024, 1024, 2048}; + int[] expectedResult = {16, 64, 512, 1024, 1024, 1280}; for (int i = 0; i < reqCapacities.length; i ++) { - Assert.assertEquals(expectedResult[i], arena.normalizeCapacity(reqCapacities[i])); + assertEquals(expectedResult[i], arena.sizeIdx2size(arena.size2SizeIdx(reqCapacities[i]))); + } + } + + @Test + public void testSize2SizeIdx() { + PoolArena arena = new PoolArena.DirectArena(null, PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 0); + + for (int sz = 0; sz <= CHUNK_SIZE; sz++) { + int sizeIdx = arena.size2SizeIdx(sz); + assertTrue(sz <= arena.sizeIdx2size(sizeIdx)); + if (sizeIdx > 0) { + assertTrue(sz > arena.sizeIdx2size(sizeIdx - 1)); + } + } + } + + @Test + public void testPages2PageIdx() { + int pageShifts = PAGE_SHIFTS; + + PoolArena arena = new PoolArena.DirectArena(null, PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 0); + + int maxPages = CHUNK_SIZE >> pageShifts; + for (int pages = 1; pages <= maxPages; pages++) { + int pageIdxFloor = arena.pages2pageIdxFloor(pages); + assertTrue(pages << pageShifts >= arena.pageIdx2size(pageIdxFloor)); + if (pageIdxFloor > 0 && pages < maxPages) { + assertTrue(pages << pageShifts < arena.pageIdx2size(pageIdxFloor + 1)); + } + + int pageIdxCeiling = arena.pages2pageIdx(pages); + assertTrue(pages << pageShifts <= arena.pageIdx2size(pageIdxCeiling)); + if (pageIdxCeiling > 0) { + assertTrue(pages << pageShifts > arena.pageIdx2size(pageIdxCeiling 
- 1)); + } + } + } + + @Test + public void testSizeIdx2size() { + PoolArena arena = new PoolArena.DirectArena(null, PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 0); + for (int i = 0; i < arena.nSizes; i++) { + assertEquals(arena.sizeIdx2sizeCompute(i), arena.sizeIdx2size(i)); + } + } + + @Test + public void testPageIdx2size() { + PoolArena arena = new PoolArena.DirectArena(null, PAGE_SIZE, PAGE_SHIFTS, CHUNK_SIZE, 0); + for (int i = 0; i < arena.nPSizes; i++) { + assertEquals(arena.pageIdx2sizeCompute(i), arena.pageIdx2size(i)); } } @Test - public final void testAllocationCounter() { + public void testAllocationCounter() { final PooledByteBufAllocator allocator = new PooledByteBufAllocator( true, // preferDirect 0, // nHeapArena @@ -57,34 +116,50 @@ public final void testAllocationCounter() { true // useCacheForAllThreads ); - // create tiny buffer - final ByteBuf b1 = allocator.directBuffer(24); // create small buffer - final ByteBuf b2 = allocator.directBuffer(800); + final ByteBuf b1 = allocator.directBuffer(800); // create normal buffer - final ByteBuf b3 = allocator.directBuffer(8192 * 2); + final ByteBuf b2 = allocator.directBuffer(8192 * 5); - Assert.assertNotNull(b1); - Assert.assertNotNull(b2); - Assert.assertNotNull(b3); + assertNotNull(b1); + assertNotNull(b2); // then release buffer to deallocated memory while threadlocal cache has been disabled // allocations counter value must equals deallocations counter value - Assert.assertTrue(b1.release()); - Assert.assertTrue(b2.release()); - Assert.assertTrue(b3.release()); + assertTrue(b1.release()); + assertTrue(b2.release()); - Assert.assertTrue(allocator.directArenas().size() >= 1); + assertTrue(allocator.directArenas().size() >= 1); final PoolArenaMetric metric = allocator.directArenas().get(0); - Assert.assertEquals(3, metric.numDeallocations()); - Assert.assertEquals(3, metric.numAllocations()); + assertEquals(2, metric.numDeallocations()); + assertEquals(2, metric.numAllocations()); + + assertEquals(1, 
metric.numSmallDeallocations()); + assertEquals(1, metric.numSmallAllocations()); + assertEquals(1, metric.numNormalDeallocations()); + assertEquals(1, metric.numNormalAllocations()); + } + + @Test + public void testDirectArenaMemoryCopy() { + ByteBuf src = PooledByteBufAllocator.DEFAULT.directBuffer(512); + ByteBuf dst = PooledByteBufAllocator.DEFAULT.directBuffer(512); + + PooledByteBuf pooledSrc = unwrapIfNeeded(src); + PooledByteBuf pooledDst = unwrapIfNeeded(dst); + + // This causes the internal reused ByteBuffer duplicate limit to be set to 128 + pooledDst.writeBytes(ByteBuffer.allocate(128)); + // Ensure internal ByteBuffer duplicate limit is properly reset (used in memoryCopy non-Unsafe case) + pooledDst.chunk.arena.memoryCopy(pooledSrc.memory, 0, pooledDst, 512); + + src.release(); + dst.release(); + } - Assert.assertEquals(1, metric.numTinyDeallocations()); - Assert.assertEquals(1, metric.numTinyAllocations()); - Assert.assertEquals(1, metric.numSmallDeallocations()); - Assert.assertEquals(1, metric.numSmallAllocations()); - Assert.assertEquals(1, metric.numNormalDeallocations()); - Assert.assertEquals(1, metric.numNormalAllocations()); + @SuppressWarnings("unchecked") + private PooledByteBuf unwrapIfNeeded(ByteBuf buf) { + return (PooledByteBuf) (buf instanceof PooledByteBuf ? buf : buf.unwrap()); } } diff --git a/buffer/src/test/java/io/netty/buffer/PooledAlignedBigEndianDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/PooledAlignedBigEndianDirectByteBufTest.java new file mode 100644 index 00000000000..5ee3de6f127 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/PooledAlignedBigEndianDirectByteBufTest.java @@ -0,0 +1,54 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer; + +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; + +import java.nio.ByteOrder; + +import static org.junit.jupiter.api.Assertions.assertSame; + +public class PooledAlignedBigEndianDirectByteBufTest extends PooledBigEndianDirectByteBufTest { + private static final int directMemoryCacheAlignment = 1; + private static PooledByteBufAllocator allocator; + + @BeforeAll + public static void setUpAllocator() { + allocator = new PooledByteBufAllocator( + true, + PooledByteBufAllocator.defaultNumHeapArena(), + PooledByteBufAllocator.defaultNumDirectArena(), + PooledByteBufAllocator.defaultPageSize(), + 11, + PooledByteBufAllocator.defaultSmallCacheSize(), + 64, + PooledByteBufAllocator.defaultUseCacheForAllThreads(), + directMemoryCacheAlignment); + } + + @AfterAll + public static void releaseAllocator() { + allocator = null; + } + + @Override + protected ByteBuf alloc(int length, int maxCapacity) { + ByteBuf buffer = allocator.directBuffer(length, maxCapacity); + assertSame(ByteOrder.BIG_ENDIAN, buffer.order()); + return buffer; + } +} diff --git a/buffer/src/test/java/io/netty/buffer/PooledBigEndianDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/PooledBigEndianDirectByteBufTest.java index 7d275f8e5bb..0494f774593 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledBigEndianDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledBigEndianDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the 
License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ import java.nio.ByteOrder; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertSame; /** * Tests big-endian direct channel buffers diff --git a/buffer/src/test/java/io/netty/buffer/PooledBigEndianHeapByteBufTest.java b/buffer/src/test/java/io/netty/buffer/PooledBigEndianHeapByteBufTest.java index 6840e62b5e3..12b57a858e9 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledBigEndianHeapByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledBigEndianHeapByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java index 495bb765406..3ea9f2251a0 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledByteBufAllocatorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,9 +20,10 @@ import io.netty.util.concurrent.FastThreadLocalThread; import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.SystemPropertyUtil; -import org.junit.Assume; -import org.junit.Test; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Test; +import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; import java.util.Queue; @@ -31,12 +32,15 @@ import java.util.concurrent.atomic.AtomicBoolean; import java.util.concurrent.atomic.AtomicReference; import java.util.concurrent.locks.LockSupport; +import org.junit.jupiter.api.Timeout; +import static io.netty.buffer.PoolChunk.runOffset; +import static io.netty.buffer.PoolChunk.runPages; import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class PooledByteBufAllocatorTest extends AbstractByteBufAllocatorTest { @@ -63,6 +67,21 @@ protected long expectedUsedMemoryAfterRelease(PooledByteBufAllocator allocator, return allocator.metric().chunkSize(); } + @Test + public void testTrim() { + PooledByteBufAllocator allocator = newAllocator(true); + + // Should return false as we never allocated from this thread yet. 
+ assertFalse(allocator.trimCurrentThreadCache()); + + ByteBuf directBuffer = allocator.directBuffer(); + + assertTrue(directBuffer.release()); + + // Should return true now a cache exists for the calling thread. + assertTrue(allocator.trimCurrentThreadCache()); + } + @Test public void testPooledUnsafeHeapBufferAndUnsafeDirectBuffer() { PooledByteBufAllocator allocator = newAllocator(true); @@ -77,6 +96,25 @@ public void testPooledUnsafeHeapBufferAndUnsafeDirectBuffer() { heapBuffer.release(); } + @Test + public void testIOBuffersAreDirectWhenUnsafeAvailableOrDirectBuffersPooled() { + PooledByteBufAllocator allocator = newAllocator(true); + ByteBuf ioBuffer = allocator.ioBuffer(); + + assertTrue(ioBuffer.isDirect()); + ioBuffer.release(); + + PooledByteBufAllocator unpooledAllocator = newUnpooledAllocator(); + ioBuffer = unpooledAllocator.ioBuffer(); + + if (PlatformDependent.hasUnsafe()) { + assertTrue(ioBuffer.isDirect()); + } else { + assertFalse(ioBuffer.isDirect()); + } + ioBuffer.release(); + } + @Test public void testWithoutUseCacheForAllThreads() { assertFalse(Thread.currentThread() instanceof FastThreadLocalThread); @@ -102,18 +140,18 @@ public void testArenaMetricsNoCache() { @Test public void testArenaMetricsCache() { - testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 11, 1000, 1000, 1000), 100, 1, 1, 0); + testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 11, 1000, 1000, 1000, true, 0), 100, 1, 1, 0); } @Test public void testArenaMetricsNoCacheAlign() { - Assume.assumeTrue(PooledByteBufAllocator.isDirectMemoryCacheAlignmentSupported()); + Assumptions.assumeTrue(PooledByteBufAllocator.isDirectMemoryCacheAlignmentSupported()); testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 11, 0, 0, 0, true, 64), 100, 0, 100, 100); } @Test public void testArenaMetricsCacheAlign() { - Assume.assumeTrue(PooledByteBufAllocator.isDirectMemoryCacheAlignmentSupported()); + 
Assumptions.assumeTrue(PooledByteBufAllocator.isDirectMemoryCacheAlignmentSupported()); testArenaMetrics0(new PooledByteBufAllocator(true, 2, 2, 8192, 11, 1000, 1000, 1000, true, 64), 100, 1, 1, 0); } @@ -178,19 +216,6 @@ public void testSmallSubpageMetric() { } } - @Test - public void testTinySubpageMetric() { - PooledByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 11, 0, 0, 0); - ByteBuf buffer = allocator.heapBuffer(1); - try { - PoolArenaMetric metric = allocator.metric().heapArenas().get(0); - PoolSubpageMetric subpageMetric = metric.tinySubpages().get(0); - assertEquals(1, subpageMetric.maxNumElements() - subpageMetric.numAvailable()); - } finally { - buffer.release(); - } - } - @Test public void testAllocNotNull() { PooledByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 11, 0, 0, 0); @@ -200,7 +225,6 @@ public void testAllocNotNull() { testAllocNotNull(allocator, 1024); // Small allocation testAllocNotNull(allocator, 512); - // Tiny allocation testAllocNotNull(allocator, 1); } @@ -240,12 +264,89 @@ public void testFreePoolChunk() { assertFalse(lists.get(5).iterator().hasNext()); } - @Test (timeout = 4000) + @Test + public void testCollapse() { + int pageSize = 8192; + //no cache + ByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 11, 0, 0, 0); + + ByteBuf b1 = allocator.buffer(pageSize * 4); + ByteBuf b2 = allocator.buffer(pageSize * 5); + ByteBuf b3 = allocator.buffer(pageSize * 6); + + b2.release(); + b3.release(); + + ByteBuf b4 = allocator.buffer(pageSize * 10); + + PooledByteBuf b = unwrapIfNeeded(b4); + + //b2 and b3 are collapsed, b4 should start at offset 4 + assertEquals(4, runOffset(b.handle)); + assertEquals(10, runPages(b.handle)); + + b1.release(); + b4.release(); + + //all ByteBuf are collapsed, b5 should start at offset 0 + ByteBuf b5 = allocator.buffer(pageSize * 20); + b = unwrapIfNeeded(b5); + + assertEquals(0, runOffset(b.handle)); + assertEquals(20, 
runPages(b.handle)); + + b5.release(); + } + + @Test + public void testAllocateSmallOffset() { + int pageSize = 8192; + ByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 11, 0, 0, 0); + + int size = pageSize * 5; + + ByteBuf[] bufs = new ByteBuf[10]; + for (int i = 0; i < 10; i++) { + bufs[i] = allocator.buffer(size); + } + + for (int i = 0; i < 5; i++) { + bufs[i].release(); + } + + //make sure we always allocate runs with small offset + for (int i = 0; i < 5; i++) { + ByteBuf buf = allocator.buffer(size); + PooledByteBuf unwrapedBuf = unwrapIfNeeded(buf); + assertEquals(runOffset(unwrapedBuf.handle), i * 5); + bufs[i] = buf; + } + + //release at reverse order + for (int i = 10 - 1; i >= 5; i--) { + bufs[i].release(); + } + + for (int i = 5; i < 10; i++) { + ByteBuf buf = allocator.buffer(size); + PooledByteBuf unwrapedBuf = unwrapIfNeeded(buf); + assertEquals(runOffset(unwrapedBuf.handle), i * 5); + bufs[i] = buf; + } + + for (int i = 0; i < 10; i++) { + bufs[i].release(); + } + } + + @Test + @Timeout(value = 4000, unit = MILLISECONDS) public void testThreadCacheDestroyedByThreadCleaner() throws InterruptedException { testThreadCacheDestroyed(false); } - @Test (timeout = 4000) + @Test + @Timeout(value = 4000, unit = MILLISECONDS) public void testThreadCacheDestroyedAfterExitRun() throws InterruptedException { testThreadCacheDestroyed(true); } @@ -257,23 +358,20 @@ private static void testThreadCacheDestroyed(boolean useRunnable) throws Interru final AtomicBoolean threadCachesCreated = new AtomicBoolean(true); - final Runnable task = new Runnable() { - @Override - public void run() { - ByteBuf buf = allocator.newHeapBuffer(1024, 1024); - for (int i = 0; i < buf.capacity(); i++) { - buf.writeByte(0); - } - - // Make sure that thread caches are actually created, - // so that down below we are not testing for zero - // thread caches without any of them ever having been initialized. 
- if (allocator.metric().numThreadLocalCaches() == 0) { - threadCachesCreated.set(false); - } + final Runnable task = () -> { + ByteBuf buf = allocator.newHeapBuffer(1024, 1024); + for (int i = 0; i < buf.capacity(); i++) { + buf.writeByte(0); + } - buf.release(); + // Make sure that thread caches are actually created, + // so that down below we are not testing for zero + // thread caches without any of them ever having been initialized. + if (allocator.metric().numThreadLocalCaches() == 0) { + threadCachesCreated.set(false); } + + buf.release(); }; for (int i = 0; i < numArenas; i++) { @@ -294,7 +392,7 @@ public void run() { thread.join(); } - // Wait for the ThreadDeathWatcher to have destroyed all thread caches + // Wait for all thread caches to be destroyed. while (allocator.metric().numThreadLocalCaches() > 0) { // Signal we want to have a GC run to ensure we can process our ThreadCleanerReference System.gc(); @@ -305,7 +403,8 @@ public void run() { assertTrue(threadCachesCreated.get()); } - @Test(timeout = 3000) + @Test + @Timeout(value = 3000, unit = MILLISECONDS) public void testNumThreadCachesWithNoDirectArenas() throws InterruptedException { int numHeapArenas = 1; final PooledByteBufAllocator allocator = @@ -324,7 +423,8 @@ public void testNumThreadCachesWithNoDirectArenas() throws InterruptedException assertEquals(0, allocator.metric().numThreadLocalCaches()); } - @Test(timeout = 3000) + @Test + @Timeout(value = 3000, unit = MILLISECONDS) public void testThreadCacheToArenaMappings() throws InterruptedException { int numArenas = 2; final PooledByteBufAllocator allocator = @@ -368,39 +468,32 @@ private static ThreadCache createNewThreadCache(final PooledByteBufAllocator all throws InterruptedException { final CountDownLatch latch = new CountDownLatch(1); final CountDownLatch cacheLatch = new CountDownLatch(1); - final Thread t = new FastThreadLocalThread(new Runnable() { + final Thread t = new FastThreadLocalThread(() -> { + ByteBuf buf = 
allocator.newHeapBuffer(1024, 1024); - @Override - public void run() { - ByteBuf buf = allocator.newHeapBuffer(1024, 1024); + // Countdown the latch after we allocated a buffer. At this point the cache must exists. + cacheLatch.countDown(); - // Countdown the latch after we allocated a buffer. At this point the cache must exists. - cacheLatch.countDown(); + buf.writeZero(buf.capacity()); - buf.writeZero(buf.capacity()); - - try { - latch.await(); - } catch (InterruptedException e) { - throw new IllegalStateException(e); - } + try { + latch.await(); + } catch (InterruptedException e) { + throw new IllegalStateException(e); + } - buf.release(); + buf.release(); - FastThreadLocal.removeAll(); - } + FastThreadLocal.removeAll(); }); t.start(); // Wait until we allocated a buffer and so be sure the thread was started and the cache exists. cacheLatch.await(); - return new ThreadCache() { - @Override - public void destroy() throws InterruptedException { - latch.countDown(); - t.join(); - } + return () -> { + latch.countDown(); + t.join(); }; } @@ -416,7 +509,7 @@ public void testConcurrentUsage() throws Throwable { // We use no caches and only one arena to maximize the chance of hitting the race-condition we // had before. ByteBufAllocator allocator = new PooledByteBufAllocator(true, 1, 1, 8192, 11, 0, 0, 0); - List threads = new ArrayList(); + List threads = new ArrayList<>(); try { for (int i = 0; i < 512; i++) { AllocationThread thread = new AllocationThread(allocator); @@ -430,8 +523,14 @@ public void testConcurrentUsage() throws Throwable { Thread.sleep(100); } } finally { + // First mark all AllocationThreads to complete their work and then wait until these are complete + // and rethrow if there was any error. 
for (AllocationThread t : threads) { - t.finish(); + t.markAsFinished(); + } + + for (AllocationThread t: threads) { + t.joinAndCheckForError(); } } } @@ -457,11 +556,11 @@ private static final class AllocationThread extends Thread { } } - private final Queue buffers = new ConcurrentLinkedQueue(); + private final Queue buffers = new ConcurrentLinkedQueue<>(); private final ByteBufAllocator allocator; - private final AtomicReference finish = new AtomicReference(); + private final AtomicReference finish = new AtomicReference<>(); - public AllocationThread(ByteBufAllocator allocator) { + AllocationThread(ByteBufAllocator allocator) { this.allocator = allocator; } @@ -471,49 +570,113 @@ public void run() { int idx = 0; while (finish.get() == null) { for (int i = 0; i < 10; i++) { - buffers.add(allocator.directBuffer( - ALLOCATION_SIZES[Math.abs(idx++ % ALLOCATION_SIZES.length)], - Integer.MAX_VALUE)); + int len = ALLOCATION_SIZES[Math.abs(idx++ % ALLOCATION_SIZES.length)]; + ByteBuf buf = allocator.directBuffer(len, Integer.MAX_VALUE); + assertEquals(len, buf.writableBytes()); + while (buf.isWritable()) { + buf.writeByte(i); + } + + buffers.offer(buf); } - releaseBuffers(); + releaseBuffersAndCheckContent(); } } catch (Throwable cause) { finish.set(cause); } finally { - releaseBuffers(); + releaseBuffersAndCheckContent(); } } - private void releaseBuffers() { - for (;;) { + private void releaseBuffersAndCheckContent() { + int i = 0; + while (!buffers.isEmpty()) { ByteBuf buf = buffers.poll(); - if (buf == null) { - break; + while (buf.isReadable()) { + assertEquals(i, buf.readByte()); } buf.release(); + i++; } } - public boolean isFinished() { + boolean isFinished() { return finish.get() != null; } - public void finish() throws Throwable { + void markAsFinished() { + finish.compareAndSet(null, Boolean.TRUE); + } + + void joinAndCheckForError() throws Throwable { try { // Mark as finish if not already done but ensure we not override the previous set error. 
- finish.compareAndSet(null, Boolean.TRUE); join(); } finally { - releaseBuffers(); + releaseBuffersAndCheckContent(); } checkForError(); } - public void checkForError() throws Throwable { + void checkForError() throws Throwable { Object obj = finish.get(); if (obj instanceof Throwable) { throw (Throwable) obj; } } } + + @SuppressWarnings("unchecked") + private static PooledByteBuf unwrapIfNeeded(ByteBuf buf) { + return (PooledByteBuf) (buf instanceof PooledByteBuf ? buf : buf.unwrap()); + } + + @Test + public void testCacheWorksForNormalAllocations() { + int maxCachedBufferCapacity = PooledByteBufAllocator.DEFAULT_MAX_CACHED_BUFFER_CAPACITY; + final PooledByteBufAllocator allocator = + new PooledByteBufAllocator(true, 1, 1, + PooledByteBufAllocator.defaultPageSize(), PooledByteBufAllocator.defaultMaxOrder(), + 128, 128, true); + ByteBuf buffer = allocator.directBuffer(maxCachedBufferCapacity); + assertEquals(1, allocator.metric().directArenas().get(0).numNormalAllocations()); + buffer.release(); + + buffer = allocator.directBuffer(maxCachedBufferCapacity); + // Should come out of the cache so the count should not be incremented + assertEquals(1, allocator.metric().directArenas().get(0).numNormalAllocations()); + buffer.release(); + + // Should be allocated without cache and also not put back in a cache. 
+ buffer = allocator.directBuffer(maxCachedBufferCapacity + 1); + assertEquals(2, allocator.metric().directArenas().get(0).numNormalAllocations()); + buffer.release(); + + buffer = allocator.directBuffer(maxCachedBufferCapacity + 1); + assertEquals(3, allocator.metric().directArenas().get(0).numNormalAllocations()); + buffer.release(); + } + + @Test + public void testNormalPoolSubpageRelease() { + // 16 < elemSize <= 7168 or 8192 < elemSize <= 28672, 1 < subpage.maxNumElems <= 256 + // 7168 <= elemSize <= 8192, subpage.maxNumElems == 1 + int elemSize = 8192; + int length = 1024; + ByteBuf[] byteBufs = new ByteBuf[length]; + final PooledByteBufAllocator allocator = new PooledByteBufAllocator(false, 32, 32, 8192, 11, 256, 64, false, 0); + + for (int i = 0; i < length; i++) { + byteBufs[i] = allocator.heapBuffer(elemSize, elemSize); + } + PoolChunk chunk = unwrapIfNeeded(byteBufs[0]).chunk; + + int beforeFreeBytes = chunk.freeBytes(); + for (int i = 0; i < length; i++) { + byteBufs[i].release(); + } + int afterFreeBytes = chunk.freeBytes(); + + assertTrue(beforeFreeBytes < afterFreeBytes); + } } diff --git a/buffer/src/test/java/io/netty/buffer/PooledLittleEndianDirectByteBufTest.java b/buffer/src/test/java/io/netty/buffer/PooledLittleEndianDirectByteBufTest.java index 5a5606cd9ae..2e0300a67a9 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledLittleEndianDirectByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledLittleEndianDirectByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ import java.nio.ByteOrder; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertSame; /** * Tests little-endian direct channel buffers diff --git a/buffer/src/test/java/io/netty/buffer/PooledLittleEndianHeapByteBufTest.java b/buffer/src/test/java/io/netty/buffer/PooledLittleEndianHeapByteBufTest.java index a89873708de..bbaa0075742 100644 --- a/buffer/src/test/java/io/netty/buffer/PooledLittleEndianHeapByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/PooledLittleEndianHeapByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ import java.nio.ByteOrder; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertSame; /** * Tests little-endian heap channel buffers diff --git a/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufTest.java b/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufTest.java index e0ea934c0b5..0f0a957ff1d 100644 --- a/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.io.InputStream; @@ -31,11 +31,11 @@ import static io.netty.buffer.Unpooled.LITTLE_ENDIAN; import static io.netty.buffer.Unpooled.buffer; import static io.netty.buffer.Unpooled.unmodifiableBuffer; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; @@ -44,9 +44,9 @@ */ public class ReadOnlyByteBufTest { - @Test(expected = NullPointerException.class) + @Test public void shouldNotAllowNullInConstructor() { - new ReadOnlyByteBuf(null); + assertThrows(NullPointerException.class, () -> new ReadOnlyByteBuf(null)); } @Test @@ -127,59 +127,65 @@ public void shouldForwardReadCallsBlindly() throws Exception { assertEquals(27, roBuf.capacity()); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectDiscardReadBytes() { - unmodifiableBuffer(EMPTY_BUFFER).discardReadBytes(); + assertThrows(UnsupportedOperationException.class, () -> unmodifiableBuffer(EMPTY_BUFFER).discardReadBytes()); } - @Test(expected = UnsupportedOperationException.class) + @Test public void 
shouldRejectSetByte() { - unmodifiableBuffer(EMPTY_BUFFER).setByte(0, (byte) 0); + assertThrows(UnsupportedOperationException.class, () -> unmodifiableBuffer(EMPTY_BUFFER).setByte(0, (byte) 0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectSetShort() { - unmodifiableBuffer(EMPTY_BUFFER).setShort(0, (short) 0); + assertThrows(UnsupportedOperationException.class, + () -> unmodifiableBuffer(EMPTY_BUFFER).setShort(0, (short) 0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectSetMedium() { - unmodifiableBuffer(EMPTY_BUFFER).setMedium(0, 0); + assertThrows(UnsupportedOperationException.class, () -> unmodifiableBuffer(EMPTY_BUFFER).setMedium(0, 0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectSetInt() { - unmodifiableBuffer(EMPTY_BUFFER).setInt(0, 0); + assertThrows(UnsupportedOperationException.class, () -> unmodifiableBuffer(EMPTY_BUFFER).setInt(0, 0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectSetLong() { - unmodifiableBuffer(EMPTY_BUFFER).setLong(0, 0); + assertThrows(UnsupportedOperationException.class, () -> unmodifiableBuffer(EMPTY_BUFFER).setLong(0, 0)); } - @Test(expected = UnsupportedOperationException.class) - public void shouldRejectSetBytes1() throws IOException { - unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (InputStream) null, 0); + @Test + public void shouldRejectSetBytes1() { + assertThrows(UnsupportedOperationException.class, + () -> unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (InputStream) null, 0)); } - @Test(expected = UnsupportedOperationException.class) - public void shouldRejectSetBytes2() throws IOException { - unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ScatteringByteChannel) null, 0); + @Test + public void shouldRejectSetBytes2() { + assertThrows(UnsupportedOperationException.class, + () -> unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ScatteringByteChannel) 
null, 0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectSetBytes3() { - unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (byte[]) null, 0, 0); + assertThrows(UnsupportedOperationException.class, + () -> unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (byte[]) null, 0, 0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectSetBytes4() { - unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ByteBuf) null, 0, 0); + assertThrows(UnsupportedOperationException.class, + () -> unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ByteBuf) null, 0, 0)); } - @Test(expected = UnsupportedOperationException.class) + @Test public void shouldRejectSetBytes5() { - unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ByteBuffer) null); + assertThrows(UnsupportedOperationException.class, + () -> unmodifiableBuffer(EMPTY_BUFFER).setBytes(0, (ByteBuffer) null)); } @Test @@ -211,13 +217,12 @@ private static void ensureWritableIntStatusShouldFailButNotThrow(boolean force) readOnly.release(); } - @Test(expected = ReadOnlyBufferException.class) + @Test public void ensureWritableShouldThrow() { ByteBuf buf = buffer(1); ByteBuf readOnly = buf.asReadOnly(); try { - readOnly.ensureWritable(1); - fail(); + assertThrows(ReadOnlyBufferException.class, () -> readOnly.ensureWritable(1)); } finally { buf.release(); } diff --git a/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufferBufTest.java b/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufferBufTest.java index da0463eac59..be071104b1c 100644 --- a/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufferBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/ReadOnlyByteBufferBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,12 +15,12 @@ */ package io.netty.buffer; -import io.netty.util.internal.PlatformDependent; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; +import java.util.concurrent.ThreadLocalRandom; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class ReadOnlyByteBufferBufTest extends ReadOnlyDirectByteBufferBufTest { @Override @@ -40,7 +40,7 @@ public void testCopyHeap() { private static void testCopy(boolean direct) { byte[] bytes = new byte[1024]; - PlatformDependent.threadLocalRandom().nextBytes(bytes); + ThreadLocalRandom.current().nextBytes(bytes); ByteBuffer nioBuffer = direct ? ByteBuffer.allocateDirect(bytes.length) : ByteBuffer.allocate(bytes.length); nioBuffer.put(bytes).flip(); diff --git a/buffer/src/test/java/io/netty/buffer/ReadOnlyDirectByteBufferBufTest.java b/buffer/src/test/java/io/netty/buffer/ReadOnlyDirectByteBufferBufTest.java index cbee31c4480..f2ca630ba3b 100644 --- a/buffer/src/test/java/io/netty/buffer/ReadOnlyDirectByteBufferBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/ReadOnlyDirectByteBufferBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,17 +16,22 @@ package io.netty.buffer; import io.netty.util.internal.PlatformDependent; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; +import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.ReadOnlyBufferException; import java.nio.channels.FileChannel; +import java.util.concurrent.ThreadLocalRandom; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class ReadOnlyDirectByteBufferBufTest { @@ -38,88 +43,148 @@ protected ByteBuffer allocate(int size) { return ByteBuffer.allocateDirect(size); } - @Test(expected = IllegalArgumentException.class) + @Test + public void testIsContiguous() { + ByteBuf buf = buffer(allocate(4).asReadOnlyBuffer()); + assertTrue(buf.isContiguous()); + buf.release(); + } + + @Test public void testConstructWithWritable() { - buffer(allocate(1)); + assertThrows(IllegalArgumentException.class, () -> buffer(allocate(1))); + } + + @Test + public void shouldIndicateNotWritable() { + ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()).clear(); + try { + assertFalse(buf.isWritable()); + } finally { + buf.release(); + } + } + + @Test + public void shouldIndicateNotWritableAnyNumber() { + ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()).clear(); + try { + assertFalse(buf.isWritable(1)); + } finally 
{ + buf.release(); + } + } + + @Test + public void ensureWritableIntStatusShouldFailButNotThrow() { + ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()).clear(); + try { + int result = buf.ensureWritable(1, false); + assertEquals(1, result); + } finally { + buf.release(); + } } - @Test(expected = ReadOnlyBufferException.class) + @Test + public void ensureWritableForceIntStatusShouldFailButNotThrow() { + ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()).clear(); + try { + int result = buf.ensureWritable(1, true); + assertEquals(1, result); + } finally { + buf.release(); + } + } + + @Test + public void ensureWritableShouldThrow() { + ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()).clear(); + try { + assertThrows(ReadOnlyBufferException.class, () -> buf.ensureWritable(1)); + } finally { + buf.release(); + } + } + + @Test public void testSetByte() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); try { - buf.setByte(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setByte(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetInt() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); try { - buf.setInt(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setInt(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetShort() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); try { - buf.setShort(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setShort(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetMedium() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); try { - buf.setMedium(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setMedium(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetLong() { ByteBuf buf = 
buffer(allocate(8).asReadOnlyBuffer()); try { - buf.setLong(0, 1); + assertThrows(ReadOnlyBufferException.class, () -> buf.setLong(0, 1)); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetBytesViaArray() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); try { - buf.setBytes(0, "test".getBytes()); + assertThrows(ReadOnlyBufferException.class, () -> buf.setBytes(0, "test".getBytes())); } finally { buf.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetBytesViaBuffer() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); ByteBuf copy = Unpooled.copyInt(1); try { - buf.setBytes(0, copy); + assertThrows(ReadOnlyBufferException.class, () -> buf.setBytes(0, copy)); } finally { buf.release(); copy.release(); } } - @Test(expected = ReadOnlyBufferException.class) + @Test public void testSetBytesViaStream() throws IOException { ByteBuf buf = buffer(ByteBuffer.allocateDirect(8).asReadOnlyBuffer()); try { - buf.setBytes(0, new ByteArrayInputStream("test".getBytes()), 2); + assertThrows(ReadOnlyBufferException.class, + () -> buf.setBytes(0, new ByteArrayInputStream("test".getBytes()), 2)); } finally { buf.release(); } @@ -130,12 +195,12 @@ public void testGetReadByte() { ByteBuf buf = buffer( ((ByteBuffer) allocate(2).put(new byte[] { (byte) 1, (byte) 2 }).flip()).asReadOnlyBuffer()); - Assert.assertEquals(1, buf.getByte(0)); - Assert.assertEquals(2, buf.getByte(1)); + assertEquals(1, buf.getByte(0)); + assertEquals(2, buf.getByte(1)); - Assert.assertEquals(1, buf.readByte()); - Assert.assertEquals(2, buf.readByte()); - Assert.assertFalse(buf.isReadable()); + assertEquals(1, buf.readByte()); + assertEquals(2, buf.readByte()); + assertFalse(buf.isReadable()); buf.release(); } @@ -144,12 +209,12 @@ public void testGetReadByte() { public void testGetReadInt() { ByteBuf buf = buffer(((ByteBuffer) allocate(8).putInt(1).putInt(2).flip()).asReadOnlyBuffer()); - 
Assert.assertEquals(1, buf.getInt(0)); - Assert.assertEquals(2, buf.getInt(4)); + assertEquals(1, buf.getInt(0)); + assertEquals(2, buf.getInt(4)); - Assert.assertEquals(1, buf.readInt()); - Assert.assertEquals(2, buf.readInt()); - Assert.assertFalse(buf.isReadable()); + assertEquals(1, buf.readInt()); + assertEquals(2, buf.readInt()); + assertFalse(buf.isReadable()); buf.release(); } @@ -159,12 +224,12 @@ public void testGetReadShort() { ByteBuf buf = buffer(((ByteBuffer) allocate(8) .putShort((short) 1).putShort((short) 2).flip()).asReadOnlyBuffer()); - Assert.assertEquals(1, buf.getShort(0)); - Assert.assertEquals(2, buf.getShort(2)); + assertEquals(1, buf.getShort(0)); + assertEquals(2, buf.getShort(2)); - Assert.assertEquals(1, buf.readShort()); - Assert.assertEquals(2, buf.readShort()); - Assert.assertFalse(buf.isReadable()); + assertEquals(1, buf.readShort()); + assertEquals(2, buf.readShort()); + assertFalse(buf.isReadable()); buf.release(); } @@ -174,22 +239,36 @@ public void testGetReadLong() { ByteBuf buf = buffer(((ByteBuffer) allocate(16) .putLong(1).putLong(2).flip()).asReadOnlyBuffer()); - Assert.assertEquals(1, buf.getLong(0)); - Assert.assertEquals(2, buf.getLong(8)); + assertEquals(1, buf.getLong(0)); + assertEquals(2, buf.getLong(8)); - Assert.assertEquals(1, buf.readLong()); - Assert.assertEquals(2, buf.readLong()); - Assert.assertFalse(buf.isReadable()); + assertEquals(1, buf.readLong()); + assertEquals(2, buf.readLong()); + assertFalse(buf.isReadable()); buf.release(); } + @Test + public void testGetBytesByteBuffer() { + byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}; + // Ensure destination buffer is bigger then what is in the ByteBuf. 
+ ByteBuffer nioBuffer = ByteBuffer.allocate(bytes.length + 1); + ByteBuf buffer = buffer(((ByteBuffer) allocate(bytes.length) + .put(bytes).flip()).asReadOnlyBuffer()); + try { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(buffer.readerIndex(), nioBuffer)); + } finally { + buffer.release(); + } + } + @Test public void testCopy() { ByteBuf buf = buffer(((ByteBuffer) allocate(16).putLong(1).putLong(2).flip()).asReadOnlyBuffer()); ByteBuf copy = buf.copy(); - Assert.assertEquals(buf, copy); + assertEquals(buf, copy); buf.release(); copy.release(); @@ -200,7 +279,7 @@ public void testCopyWithOffset() { ByteBuf buf = buffer(((ByteBuffer) allocate(16).putLong(1).putLong(2).flip()).asReadOnlyBuffer()); ByteBuf copy = buf.copy(1, 9); - Assert.assertEquals(buf.slice(1, 9), copy); + assertEquals(buf.slice(1, 9), copy); buf.release(); copy.release(); @@ -213,7 +292,7 @@ public void testWrapBufferWithNonZeroPosition() { .putLong(1).flip().position(1)).asReadOnlyBuffer()); ByteBuf slice = buf.slice(); - Assert.assertEquals(buf, slice); + assertEquals(buf, slice); buf.release(); } @@ -222,31 +301,31 @@ public void testWrapBufferWithNonZeroPosition() { public void testWrapBufferRoundTrip() { ByteBuf buf = buffer(((ByteBuffer) allocate(16).putInt(1).putInt(2).flip()).asReadOnlyBuffer()); - Assert.assertEquals(1, buf.readInt()); + assertEquals(1, buf.readInt()); ByteBuffer nioBuffer = buf.nioBuffer(); // Ensure this can be accessed without throwing a BufferUnderflowException - Assert.assertEquals(2, nioBuffer.getInt()); + assertEquals(2, nioBuffer.getInt()); buf.release(); } @Test public void testWrapMemoryMapped() throws Exception { - File file = File.createTempFile("netty-test", "tmp"); + File file = PlatformDependent.createTempFile("netty-test", "tmp", null); FileChannel output = null; FileChannel input = null; ByteBuf b1 = null; ByteBuf b2 = null; try { - output = new FileOutputStream(file).getChannel(); + output = new RandomAccessFile(file, 
"rw").getChannel(); byte[] bytes = new byte[1024]; - PlatformDependent.threadLocalRandom().nextBytes(bytes); + ThreadLocalRandom.current().nextBytes(bytes); output.write(ByteBuffer.wrap(bytes)); - input = new FileInputStream(file).getChannel(); + input = new RandomAccessFile(file, "r").getChannel(); ByteBuffer m = input.map(FileChannel.MapMode.READ_ONLY, 0, input.size()); b1 = buffer(m); @@ -257,7 +336,7 @@ public void testWrapMemoryMapped() throws Exception { b2 = buffer(dup); - Assert.assertEquals(b2, b1.slice(2, 2)); + assertEquals(b2, b1.slice(2, 2)); } finally { if (b1 != null) { b1.release(); @@ -279,10 +358,10 @@ public void testWrapMemoryMapped() throws Exception { public void testMemoryAddress() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); try { - Assert.assertFalse(buf.hasMemoryAddress()); + assertFalse(buf.hasMemoryAddress()); try { buf.memoryAddress(); - Assert.fail(); + fail(); } catch (UnsupportedOperationException expected) { // expected } diff --git a/buffer/src/test/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBufferBufTest.java b/buffer/src/test/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBufferBufTest.java index efab33f469b..e091174a56e 100644 --- a/buffer/src/test/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBufferBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/ReadOnlyUnsafeDirectByteBufferBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,22 +16,22 @@ package io.netty.buffer; import io.netty.util.internal.PlatformDependent; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; -import static org.junit.Assume.assumeTrue; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assumptions.assumeTrue; public class ReadOnlyUnsafeDirectByteBufferBufTest extends ReadOnlyDirectByteBufferBufTest { /** * Needs unsafe to run */ - @BeforeClass + @BeforeAll public static void assumeConditions() { - assumeTrue("sun.misc.Unsafe not found, skip tests", PlatformDependent.hasUnsafe()); + assumeTrue(PlatformDependent.hasUnsafe(), "sun.misc.Unsafe not found, skip tests"); } @Override @@ -44,7 +44,7 @@ protected ByteBuf buffer(ByteBuffer buffer) { public void testMemoryAddress() { ByteBuf buf = buffer(allocate(8).asReadOnlyBuffer()); try { - Assert.assertTrue(buf.hasMemoryAddress()); + assertTrue(buf.hasMemoryAddress()); buf.memoryAddress(); } finally { buf.release(); diff --git a/buffer/src/test/java/io/netty/buffer/RetainedDuplicatedByteBufTest.java b/buffer/src/test/java/io/netty/buffer/RetainedDuplicatedByteBufTest.java index d763d31b71e..5bc570270c5 100644 --- a/buffer/src/test/java/io/netty/buffer/RetainedDuplicatedByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/RetainedDuplicatedByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,7 +16,7 @@ package io.netty.buffer; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class RetainedDuplicatedByteBufTest extends DuplicatedByteBufTest { @Override diff --git a/buffer/src/test/java/io/netty/buffer/RetainedSlicedByteBufTest.java b/buffer/src/test/java/io/netty/buffer/RetainedSlicedByteBufTest.java index 3427fc0ef3c..898ad2f377d 100644 --- a/buffer/src/test/java/io/netty/buffer/RetainedSlicedByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/RetainedSlicedByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,8 +16,7 @@ package io.netty.buffer; - -import org.junit.Assert; +import static org.junit.jupiter.api.Assertions.assertEquals; public class RetainedSlicedByteBufTest extends SlicedByteBufTest { @@ -25,7 +24,7 @@ public class RetainedSlicedByteBufTest extends SlicedByteBufTest { protected ByteBuf newSlice(ByteBuf buffer, int offset, int length) { ByteBuf slice = buffer.retainedSlice(offset, length); buffer.release(); - Assert.assertEquals(buffer.refCnt(), slice.refCnt()); + assertEquals(buffer.refCnt(), slice.refCnt()); return slice; } } diff --git a/buffer/src/test/java/io/netty/buffer/SimpleLeakAwareByteBufTest.java b/buffer/src/test/java/io/netty/buffer/SimpleLeakAwareByteBufTest.java index 66ca89fad36..329a3b5ac3e 100644 
--- a/buffer/src/test/java/io/netty/buffer/SimpleLeakAwareByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/SimpleLeakAwareByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,19 +16,19 @@ package io.netty.buffer; import io.netty.util.ResourceLeakTracker; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.util.ArrayDeque; import java.util.Queue; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; public class SimpleLeakAwareByteBufTest extends BigEndianHeapByteBufTest { private final Class clazz = leakClass(); - private final Queue> trackers = new ArrayDeque>(); + private final Queue> trackers = new ArrayDeque<>(); @Override protected final ByteBuf newBuffer(int capacity, int maxCapacity) { @@ -36,7 +36,7 @@ protected final ByteBuf newBuffer(int capacity, int maxCapacity) { } private ByteBuf wrap(ByteBuf buffer) { - NoopResourceLeakTracker tracker = new NoopResourceLeakTracker(); + NoopResourceLeakTracker tracker = new NoopResourceLeakTracker<>(); ByteBuf leakAwareBuf = wrap(buffer, tracker); trackers.add(tracker); return leakAwareBuf; @@ -46,14 +46,14 @@ protected SimpleLeakAwareByteBuf wrap(ByteBuf buffer, ResourceLeakTracker clazz = leakClass(); - private final Queue> trackers = new ArrayDeque>(); + private final Queue> trackers = new ArrayDeque<>(); @Override protected final 
WrappedCompositeByteBuf wrap(CompositeByteBuf buffer) { - NoopResourceLeakTracker tracker = new NoopResourceLeakTracker(); + NoopResourceLeakTracker tracker = new NoopResourceLeakTracker<>(); WrappedCompositeByteBuf leakAwareBuf = wrap(buffer, tracker); trackers.add(tracker); return leakAwareBuf; @@ -43,14 +46,14 @@ protected SimpleLeakAwareCompositeByteBuf wrap(CompositeByteBuf buffer, Resource return new SimpleLeakAwareCompositeByteBuf(buffer, tracker); } - @Before + @BeforeEach @Override public void init() { super.init(); trackers.clear(); } - @After + @AfterEach @Override public void dispose() { super.dispose(); @@ -131,6 +134,21 @@ public void testWrapReadOnly() { assertWrapped(newBuffer(8).asReadOnly()); } + @Test + public void forEachByteUnderLeakDetectionShouldNotThrowException() { + CompositeByteBuf buf = (CompositeByteBuf) newBuffer(8); + assertThat(buf, CoreMatchers.instanceOf(SimpleLeakAwareCompositeByteBuf.class)); + CompositeByteBuf comp = (CompositeByteBuf) newBuffer(8); + assertThat(comp, CoreMatchers.instanceOf(SimpleLeakAwareCompositeByteBuf.class)); + + ByteBuf inner = comp.alloc().directBuffer(1).writeByte(0); + comp.addComponent(true, inner); + buf.addComponent(true, comp); + + assertEquals(-1, buf.forEachByte(value -> true)); + assertTrue(buf.release()); + } + protected final void assertWrapped(ByteBuf buf) { try { assertSame(clazz, buf.getClass()); diff --git a/buffer/src/test/java/io/netty/buffer/SlicedByteBufTest.java b/buffer/src/test/java/io/netty/buffer/SlicedByteBufTest.java index 4079571febf..c97e2173ce8 100644 --- a/buffer/src/test/java/io/netty/buffer/SlicedByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/SlicedByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,17 +15,17 @@ */ package io.netty.buffer; -import io.netty.util.internal.PlatformDependent; -import org.junit.Assume; -import org.junit.Ignore; -import org.junit.Test; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.Disabled; +import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; +import java.util.concurrent.ThreadLocalRandom; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests sliced channel buffers @@ -34,8 +34,8 @@ public class SlicedByteBufTest extends AbstractByteBufTest { @Override protected final ByteBuf newBuffer(int length, int maxCapacity) { - Assume.assumeTrue(maxCapacity == Integer.MAX_VALUE); - int offset = length == 0 ? 0 : PlatformDependent.threadLocalRandom().nextInt(length); + Assumptions.assumeTrue(maxCapacity == Integer.MAX_VALUE); + int offset = length == 0 ? 
0 : ThreadLocalRandom.current().nextInt(length); ByteBuf buffer = Unpooled.buffer(length * 2); ByteBuf slice = newSlice(buffer, offset, length); assertEquals(0, slice.readerIndex()); @@ -47,69 +47,76 @@ protected ByteBuf newSlice(ByteBuf buffer, int offset, int length) { return buffer.slice(offset, length); } - @Test(expected = NullPointerException.class) + @Test + public void testIsContiguous() { + ByteBuf buf = newBuffer(4); + assertEquals(buf.unwrap().isContiguous(), buf.isContiguous()); + buf.release(); + } + + @Test public void shouldNotAllowNullInConstructor() { - new SlicedByteBuf(null, 0, 0); + assertThrows(NullPointerException.class, () -> new SlicedByteBuf(null, 0, 0)); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testInternalNioBuffer() { - super.testInternalNioBuffer(); + assertThrows(IndexOutOfBoundsException.class, super::testInternalNioBuffer); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testDuplicateReadGatheringByteChannelMultipleThreads() throws Exception { - super.testDuplicateReadGatheringByteChannelMultipleThreads(); + public void testDuplicateReadGatheringByteChannelMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testDuplicateReadGatheringByteChannelMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testSliceReadGatheringByteChannelMultipleThreads() throws Exception { - super.testSliceReadGatheringByteChannelMultipleThreads(); + public void testSliceReadGatheringByteChannelMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testSliceReadGatheringByteChannelMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testDuplicateReadOutputStreamMultipleThreads() throws Exception { - super.testDuplicateReadOutputStreamMultipleThreads(); + public void testDuplicateReadOutputStreamMultipleThreads() { + 
assertThrows(IndexOutOfBoundsException.class, super::testDuplicateReadOutputStreamMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testSliceReadOutputStreamMultipleThreads() throws Exception { - super.testSliceReadOutputStreamMultipleThreads(); + public void testSliceReadOutputStreamMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testSliceReadOutputStreamMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testDuplicateBytesInArrayMultipleThreads() throws Exception { - super.testDuplicateBytesInArrayMultipleThreads(); + public void testDuplicateBytesInArrayMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testDuplicateBytesInArrayMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testSliceBytesInArrayMultipleThreads() throws Exception { - super.testSliceBytesInArrayMultipleThreads(); + public void testSliceBytesInArrayMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testSliceBytesInArrayMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testNioBufferExposeOnlyRegion() { - super.testNioBufferExposeOnlyRegion(); + assertThrows(IndexOutOfBoundsException.class, super::testNioBufferExposeOnlyRegion); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testGetReadOnlyDirectDst() { - super.testGetReadOnlyDirectDst(); + assertThrows(IndexOutOfBoundsException.class, super::testGetReadOnlyDirectDst); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testGetReadOnlyHeapDst() { - super.testGetReadOnlyHeapDst(); + assertThrows(IndexOutOfBoundsException.class, super::testGetReadOnlyHeapDst); } @Test @@ -136,34 +143,32 @@ public void testForEachByte2() { // Ignore for SlicedByteBuf } - @Ignore("Sliced ByteBuf objects don't allow the 
capacity to change. So this test would fail and shouldn't be run") + @Disabled("Sliced ByteBuf objects don't allow the capacity to change. So this test would fail and shouldn't be run") @Override public void testDuplicateCapacityChange() { } - @Ignore("Sliced ByteBuf objects don't allow the capacity to change. So this test would fail and shouldn't be run") + @Disabled("Sliced ByteBuf objects don't allow the capacity to change. So this test would fail and shouldn't be run") @Override public void testRetainedDuplicateCapacityChange() { } @Test - public void testReaderIndexAndMarks() { + public void testReaderIndex() { ByteBuf wrapped = Unpooled.buffer(16); try { wrapped.writerIndex(14); wrapped.readerIndex(2); - wrapped.markWriterIndex(); - wrapped.markReaderIndex(); ByteBuf slice = wrapped.slice(4, 4); assertEquals(0, slice.readerIndex()); assertEquals(4, slice.writerIndex()); slice.readerIndex(slice.readerIndex() + 1); - slice.resetReaderIndex(); + slice.readerIndex(0); assertEquals(0, slice.readerIndex()); slice.writerIndex(slice.writerIndex() - 1); - slice.resetWriterIndex(); + slice.writerIndex(0); assertEquals(0, slice.writerIndex()); } finally { wrapped.release(); @@ -194,41 +199,42 @@ public void sliceEmptyNotLeak() { } @Override - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testGetBytesByteBuffer() { byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}; // Ensure destination buffer is bigger then what is wrapped in the ByteBuf. 
ByteBuffer nioBuffer = ByteBuffer.allocate(bytes.length + 1); ByteBuf wrappedBuffer = Unpooled.wrappedBuffer(bytes).slice(0, bytes.length - 1); try { - wrappedBuffer.getBytes(wrappedBuffer.readerIndex(), nioBuffer); + assertThrows(IndexOutOfBoundsException.class, + () -> wrappedBuffer.getBytes(wrappedBuffer.readerIndex(), nioBuffer)); } finally { wrappedBuffer.release(); } } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteUsAsciiCharSequenceExpand() { - super.testWriteUsAsciiCharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testWriteUsAsciiCharSequenceExpand); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteUtf8CharSequenceExpand() { - super.testWriteUtf8CharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testWriteUtf8CharSequenceExpand); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteIso88591CharSequenceExpand() { - super.testWriteIso88591CharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testWriteIso88591CharSequenceExpand); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteUtf16CharSequenceExpand() { - super.testWriteUtf16CharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testWriteUtf16CharSequenceExpand); } @Test @@ -247,14 +253,13 @@ public void ensureWritableWithEnoughSpaceShouldNotThrow() { slice.release(); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void ensureWritableWithNotEnoughSpaceShouldThrow() { ByteBuf slice = newBuffer(10); ByteBuf unwrapped = slice.unwrap(); unwrapped.writerIndex(unwrapped.writerIndex() + 5); try { - slice.ensureWritable(1); - fail(); + assertThrows(IndexOutOfBoundsException.class, () -> slice.ensureWritable(1)); } finally { slice.release(); } diff --git a/buffer/src/test/java/io/netty/buffer/UnpooledByteBufAllocatorTest.java 
b/buffer/src/test/java/io/netty/buffer/UnpooledByteBufAllocatorTest.java index 4793b052bc5..bd51ef3c344 100644 --- a/buffer/src/test/java/io/netty/buffer/UnpooledByteBufAllocatorTest.java +++ b/buffer/src/test/java/io/netty/buffer/UnpooledByteBufAllocatorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/buffer/src/test/java/io/netty/buffer/UnpooledTest.java b/buffer/src/test/java/io/netty/buffer/UnpooledTest.java index f7fd30ba915..75734cadf28 100644 --- a/buffer/src/test/java/io/netty/buffer/UnpooledTest.java +++ b/buffer/src/test/java/io/netty/buffer/UnpooledTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,12 +15,14 @@ */ package io.netty.buffer; -import org.junit.Test; +import io.netty.util.CharsetUtil; +import org.junit.jupiter.api.Test; import org.mockito.Mockito; import java.io.InputStream; import java.nio.ByteBuffer; import java.nio.channels.ScatteringByteChannel; +import java.nio.charset.Charset; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; @@ -29,7 +31,12 @@ import static io.netty.buffer.Unpooled.*; import static io.netty.util.internal.EmptyArrays.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** * Tests channel buffers @@ -60,7 +67,7 @@ public void testCompositeWrappedBuffer() { @Test public void testHashCode() { - Map map = new LinkedHashMap(); + Map map = new LinkedHashMap<>(); map.put(EMPTY_BYTES, 1); map.put(new byte[] { 1 }, 32); map.put(new byte[] { 2 }, 33); @@ -149,7 +156,7 @@ public void testEquals() { @Test public void testCompare() { - List expected = new ArrayList(); + List expected = new ArrayList<>(); expected.add(wrappedBuffer(new byte[]{1})); expected.add(wrappedBuffer(new byte[]{1, 2})); expected.add(wrappedBuffer(new byte[]{1, 2, 3, 4, 5, 6, 7, 8, 9, 10})); @@ -308,6 +315,30 @@ public void testMultiUnReadableByteBufReleased() { assertEquals(0, buf2.refCnt()); } + @Test + public void testCopiedBufferUtf8() { + testCopiedBufferCharSequence("Some UTF_8 like äÄ∏ŒŒ", 
CharsetUtil.UTF_8); + } + + @Test + public void testCopiedBufferAscii() { + testCopiedBufferCharSequence("Some US_ASCII", CharsetUtil.US_ASCII); + } + + @Test + public void testCopiedBufferSomeOtherCharset() { + testCopiedBufferCharSequence("Some ISO_8859_1", CharsetUtil.ISO_8859_1); + } + + private static void testCopiedBufferCharSequence(CharSequence sequence, Charset charset) { + ByteBuf copied = copiedBuffer(sequence, charset); + try { + assertEquals(sequence, copied.toString(charset)); + } finally { + copied.release(); + } + } + @Test public void testCopiedBuffer() { ByteBuf copied = copiedBuffer(ByteBuffer.allocateDirect(16)); @@ -668,11 +699,11 @@ public void wrappedReadOnlyDirectBuffer() { wrapped.release(); } - @Test(expected = IllegalArgumentException.class) + @Test public void skipBytesNegativeLength() { ByteBuf buf = buffer(8); try { - buf.skipBytes(-1); + assertThrows(IllegalArgumentException.class, () -> buf.skipBytes(-1)); } finally { buf.release(); } @@ -696,27 +727,29 @@ public void testWrapByteBufArrayStartsWithNonReadable() { assertEquals(0, wrapped.refCnt()); } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testGetBytesByteBuffer() { byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}; // Ensure destination buffer is bigger then what is wrapped in the ByteBuf. ByteBuffer nioBuffer = ByteBuffer.allocate(bytes.length + 1); ByteBuf wrappedBuffer = wrappedBuffer(bytes); try { - wrappedBuffer.getBytes(wrappedBuffer.readerIndex(), nioBuffer); + assertThrows(IndexOutOfBoundsException.class, + () -> wrappedBuffer.getBytes(wrappedBuffer.readerIndex(), nioBuffer)); } finally { wrappedBuffer.release(); } } - @Test(expected = IndexOutOfBoundsException.class) + @Test public void testGetBytesByteBuffer2() { byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}; // Ensure destination buffer is bigger then what is wrapped in the ByteBuf. 
ByteBuffer nioBuffer = ByteBuffer.allocate(bytes.length + 1); ByteBuf wrappedBuffer = wrappedBuffer(bytes, 0, bytes.length); try { - wrappedBuffer.getBytes(wrappedBuffer.readerIndex(), nioBuffer); + assertThrows(IndexOutOfBoundsException.class, + () -> wrappedBuffer.getBytes(wrappedBuffer.readerIndex(), nioBuffer)); } finally { wrappedBuffer.release(); } diff --git a/buffer/src/test/java/io/netty/buffer/UnreleaseableByteBufTest.java b/buffer/src/test/java/io/netty/buffer/UnreleaseableByteBufTest.java index 108cd6fde26..9ee22db4e7d 100644 --- a/buffer/src/test/java/io/netty/buffer/UnreleaseableByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/UnreleaseableByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,13 +15,13 @@ */ package io.netty.buffer; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.buffer.Unpooled.buffer; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertSame; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; public class UnreleaseableByteBufTest { diff --git a/buffer/src/test/java/io/netty/buffer/UnsafeByteBufUtilTest.java b/buffer/src/test/java/io/netty/buffer/UnsafeByteBufUtilTest.java index d0bb4a115c3..0558d696742 100644 --- a/buffer/src/test/java/io/netty/buffer/UnsafeByteBufUtilTest.java +++ 
b/buffer/src/test/java/io/netty/buffer/UnsafeByteBufUtilTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,21 +16,22 @@ package io.netty.buffer; import io.netty.util.internal.PlatformDependent; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.nio.ByteBuffer; import static io.netty.util.internal.PlatformDependent.directBufferAddress; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; public class UnsafeByteBufUtilTest { - @Before + @BeforeEach public void checkHasUnsafe() { - Assume.assumeTrue("sun.misc.Unsafe not found, skip tests", PlatformDependent.hasUnsafe()); + Assumptions.assumeTrue(PlatformDependent.hasUnsafe(), "sun.misc.Unsafe not found, skip tests"); } @Test @@ -49,7 +50,7 @@ public void testSetBytesOnReadOnlyByteBuffer() throws Exception { byte[] check = new byte[length]; targetBuffer.getBytes(0, check, 0, length); - assertArrayEquals("The byte array's copy does not equal the original", testData, check); + assertArrayEquals(testData, check, "The byte array's copy does not equal the original"); } finally { targetBuffer.release(); } @@ -82,7 +83,7 @@ public void 
testSetBytesOnReadOnlyByteBufferWithPooledAlloc() throws Exception { byte[] check = new byte[length]; targetBuffer.getBytes(0, check, 0, length); - assertArrayEquals("The byte array's copy does not equal the original", testData, check); + assertArrayEquals(testData, check, "The byte array's copy does not equal the original"); } finally { targetBuffer.release(); b1.release(); @@ -90,4 +91,135 @@ public void testSetBytesOnReadOnlyByteBufferWithPooledAlloc() throws Exception { } } + @Test + public void testSetBytesWithByteArray() { + final byte[] testData = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + final int length = testData.length; + + final UnpooledByteBufAllocator alloc = new UnpooledByteBufAllocator(true); + final UnpooledDirectByteBuf targetBuffer = new UnpooledDirectByteBuf(alloc, length, length); + + try { + UnsafeByteBufUtil.setBytes(targetBuffer, + directBufferAddress(targetBuffer.nioBuffer()), 0, testData, 0, length); + + final byte[] check = new byte[length]; + targetBuffer.getBytes(0, check, 0, length); + + assertArrayEquals(testData, check, "The byte array's copy does not equal the original"); + } finally { + targetBuffer.release(); + } + } + + @Test + public void testSetBytesWithZeroLength() { + final byte[] testData = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}; + final int length = testData.length; + + final UnpooledByteBufAllocator alloc = new UnpooledByteBufAllocator(true); + final UnpooledDirectByteBuf targetBuffer = new UnpooledDirectByteBuf(alloc, length, length); + + try { + final byte[] beforeSet = new byte[length]; + targetBuffer.getBytes(0, beforeSet, 0, length); + + UnsafeByteBufUtil.setBytes(targetBuffer, + directBufferAddress(targetBuffer.nioBuffer()), 0, testData, 0, 0); + + final byte[] check = new byte[length]; + targetBuffer.getBytes(0, check, 0, length); + + assertArrayEquals(beforeSet, check); + } finally { + targetBuffer.release(); + } + } + + @Test + public void testSetBytesWithNullByteArray() { + + final UnpooledByteBufAllocator alloc = new 
UnpooledByteBufAllocator(true); + final UnpooledDirectByteBuf targetBuffer = new UnpooledDirectByteBuf(alloc, 8, 8); + + try { + assertThrows(NullPointerException.class, () -> UnsafeByteBufUtil.setBytes(targetBuffer, + directBufferAddress(targetBuffer.nioBuffer()), 0, (byte[]) null, 0, 8)); + } finally { + targetBuffer.release(); + } + } + + @Test + public void testSetBytesOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> { + // negative index + testSetBytesOutOfBounds0(4, 4, -1, 0, 4); + }); + } + + @Test + public void testSetBytesOutOfBounds2() { + assertThrows(IndexOutOfBoundsException.class, () -> { + // negative length + testSetBytesOutOfBounds0(4, 4, 0, 0, -1); + }); + } + + @Test + public void testSetBytesOutOfBounds3() { + assertThrows(IndexOutOfBoundsException.class, () -> { + // buffer length oversize + testSetBytesOutOfBounds0(4, 8, 0, 0, 5); + }); + } + + @Test + public void testSetBytesOutOfBounds4() { + assertThrows(IndexOutOfBoundsException.class, () -> { + // buffer length oversize + testSetBytesOutOfBounds0(4, 4, 3, 0, 3); + }); + } + + @Test + public void testSetBytesOutOfBounds5() { + assertThrows(IndexOutOfBoundsException.class, () -> { + // negative srcIndex + testSetBytesOutOfBounds0(4, 4, 0, -1, 4); + }); + } + + @Test + public void testSetBytesOutOfBounds6() { + assertThrows(IndexOutOfBoundsException.class, () -> { + // src length oversize + testSetBytesOutOfBounds0(8, 4, 0, 0, 5); + }); + } + + @Test + public void testSetBytesOutOfBounds7() { + assertThrows(IndexOutOfBoundsException.class, () -> { + // src length oversize + testSetBytesOutOfBounds0(4, 4, 0, 1, 4); + }); + } + + private static void testSetBytesOutOfBounds0(int lengthOfBuffer, + int lengthOfBytes, + int index, + int srcIndex, + int length) { + final UnpooledByteBufAllocator alloc = new UnpooledByteBufAllocator(true); + final UnpooledDirectByteBuf targetBuffer = new UnpooledDirectByteBuf(alloc, lengthOfBuffer, lengthOfBuffer); + + try { + 
UnsafeByteBufUtil.setBytes(targetBuffer, + directBufferAddress(targetBuffer.nioBuffer()), index, new byte[lengthOfBytes], srcIndex, length); + } finally { + targetBuffer.release(); + } + } + } diff --git a/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java index 7b37177b435..c4993c809f4 100644 --- a/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/WrappedCompositeByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -25,4 +25,9 @@ protected final ByteBuf newBuffer(int length, int maxCapacity) { protected WrappedCompositeByteBuf wrap(CompositeByteBuf buffer) { return new WrappedCompositeByteBuf(buffer); } + + @Override + protected CompositeByteBuf newCompositeBuffer() { + return wrap(super.newCompositeBuffer()); + } } diff --git a/buffer/src/test/java/io/netty/buffer/WrappedUnpooledUnsafeByteBufTest.java b/buffer/src/test/java/io/netty/buffer/WrappedUnpooledUnsafeByteBufTest.java index 36212580644..810522ec350 100644 --- a/buffer/src/test/java/io/netty/buffer/WrappedUnpooledUnsafeByteBufTest.java +++ b/buffer/src/test/java/io/netty/buffer/WrappedUnpooledUnsafeByteBufTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,134 +16,142 @@ package io.netty.buffer; import io.netty.util.internal.PlatformDependent; -import org.junit.Assume; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.Assumptions; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertThrows; public class WrappedUnpooledUnsafeByteBufTest extends BigEndianUnsafeDirectByteBufTest { - @Before + @BeforeEach @Override public void init() { - Assume.assumeTrue("PlatformDependent.useDirectBufferNoCleaner() returned false, skip tests", - PlatformDependent.useDirectBufferNoCleaner()); + Assumptions.assumeTrue(PlatformDependent.useDirectBufferNoCleaner(), + "PlatformDependent.useDirectBufferNoCleaner() returned false, skip tests"); super.init(); } @Override protected ByteBuf newBuffer(int length, int maxCapacity) { - Assume.assumeTrue(maxCapacity == Integer.MAX_VALUE); + Assumptions.assumeTrue(maxCapacity == Integer.MAX_VALUE); return new WrappedUnpooledUnsafeDirectByteBuf(UnpooledByteBufAllocator.DEFAULT, PlatformDependent.allocateMemory(length), length, true); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testInternalNioBuffer() { - super.testInternalNioBuffer(); + assertThrows(IndexOutOfBoundsException.class, super::testInternalNioBuffer); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testDuplicateReadGatheringByteChannelMultipleThreads() throws Exception { - super.testDuplicateReadGatheringByteChannelMultipleThreads(); + public void testDuplicateReadGatheringByteChannelMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, 
super::testDuplicateReadGatheringByteChannelMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testSliceReadGatheringByteChannelMultipleThreads() throws Exception { - super.testSliceReadGatheringByteChannelMultipleThreads(); + public void testSliceReadGatheringByteChannelMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testSliceReadGatheringByteChannelMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testDuplicateReadOutputStreamMultipleThreads() throws Exception { - super.testDuplicateReadOutputStreamMultipleThreads(); + public void testDuplicateReadOutputStreamMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testDuplicateReadOutputStreamMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testSliceReadOutputStreamMultipleThreads() throws Exception { - super.testSliceReadOutputStreamMultipleThreads(); + public void testSliceReadOutputStreamMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testSliceReadOutputStreamMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testDuplicateBytesInArrayMultipleThreads() throws Exception { - super.testDuplicateBytesInArrayMultipleThreads(); + public void testDuplicateBytesInArrayMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testDuplicateBytesInArrayMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override - public void testSliceBytesInArrayMultipleThreads() throws Exception { - super.testSliceBytesInArrayMultipleThreads(); + public void testSliceBytesInArrayMultipleThreads() { + assertThrows(IndexOutOfBoundsException.class, super::testSliceBytesInArrayMultipleThreads); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testNioBufferExposeOnlyRegion() { - 
super.testNioBufferExposeOnlyRegion(); + assertThrows(IndexOutOfBoundsException.class, super::testNioBufferExposeOnlyRegion); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testGetReadOnlyDirectDst() { - super.testGetReadOnlyDirectDst(); + assertThrows(IndexOutOfBoundsException.class, super::testGetReadOnlyDirectDst); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testGetReadOnlyHeapDst() { - super.testGetReadOnlyHeapDst(); + assertThrows(IndexOutOfBoundsException.class, super::testGetReadOnlyHeapDst); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testReadBytes() { - super.testReadBytes(); + assertThrows(IndexOutOfBoundsException.class, super::testReadBytes); } - @Test(expected = IllegalArgumentException.class) + @Test @Override public void testDuplicateCapacityChange() { - super.testDuplicateCapacityChange(); + assertThrows(IllegalArgumentException.class, super::testDuplicateCapacityChange); } - @Test(expected = IllegalArgumentException.class) + @Test @Override public void testRetainedDuplicateCapacityChange() { - super.testRetainedDuplicateCapacityChange(); + assertThrows(IllegalArgumentException.class, super::testRetainedDuplicateCapacityChange); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testLittleEndianWithExpand() { - super.testLittleEndianWithExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testLittleEndianWithExpand); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteUsAsciiCharSequenceExpand() { - super.testWriteUsAsciiCharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testWriteUsAsciiCharSequenceExpand); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteUtf8CharSequenceExpand() { - super.testWriteUtf8CharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, 
super::testWriteUtf8CharSequenceExpand); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteIso88591CharSequenceExpand() { - super.testWriteIso88591CharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testWriteIso88591CharSequenceExpand); } - @Test(expected = IndexOutOfBoundsException.class) + @Test @Override public void testWriteUtf16CharSequenceExpand() { - super.testWriteUtf16CharSequenceExpand(); + assertThrows(IndexOutOfBoundsException.class, super::testWriteUtf16CharSequenceExpand); + } + + @Test + @Override + public void testGetBytesByteBuffer() { + assertThrows(IndexOutOfBoundsException.class, super::testGetBytesByteBuffer); } @Test diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferBulkAccessTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferBulkAccessTest.java new file mode 100644 index 00000000000..ba9a495ffc0 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferBulkAccessTest.java @@ -0,0 +1,274 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.nio.ByteBuffer; + +import static io.netty.buffer.api.CompositeBuffer.compose; +import static org.assertj.core.api.Assertions.assertThat; + +public class BufferBulkAccessTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void fill(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + assertThat(buf.fill((byte) 0xA5)).isSameAs(buf); + buf.writerOffset(16); + assertEquals(0xA5A5A5A5_A5A5A5A5L, buf.readLong()); + assertEquals(0xA5A5A5A5_A5A5A5A5L, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoByteArray(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + byte[] array = new byte[8]; + buf.copyInto(0, array, 0, array.length); + assertThat(array).containsExactly(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08); + + array = new byte[6]; + buf.copyInto(1, array, 1, 3); + assertThat(array).containsExactly(0x00, 0x02, 0x03, 0x04, 0x00, 0x00); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoHeapByteBuffer(Fixture fixture) { + testCopyIntoByteBuffer(fixture, ByteBuffer::allocate); + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoDirectByteBuffer(Fixture fixture) { + testCopyIntoByteBuffer(fixture, ByteBuffer::allocateDirect); + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoOnHeapBuf(Fixture fixture) { + testCopyIntoBuf(fixture, BufferAllocator.onHeapUnpooled()::allocate); + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoOffHeapBuf(Fixture fixture) { + testCopyIntoBuf(fixture, 
BufferAllocator.offHeapUnpooled()::allocate); + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOnHeapOnHeapBuf(Fixture fixture) { + try (var a = BufferAllocator.onHeapUnpooled(); + var b = BufferAllocator.onHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOnHeapOffHeapBuf(Fixture fixture) { + try (var a = BufferAllocator.onHeapUnpooled(); + var b = BufferAllocator.offHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOffHeapOnHeapBuf(Fixture fixture) { + try (var a = BufferAllocator.offHeapUnpooled(); + var b = BufferAllocator.onHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOffHeapOffHeapBuf(Fixture fixture) { + try (var a = BufferAllocator.offHeapUnpooled(); + var b = BufferAllocator.offHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOnHeapOnHeapBufCopy(Fixture fixture) { + try (var a = 
BufferAllocator.onHeapUnpooled(); + var b = BufferAllocator.onHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy(); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOnHeapOffHeapBufCopy(Fixture fixture) { + try (var a = BufferAllocator.onHeapUnpooled(); + var b = BufferAllocator.offHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy(); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOffHeapOnHeapBufCopy(Fixture fixture) { + try (var a = BufferAllocator.offHeapUnpooled(); + var b = BufferAllocator.onHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy(); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyIntoCompositeOffHeapOffHeapBufCopy(Fixture fixture) { + try (var a = BufferAllocator.offHeapUnpooled(); + var b = BufferAllocator.offHeapUnpooled()) { + testCopyIntoBuf(fixture, size -> { + int first = size / 2; + int second = size - first; + try (var bufFirst = a.allocate(first); + var bufSecond = b.allocate(second)) { + return compose(a, bufFirst.send(), bufSecond.send()).writerOffset(size).copy(); + } + }); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void byteIterationOfBuffers(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = 
allocator.allocate(8)) { + checkByteIteration(buf); + buf.resetOffsets(); + checkByteIterationOfRegion(buf); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void reverseByteIterationOfBuffers(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(0x28)) { + checkReverseByteIteration(buf); + buf.resetOffsets(); + checkReverseByteIterationOfRegion(buf); + } + } + + @ParameterizedTest + @MethodSource("heapAllocators") + public void heapBufferMustHaveZeroAddress(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThat(buf.nativeAddress()).isZero(); + } + } + + @ParameterizedTest + @MethodSource("directAllocators") + public void directBufferMustHaveNonZeroAddress(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThat(buf.nativeAddress()).isNotZero(); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void writeBytesMustWriteAllBytesFromByteArray(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buffer = allocator.allocate(8)) { + buffer.writeByte((byte) 1); + buffer.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}); + assertThat(buffer.writerOffset()).isEqualTo(7); + assertThat(buffer.readerOffset()).isZero(); + assertThat(toByteArray(buffer)).containsExactly(1, 2, 3, 4, 5, 6, 7, 0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void writeBytesWithOffsetMustWriteAllBytesFromByteArray(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buffer = allocator.allocate(3)) { + buffer.writeByte((byte) 1); + buffer.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}, 1, 2); + assertThat(buffer.writerOffset()).isEqualTo(3); + assertThat(buffer.readerOffset()).isZero(); + assertThat(toByteArray(buffer)).containsExactly(1, 3, 4); + } + } +} diff --git 
a/buffer/src/test/java/io/netty/buffer/api/tests/BufferByteOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferByteOffsettedAccessorsTest.java new file mode 100644 index 00000000000..d8c000cd80c --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferByteOffsettedAccessorsTest.java @@ -0,0 +1,338 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferByteOffsettedAccessorsTest extends BufferTestSupport { + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getByte(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> 
buf.makeReadOnly().getByte(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + byte value = 0x01; + buf.writeByte(value); + assertEquals(value, buf.getByte(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + byte value = 0x01; + buf.writeByte(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x10, buf.getByte(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + byte value = 0x01; + buf.writeByte(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.getByte(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + byte value = 0x01; + buf.writeByte(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getByte(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getByte(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator 
allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getByte(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getByte(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfByteReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getByte(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedByte(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedByte(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01; + buf.writeUnsignedByte(value); + assertEquals(value, buf.getUnsignedByte(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteMustReadWithDefaultEndianByteOrder(Fixture 
fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01; + buf.writeUnsignedByte(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x10, buf.getUnsignedByte(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01; + buf.writeUnsignedByte(value); + buf.getUnsignedByte(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedByte(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01; + buf.writeUnsignedByte(value); + buf.makeReadOnly().getUnsignedByte(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedByte(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + 
buf.getUnsignedByte(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedByte(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getUnsignedByte(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedByteReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedByte(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfByteMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + byte value = 0x01; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setByte(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfByteMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + byte value = 0x01; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setByte(8, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfByteMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + byte value = 0x01; + buf.setByte(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedByteMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x01; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedByte(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedByteMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x01; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedByte(8, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedByteMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01; + buf.setUnsignedByte(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferCharOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCharOffsettedAccessorsTest.java new file mode 100644 index 00000000000..216ab6ed0b7 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCharOffsettedAccessorsTest.java @@ -0,0 +1,189 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferCharOffsettedAccessorsTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getChar(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getChar(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + char value = 0x0102; + buf.writeChar(value); + assertEquals(value, buf.getChar(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf 
= allocator.allocate(8)) { + char value = 0x0102; + buf.writeChar(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x1002, buf.getChar(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + char value = 0x0102; + buf.writeChar(value); + buf.getChar(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getChar(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + char value = 0x0102; + buf.writeChar(value); + buf.makeReadOnly().getChar(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getChar(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getChar(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator 
allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getChar(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getChar(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfCharReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getChar(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfCharMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + char value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setChar(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfCharMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + char value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setChar(7, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfCharMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + char value = 0x0102; + buf.setChar(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferCleanerTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCleanerTest.java new file mode 100644 index 00000000000..c40153d17a1 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCleanerTest.java @@ -0,0 +1,69 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.internal.Statics; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.List; +import java.util.Optional; +import java.util.stream.Collectors; +import java.util.stream.Stream; + +import static io.netty.buffer.api.MemoryManager.using; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public class BufferCleanerTest extends BufferTestSupport { + static Fixture[] unsafeAllocators() { + Optional maybeManager = MemoryManager.lookupImplementation("Unsafe"); + assumeTrue(maybeManager.isPresent()); + MemoryManager manager = maybeManager.get(); + List initFixtures = initialAllocators().stream().flatMap(f -> { + Stream.Builder builder = Stream.builder(); + builder.add(new Fixture(f + "/" + manager, () -> using(manager, f), f.getProperties())); + return builder.build(); + }).collect(Collectors.toList()); + return fixtureCombinations(initFixtures).filter(f -> f.isDirect()).toArray(Fixture[]::new); + } + + @ParameterizedTest + @MethodSource("unsafeAllocators") + public void bufferMustBeClosedByCleaner(Fixture fixture) throws InterruptedException { + var initial = Statics.MEM_USAGE_NATIVE.sum(); + int allocationSize = 1024; + allocateAndForget(fixture, allocationSize); + long sum = 0; + for (int i = 0; i < 15; i++) { + System.gc(); + System.runFinalization(); + sum = Statics.MEM_USAGE_NATIVE.sum() - initial; + if (sum < allocationSize) { + // The memory must have been cleaned. 
+ return; + } + } + assertThat(sum).isLessThan(allocationSize); + } + + private static void allocateAndForget(Fixture fixture, int size) { + var allocator = fixture.createAllocator(); + allocator.allocate(size); + allocator.close(); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferCompactTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCompactTest.java new file mode 100644 index 00000000000..f0c4840b428 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCompactTest.java @@ -0,0 +1,65 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.internal.ResourceSupport; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static io.netty.buffer.api.internal.Statics.acquire; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferCompactTest extends BufferTestSupport { + + @ParameterizedTest + @MethodSource("allocators") + public void compactMustDiscardReadBytes(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeLong(0x0102030405060708L).writeInt(0x090A0B0C); + assertEquals(0x01020304, buf.readInt()); + assertEquals(12, buf.writerOffset()); + assertEquals(4, buf.readerOffset()); + assertEquals(4, buf.writableBytes()); + assertEquals(8, buf.readableBytes()); + assertEquals(16, buf.capacity()); + buf.compact(); + assertEquals(8, buf.writerOffset()); + assertEquals(0, buf.readerOffset()); + assertEquals(8, buf.writableBytes()); + assertEquals(8, buf.readableBytes()); + assertEquals(16, buf.capacity()); + assertEquals(0x05060708090A0B0CL, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void compactMustThrowForUnownedBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + assertEquals((byte) 0x01, buf.readByte()); + try (Buffer ignore = acquire((ResourceSupport) buf)) { + assertThrows(IllegalStateException.class, () -> buf.compact()); + assertEquals(1, buf.readerOffset()); + } + assertEquals((byte) 0x02, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferComponentIterationTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferComponentIterationTest.java new file mode 100644 index 
00000000000..509c9437217 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferComponentIterationTest.java @@ -0,0 +1,387 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferClosedException; +import io.netty.buffer.api.BufferReadOnlyException; +import io.netty.buffer.api.ByteCursor; +import io.netty.buffer.api.CompositeBuffer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.nio.ByteBuffer; +import java.nio.ReadOnlyBufferException; +import java.util.LinkedList; +import java.util.List; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferComponentIterationTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("nonCompositeAllocators") + public void componentCountOfNonCompositeBufferMustBeOne(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThat(buf.countComponents()).isOne(); + } + } + + @ParameterizedTest + @MethodSource("nonCompositeAllocators") + public void 
readableComponentCountMustBeOneIfThereAreReadableBytes(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThat(buf.countReadableComponents()).isZero(); + buf.writeByte((byte) 1); + assertThat(buf.countReadableComponents()).isOne(); + } + } + + @ParameterizedTest + @MethodSource("nonCompositeAllocators") + public void writableComponentCountMustBeOneIfThereAreWritableBytes(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThat(buf.countWritableComponents()).isOne(); + buf.writeLong(1); + assertThat(buf.countWritableComponents()).isZero(); + } + } + + @Test + public void compositeBufferComponentCountMustBeTransitiveSum() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + Buffer buf; + try (Buffer a = allocator.allocate(8); + Buffer b = allocator.allocate(8); + Buffer c = allocator.allocate(8); + Buffer x = CompositeBuffer.compose(allocator, b.send(), c.send())) { + buf = CompositeBuffer.compose(allocator, a.send(), x.send()); + } + assertThat(buf.countComponents()).isEqualTo(3); + assertThat(buf.countReadableComponents()).isZero(); + assertThat(buf.countWritableComponents()).isEqualTo(3); + buf.writeInt(1); + assertThat(buf.countReadableComponents()).isOne(); + assertThat(buf.countWritableComponents()).isEqualTo(3); + buf.writeInt(1); + assertThat(buf.countReadableComponents()).isOne(); + assertThat(buf.countWritableComponents()).isEqualTo(2); + buf.writeInt(1); + assertThat(buf.countReadableComponents()).isEqualTo(2); + assertThat(buf.countWritableComponents()).isEqualTo(2); + buf.writeInt(1); + assertThat(buf.countReadableComponents()).isEqualTo(2); + assertThat(buf.countWritableComponents()).isOne(); + buf.writeInt(1); + assertThat(buf.countReadableComponents()).isEqualTo(3); + assertThat(buf.countWritableComponents()).isOne(); + buf.writeInt(1); + 
assertThat(buf.countReadableComponents()).isEqualTo(3); + assertThat(buf.countWritableComponents()).isZero(); + } + } + + @ParameterizedTest + @MethodSource("nonCompositeAllocators") + public void forEachReadableMustVisitBuffer(Fixture fixture) { + long value = 0x0102030405060708L; + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer bufBERW = allocator.allocate(8).writeLong(value); + Buffer bufBERO = allocator.allocate(8).writeLong(value).makeReadOnly()) { + verifyForEachReadableSingleComponent(fixture, bufBERW); + verifyForEachReadableSingleComponent(fixture, bufBERO); + } + } + + @Test + public void forEachReadableMustVisitAllReadableConstituentBuffersInOrder() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + Buffer composite; + try (Buffer a = allocator.allocate(4); + Buffer b = allocator.allocate(4); + Buffer c = allocator.allocate(4)) { + a.writeInt(1); + b.writeInt(2); + c.writeInt(3); + composite = CompositeBuffer.compose(allocator, a.send(), b.send(), c.send()); + } + var list = new LinkedList(List.of(1, 2, 3)); + int count = composite.forEachReadable(0, (index, component) -> { + var buffer = component.readableBuffer(); + int bufferValue = buffer.getInt(); + assertEquals(list.pollFirst().intValue(), bufferValue); + assertEquals(bufferValue, index + 1); + assertThrows(ReadOnlyBufferException.class, () -> buffer.put(0, (byte) 0xFF)); + return true; + }); + assertEquals(3, count); + assertThat(list).isEmpty(); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachReadableMustReturnNegativeCountWhenProcessorReturnsFalse(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + int count = buf.forEachReadable(0, (index, component) -> false); + assertEquals(-1, count); + } + } + + @Test + public void forEachReadableMustStopIterationWhenProcessorReturnsFalse() { + try (BufferAllocator 
allocator = BufferAllocator.onHeapUnpooled()) { + Buffer composite; + try (Buffer a = allocator.allocate(4); + Buffer b = allocator.allocate(4); + Buffer c = allocator.allocate(4)) { + a.writeInt(1); + b.writeInt(2); + c.writeInt(3); + composite = CompositeBuffer.compose(allocator, a.send(), b.send(), c.send()); + } + int readPos = composite.readerOffset(); + int writePos = composite.writerOffset(); + var list = new LinkedList(List.of(1, 2, 3)); + int count = composite.forEachReadable(0, (index, component) -> { + var buffer = component.readableBuffer(); + int bufferValue = buffer.getInt(); + assertEquals(list.pollFirst().intValue(), bufferValue); + assertEquals(bufferValue, index + 1); + return false; + }); + assertEquals(-1, count); + assertThat(list).containsExactly(2, 3); + assertEquals(readPos, composite.readerOffset()); + assertEquals(writePos, composite.writerOffset()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachReadableOnClosedBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + var buf = allocator.allocate(8); + buf.writeLong(0); + buf.close(); + assertThrows(BufferClosedException.class, () -> buf.forEachReadable(0, (component, index) -> true)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachReadableMustAllowCollectingBuffersInArray(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Buffer buf; + try (Buffer a = allocator.allocate(4); + Buffer b = allocator.allocate(4); + Buffer c = allocator.allocate(4)) { + buf = CompositeBuffer.compose(allocator, a.send(), b.send(), c.send()); + } + int i = 1; + while (buf.writableBytes() > 0) { + buf.writeByte((byte) i++); + } + ByteBuffer[] buffers = new ByteBuffer[buf.countReadableComponents()]; + buf.forEachReadable(0, (index, component) -> { + buffers[index] = component.readableBuffer(); + return true; + }); + i = 1; + 
assertThat(buffers.length).isGreaterThanOrEqualTo(1); + for (ByteBuffer buffer : buffers) { + while (buffer.hasRemaining()) { + assertEquals((byte) i++, buffer.get()); + } + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachReadableMustExposeByteCursors(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(32)) { + buf.writeLong(0x0102030405060708L); + buf.writeLong(0x1112131415161718L); + assertEquals(0x01020304, buf.readInt()); + try (Buffer actualData = allocator.allocate(buf.readableBytes()); + Buffer expectedData = allocator.allocate(12)) { + expectedData.writeInt(0x05060708); + expectedData.writeInt(0x11121314); + expectedData.writeInt(0x15161718); + + buf.forEachReadable(0, (i, component) -> { + ByteCursor forward = component.openCursor(); + while (forward.readByte()) { + actualData.writeByte(forward.getByte()); + } + return true; + }); + + assertEquals(expectedData.readableBytes(), actualData.readableBytes()); + while (expectedData.readableBytes() > 0) { + assertEquals(expectedData.readByte(), actualData.readByte()); + } + } + } + } + + @ParameterizedTest + @MethodSource("nonCompositeAllocators") + public void forEachWritableMustVisitBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer bufBERW = allocator.allocate(8)) { + verifyForEachWritableSingleComponent(fixture, bufBERW); + } + } + + @Test + public void forEachWritableMustVisitAllWritableConstituentBuffersInOrder() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + Buffer buf; + try (Buffer a = allocator.allocate(8); + Buffer b = allocator.allocate(8); + Buffer c = allocator.allocate(8)) { + buf = CompositeBuffer.compose(allocator, a.send(), b.send(), c.send()); + } + buf.forEachWritable(0, (index, component) -> { + component.writableBuffer().putLong(0x0102030405060708L + 0x1010101010101010L * index); + return true; + }); + buf.writerOffset(3 
* 8); + assertEquals(0x0102030405060708L, buf.readLong()); + assertEquals(0x1112131415161718L, buf.readLong()); + assertEquals(0x2122232425262728L, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachWritableMustReturnNegativeCountWhenProcessorReturnsFalse(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int count = buf.forEachWritable(0, (index, component) -> false); + assertEquals(-1, count); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachWritableMustStopIterationWhenProcessorRetursFalse(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + AtomicInteger counter = new AtomicInteger(); + buf.forEachWritable(0, (index, component) -> { + counter.incrementAndGet(); + return false; + }); + assertEquals(1, counter.get()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachWritableChangesMadeToByteBufferComponentMustBeReflectedInBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(9)) { + buf.writeByte((byte) 0xFF); + AtomicInteger writtenCounter = new AtomicInteger(); + buf.forEachWritable(0, (index, component) -> { + var buffer = component.writableBuffer(); + while (buffer.hasRemaining()) { + buffer.put((byte) writtenCounter.incrementAndGet()); + } + return true; + }); + buf.writerOffset(9); + assertEquals((byte) 0xFF, buf.readByte()); + assertEquals(0x0102030405060708L, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void changesMadeToByteBufferComponentsShouldBeReflectedInBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + AtomicInteger counter = new AtomicInteger(); + buf.forEachWritable(0, (index, component) -> { + var buffer = 
component.writableBuffer(); + while (buffer.hasRemaining()) { + buffer.put((byte) counter.incrementAndGet()); + } + return true; + }); + buf.writerOffset(buf.capacity()); + for (int i = 0; i < 8; i++) { + assertEquals((byte) i + 1, buf.getByte(i)); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachWritableOnClosedBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Buffer buf = allocator.allocate(8); + buf.close(); + assertThrows(BufferClosedException.class, () -> buf.forEachWritable(0, (index, component) -> true)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachWritableOnReadOnlyBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8).makeReadOnly()) { + assertThrows(BufferReadOnlyException.class, () -> buf.forEachWritable(0, (index, component) -> true)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void forEachWritableMustAllowCollectingBuffersInArray(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + ByteBuffer[] buffers = new ByteBuffer[buf.countWritableComponents()]; + buf.forEachWritable(0, (index, component) -> { + buffers[index] = component.writableBuffer(); + return true; + }); + assertThat(buffers.length).isGreaterThanOrEqualTo(1); + int i = 1; + for (ByteBuffer buffer : buffers) { + while (buffer.hasRemaining()) { + buffer.put((byte) i++); + } + } + buf.writerOffset(buf.capacity()); + i = 1; + while (buf.readableBytes() > 0) { + assertEquals((byte) i++, buf.readByte()); + } + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferCompositionTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCompositionTest.java new file mode 100644 index 00000000000..1736ebbfdfc --- /dev/null +++ 
b/buffer/src/test/java/io/netty/buffer/api/tests/BufferCompositionTest.java @@ -0,0 +1,566 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferClosedException; +import io.netty.buffer.api.BufferReadOnlyException; +import io.netty.buffer.api.CompositeBuffer; +import io.netty.buffer.api.Send; +import io.netty.buffer.api.internal.ResourceSupport; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static io.netty.buffer.api.internal.Statics.acquire; +import static io.netty.buffer.api.internal.Statics.isOwned; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class BufferCompositionTest extends BufferTestSupport { + @Test + public void compositeBuffersCannotHaveDuplicateComponents() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + Send a = allocator.allocate(4).send(); + var e = assertThrows(IllegalStateException.class, () -> CompositeBuffer.compose(allocator, a, a)); + assertThat(e).hasMessageContaining("already been received"); + + Send b = 
allocator.allocate(4).send(); + try (CompositeBuffer composite = CompositeBuffer.compose(allocator, b)) { + e = assertThrows(IllegalStateException.class, () -> composite.extendWith(b)); + assertThat(e).hasMessageContaining("already been received"); + } + } + } + + @Test + public void compositeBufferFromSends() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + assertEquals(24, composite.capacity()); + assertTrue(isOwned((ResourceSupport) composite)); + } + } + + @Test + public void compositeBufferMustNotBeAllowedToContainThemselves() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + CompositeBuffer bufA = CompositeBuffer.compose(allocator, allocator.allocate(4).send()); + Send sendA = bufA.send(); + try { + assertThrows(BufferClosedException.class, () -> bufA.extendWith(sendA)); + } finally { + sendA.close(); + } + + CompositeBuffer bufB = CompositeBuffer.compose(allocator, allocator.allocate(4).send()); + Send sendB = bufB.send(); + try (CompositeBuffer compositeBuffer = CompositeBuffer.compose(allocator, sendB)) { + assertThrows(IllegalStateException.class, () -> compositeBuffer.extendWith(sendB)); + } finally { + sendB.close(); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableOnCompositeBuffersMustRespectExistingBigEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Buffer composite; + try (Buffer a = allocator.allocate(4)) { + composite = CompositeBuffer.compose(allocator, a.send()); + } + try (composite) { + composite.writeInt(0x01020304); + composite.ensureWritable(4); + composite.writeInt(0x05060708); + assertEquals(0x0102030405060708L, composite.readLong()); + } + } + } + + @Test + public void extendOnNonCompositeBufferMustThrow() { + try (BufferAllocator allocator = 
BufferAllocator.onHeapUnpooled(); + Buffer a = allocator.allocate(8); + Buffer b = allocator.allocate(8)) { + assertThrows(ClassCastException.class, () -> ((CompositeBuffer) a).extendWith(b.send())); + } + } + + @Test + public void extendingNonOwnedCompositeBufferMustThrow() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer a = allocator.allocate(8); + Buffer b = allocator.allocate(8); + CompositeBuffer composed = CompositeBuffer.compose(allocator, a.send())) { + try (Buffer ignore = acquire(composed)) { + var exc = assertThrows(IllegalStateException.class, () -> composed.extendWith(b.send())); + assertThat(exc).hasMessageContaining("owned"); + } + } + } + + @Test + public void extendingCompositeBufferWithItselfMustThrow() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + CompositeBuffer composite; + try (Buffer a = allocator.allocate(8)) { + composite = CompositeBuffer.compose(allocator, a.send()); + } + try (composite) { + assertThrows(BufferClosedException.class, () -> composite.extendWith(composite.send())); + } + } + } + + @Test + public void extendingWithZeroCapacityBufferHasNoEffect() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator)) { + composite.extendWith(CompositeBuffer.compose(allocator).send()); + assertThat(composite.capacity()).isZero(); + assertThat(composite.countComponents()).isZero(); + } + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + Buffer a = allocator.allocate(1); + CompositeBuffer composite = CompositeBuffer.compose(allocator, a.send()); + assertTrue(isOwned(composite)); + assertThat(composite.capacity()).isOne(); + assertThat(composite.countComponents()).isOne(); + try (Buffer b = CompositeBuffer.compose(allocator)) { + composite.extendWith(b.send()); + } + assertTrue(isOwned(composite)); + assertThat(composite.capacity()).isOne(); + 
assertThat(composite.countComponents()).isOne(); + } + } + + @Test + public void extendingCompositeBufferWithNullMustThrow() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator)) { + assertThrows(NullPointerException.class, () -> composite.extendWith(null)); + } + } + + @Test + public void extendingCompositeBufferMustIncreaseCapacityByGivenBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator)) { + assertThat(composite.capacity()).isZero(); + try (Buffer buf = allocator.allocate(8)) { + composite.extendWith(buf.send()); + } + assertThat(composite.capacity()).isEqualTo(8); + composite.writeLong(0x0102030405060708L); + assertThat(composite.readLong()).isEqualTo(0x0102030405060708L); + } + } + + @Test + public void emptyCompositeBufferMustAllowExtendingWithBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + try (CompositeBuffer composite = CompositeBuffer.compose(allocator)) { + try (Buffer b = allocator.allocate(8)) { + composite.extendWith(b.send()); + assertThat(composite.capacity()).isEqualTo(8); + } + } + } + } + + @Test + public void emptyCompositeBufferMustAllowExtendingWithReadOnlyBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + try (CompositeBuffer composite = CompositeBuffer.compose(allocator)) { + try (Buffer b = allocator.allocate(8).makeReadOnly()) { + composite.extendWith(b.send()); + assertTrue(composite.readOnly()); + } + } + } + } + + @Test + public void whenExtendingCompositeBufferWithWriteOffsetAtCapacityExtensionWriteOffsetCanBeNonZero() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + CompositeBuffer composite; + try (Buffer a = allocator.allocate(8)) { + composite = CompositeBuffer.compose(allocator, a.send()); + } + try (composite) { + composite.writeLong(0); + try (Buffer b = 
allocator.allocate(8)) { + b.writeInt(1); + composite.extendWith(b.send()); + assertThat(composite.capacity()).isEqualTo(16); + assertThat(composite.writerOffset()).isEqualTo(12); + } + } + } + } + + @Test + public void whenExtendingCompositeBufferWithWriteOffsetLessThanCapacityExtensionWriteOffsetMustZero() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + CompositeBuffer composite; + try (Buffer a = allocator.allocate(8)) { + composite = CompositeBuffer.compose(allocator, a.send()); + } + try (composite) { + composite.writeInt(0); + try (Buffer b = allocator.allocate(8)) { + b.writeInt(1); + var exc = assertThrows(IllegalArgumentException.class, + () -> composite.extendWith(b.send())); + assertThat(exc).hasMessageContaining("unwritten gap"); + } + try (Buffer b = allocator.allocate(8)) { + b.setInt(0, 1); + composite.extendWith(b.send()); + assertThat(composite.capacity()).isEqualTo(16); + assertThat(composite.writerOffset()).isEqualTo(4); + } + } + } + } + + @Test + public void whenExtendingCompositeBufferWithReadOffsetAtCapacityExtensionReadOffsetCanBeNonZero() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + CompositeBuffer composite; + try (Buffer a = allocator.allocate(8)) { + composite = CompositeBuffer.compose(allocator, a.send()); + } + try (composite) { + composite.writeLong(0); + composite.readLong(); + try (Buffer b = allocator.allocate(8)) { + b.writeInt(1); + b.readInt(); + composite.extendWith(b.send()); + assertThat(composite.capacity()).isEqualTo(16); + assertThat(composite.writerOffset()).isEqualTo(12); + } + } + } + } + + @Test + public void whenExtendingCompositeBufferWithReadOffsetLessThanCapacityExtensionReadOffsetMustZero() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, allocator.allocate(8).send())) { + composite.writeLong(0); + composite.readInt(); + + Buffer b = allocator.allocate(8); + b.writeInt(1); 
+ b.readInt(); + var exc = assertThrows(IllegalArgumentException.class, + () -> composite.extendWith(b.send())); + assertThat(exc).hasMessageContaining("unread gap"); + assertThat(composite.capacity()).isEqualTo(8); + assertThat(composite.writerOffset()).isEqualTo(8); + assertThat(composite.readerOffset()).isEqualTo(4); + + composite.extendWith(allocator.allocate(8).writeInt(1).send()); + assertThat(composite.capacity()).isEqualTo(16); + assertThat(composite.writerOffset()).isEqualTo(12); + assertThat(composite.readerOffset()).isEqualTo(4); + } + } + + @Test + public void composingReadOnlyBuffersMustCreateReadOnlyCompositeBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer a = allocator.allocate(4).makeReadOnly(); + Buffer b = allocator.allocate(4).makeReadOnly(); + Buffer composite = CompositeBuffer.compose(allocator, a.send(), b.send())) { + assertTrue(composite.readOnly()); + verifyWriteInaccessible(composite, BufferReadOnlyException.class); + } + } + + @Test + public void composingReadOnlyAndWritableBuffersMustThrow() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + try (Buffer a = allocator.allocate(8).makeReadOnly(); + Buffer b = allocator.allocate(8)) { + assertThrows(IllegalArgumentException.class, + () -> CompositeBuffer.compose(allocator, a.send(), b.send())); + } + try (Buffer a = allocator.allocate(8).makeReadOnly(); + Buffer b = allocator.allocate(8)) { + assertThrows(IllegalArgumentException.class, + () -> CompositeBuffer.compose(allocator, b.send(), a.send())); + } + try (Buffer a = allocator.allocate(8).makeReadOnly(); + Buffer b = allocator.allocate(8); + Buffer c = allocator.allocate(8).makeReadOnly()) { + assertThrows(IllegalArgumentException.class, + () -> CompositeBuffer.compose(allocator, a.send(), b.send(), c.send())); + } + try (Buffer a = allocator.allocate(8).makeReadOnly(); + Buffer b = allocator.allocate(8); + Buffer c = allocator.allocate(8)) { + 
assertThrows(IllegalArgumentException.class, + () -> CompositeBuffer.compose(allocator, b.send(), a.send(), c.send())); + } + } + } + + @Test + public void compositeWritableBufferCannotBeExtendedWithReadOnlyBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + CompositeBuffer composite; + try (Buffer a = allocator.allocate(8)) { + composite = CompositeBuffer.compose(allocator, a.send()); + } + try (composite; Buffer b = allocator.allocate(8).makeReadOnly()) { + assertThrows(IllegalArgumentException.class, () -> composite.extendWith(b.send())); + } + } + } + + @Test + public void compositeReadOnlyBufferCannotBeExtendedWithWritableBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + CompositeBuffer composite; + try (Buffer a = allocator.allocate(8).makeReadOnly()) { + composite = CompositeBuffer.compose(allocator, a.send()); + } + try (composite; Buffer b = allocator.allocate(8)) { + assertThrows(IllegalArgumentException.class, () -> composite.extendWith(b.send())); + } + } + } + + @Test + public void splitComponentsFloorMustThrowOnOutOfBounds() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + assertThrows(IllegalArgumentException.class, () -> composite.splitComponentsFloor(-1)); + assertThrows(IllegalArgumentException.class, () -> composite.splitComponentsFloor(17)); + try (CompositeBuffer split = composite.splitComponentsFloor(16)) { + assertThat(split.capacity()).isEqualTo(16); + assertThat(composite.capacity()).isZero(); + } + } + } + + @Test + public void splitComponentsCeilMustThrowOnOutOfBounds() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + assertThrows(IllegalArgumentException.class, () -> 
composite.splitComponentsCeil(-1)); + assertThrows(IllegalArgumentException.class, () -> composite.splitComponentsCeil(17)); + try (CompositeBuffer split = composite.splitComponentsCeil(16)) { + assertThat(split.capacity()).isEqualTo(16); + assertThat(composite.capacity()).isZero(); + } + } + } + + @Test + public void splitComponentsFloorMustGiveEmptyBufferForOffsetInFirstComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsFloor(4)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isZero(); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(16); + } + } + } + + @Test + public void splitComponentsFloorMustGiveEmptyBufferForOffsetLastByteInFirstComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsFloor(7)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isZero(); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(16); + } + } + } + + @Test + public void splitComponentsFloorMustGiveBufferWithFirstComponentForOffsetInSecondComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsFloor(12)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + 
assertThat(split.capacity()).isEqualTo(8); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(8); + } + } + } + + @Test + public void splitComponentsFloorMustGiveBufferWithFirstComponentForOffsetOnFirstByteInSecondComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsFloor(8)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isEqualTo(8); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(8); + } + } + } + + @Test + public void splitComponentsCeilMustGiveBufferWithFirstComponentForOffsetInFirstComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsCeil(4)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isEqualTo(8); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(8); + } + } + } + + @Test + public void splitComponentsCeilMustGiveBufferWithFirstComponentForOffsetOnLastByteInFirstComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsCeil(7)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isEqualTo(8); + + assertTrue(isOwned(composite)); + 
assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(8); + } + } + } + + @Test + public void splitComponentsCeilMustGiveBufferWithFirstAndSecondComponentForOffsetInSecondComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsCeil(12)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isEqualTo(16); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(0); + } + } + + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsCeil(12)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isEqualTo(16); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(8); + } + } + } + + @Test + public void splitComponentsCeilMustGiveBufferWithFirstComponentForOffsetOnFirstByteInSecondComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsCeil(8)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isEqualTo(8); + + assertTrue(isOwned(composite)); + 
splitComponentsCeilMustGiveEmptyBufferForOffsetOnFirstByteInFirstComponent() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + CompositeBuffer composite = CompositeBuffer.compose(allocator, + allocator.allocate(8).send(), + allocator.allocate(8).send())) { + try (CompositeBuffer split = composite.splitComponentsCeil(0)) { + assertTrue(isOwned(split)); + assertTrue(split.isAccessible()); + assertThat(split.capacity()).isZero(); + + assertTrue(isOwned(composite)); + assertTrue(composite.isAccessible()); + assertThat(composite.capacity()).isEqualTo(16); + } + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferDoubleOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferDoubleOffsettedAccessorsTest.java new file mode 100644 index 00000000000..151e13d8b5d --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferDoubleOffsettedAccessorsTest.java @@ -0,0 +1,171 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferDoubleOffsettedAccessorsTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getDouble(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getDouble(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + assertEquals(value, buf.getDouble(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + buf.setByte(0, (byte) 0x10); + assertEquals(Double.longBitsToDouble(0x1002030405060708L), buf.getDouble(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
offsettedGetOfDoubleMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.getDouble(1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getDouble(1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getDouble(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getDouble(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getDouble(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfDoubleReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = 
allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getDouble(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfDoubleMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + double value = Double.longBitsToDouble(0x0102030405060708L); + assertThrows(IndexOutOfBoundsException.class, () -> buf.setDouble(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfDoubleMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + double value = Double.longBitsToDouble(0x0102030405060708L); + assertThrows(IndexOutOfBoundsException.class, () -> buf.setDouble(1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfDoubleMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.setDouble(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x05, buf.readByte()); + assertEquals((byte) 0x06, buf.readByte()); + assertEquals((byte) 0x07, buf.readByte()); + assertEquals((byte) 0x08, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferEnsureWritableTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferEnsureWritableTest.java new file mode 100644 index 00000000000..ccf45569633 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferEnsureWritableTest.java @@ -0,0 +1,147 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.CompositeBuffer; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferEnsureWritableTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableMustThrowForNegativeSize(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IllegalArgumentException.class, () -> buf.ensureWritable(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableMustThrowIfRequestedSizeWouldGrowBeyondMaxAllowed(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IllegalArgumentException.class, () -> buf.ensureWritable(Integer.MAX_VALUE - 7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableMustNotThrowWhenSpaceIsAlreadyAvailable(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.ensureWritable(8); + buf.writeLong(1); + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeByte((byte) 1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableMustExpandBufferCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThat(buf.writableBytes()).isEqualTo(8); + buf.writeLong(0x0102030405060708L); + assertThat(buf.writableBytes()).isEqualTo(0); + buf.ensureWritable(8); + assertThat(buf.writableBytes()).isGreaterThanOrEqualTo(8); + 
assertThat(buf.capacity()).isGreaterThanOrEqualTo(16); + buf.writeLong(0xA1A2A3A4A5A6A7A8L); + assertThat(buf.readableBytes()).isEqualTo(16); + assertThat(buf.readLong()).isEqualTo(0x0102030405060708L); + assertThat(buf.readLong()).isEqualTo(0xA1A2A3A4A5A6A7A8L); + assertThrows(IndexOutOfBoundsException.class, buf::readByte); + // Is it implementation dependent if the capacity increased by *exactly* the requested size, or more. + } + } + + @Test + public void ensureWritableMustExpandCapacityOfEmptyCompositeBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer buf = CompositeBuffer.compose(allocator)) { + assertThat(buf.writableBytes()).isEqualTo(0); + buf.ensureWritable(8); + assertThat(buf.writableBytes()).isGreaterThanOrEqualTo(8); + buf.writeLong(0xA1A2A3A4A5A6A7A8L); + assertThat(buf.readableBytes()).isEqualTo(8); + assertThat(buf.readLong()).isEqualTo(0xA1A2A3A4A5A6A7A8L); + assertThrows(IndexOutOfBoundsException.class, buf::readByte); + // Is it implementation dependent if the capacity increased by *exactly* the requested size, or more. 
+ } + } + + @ParameterizedTest + @MethodSource("allocators") + public void mustBeAbleToCopyAfterEnsureWritable(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(4)) { + buf.ensureWritable(8); + assertThat(buf.writableBytes()).isGreaterThanOrEqualTo(8); + assertThat(buf.capacity()).isGreaterThanOrEqualTo(8); + buf.writeLong(0x0102030405060708L); + try (Buffer copy = buf.copy()) { + long actual = copy.readLong(); + assertEquals(0x0102030405060708L, actual); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableWithCompactionMustNotAllocateIfCompactionIsEnough(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(64)) { + while (buf.writableBytes() > 0) { + buf.writeByte((byte) 42); + } + while (buf.readableBytes() > 0) { + buf.readByte(); + } + buf.ensureWritable(4, 4, true); + buf.writeInt(42); + assertThat(buf.capacity()).isEqualTo(64); + + buf.writerOffset(60).readerOffset(60); + buf.ensureWritable(8, 8, true); + buf.writeLong(42); + // Don't assert the capacity on this one, because single-component + // composite buffers may choose to allocate rather than compact. + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableWithLargeMinimumGrowthMustGrowByAtLeastThatMuch(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeLong(0).writeInt(0); + buf.readLong(); + buf.readInt(); // Compaction is now possible as well. + buf.ensureWritable(8, 32, true); // We don't need to allocate. + assertThat(buf.capacity()).isEqualTo(16); + buf.writeByte((byte) 1); + buf.ensureWritable(16, 32, true); // Now we DO need to allocate, because we can't compact. 
+ assertThat(buf.capacity()).isEqualTo(16 /* existing capacity */ + 32 /* minimum growth */); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferFloatOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferFloatOffsettedAccessorsTest.java new file mode 100644 index 00000000000..0c6369225e6 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferFloatOffsettedAccessorsTest.java @@ -0,0 +1,190 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferFloatOffsettedAccessorsTest extends BufferTestSupport { + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getFloat(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getFloat(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + assertEquals(value, buf.getFloat(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + buf.setByte(0, (byte) 0x10); + assertEquals(Float.intBitsToFloat(0x10020304), buf.getFloat(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try 
(BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + buf.getFloat(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getFloat(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + buf.makeReadOnly().getFloat(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getFloat(5)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getFloat(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getFloat(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
offsettedGetOfFloatReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getFloat(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfFloatReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThan(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getFloat(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfFloatMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + float value = Float.intBitsToFloat(0x01020304); + assertThrows(IndexOutOfBoundsException.class, () -> buf.setFloat(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfFloatMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + float value = Float.intBitsToFloat(0x01020304); + assertThrows(IndexOutOfBoundsException.class, () -> buf.setFloat(5, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfFloatMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + float value = Float.intBitsToFloat(0x01020304); + buf.setFloat(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferIntOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferIntOffsettedAccessorsTest.java new file mode 100644 index 00000000000..95b62466962 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferIntOffsettedAccessorsTest.java @@ -0,0 +1,337 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferIntOffsettedAccessorsTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getInt(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getInt(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01020304; + buf.writeInt(value); + assertEquals(value, buf.getInt(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01020304; + buf.writeInt(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x10020304, buf.getInt(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value 
= 0x01020304; + buf.writeInt(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.getInt(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01020304; + buf.writeInt(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getInt(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getInt(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getInt(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getInt(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfIntReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getInt(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = 
allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedInt(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedInt(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x01020304; + buf.writeUnsignedInt(value); + assertEquals(value, buf.getUnsignedInt(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x01020304; + buf.writeUnsignedInt(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x10020304, buf.getUnsignedInt(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x01020304; + buf.writeUnsignedInt(value); + buf.getUnsignedInt(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedInt(5)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
offsettedGetOfUnsignedIntReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x01020304; + buf.writeUnsignedInt(value); + buf.makeReadOnly().getUnsignedInt(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedInt(5)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getUnsignedInt(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedInt(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getUnsignedInt(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedIntReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> 
buf.makeReadOnly().getUnsignedInt(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfIntMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x01020304; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setInt(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfIntMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x01020304; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setInt(5, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfIntMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01020304; + buf.setInt(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedIntMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + long value = 0x01020304; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedInt(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedIntMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + long value = 0x01020304; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedInt(5, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedIntMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x01020304; + buf.setUnsignedInt(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferLifeCycleTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferLifeCycleTest.java new file mode 100644 index 00000000000..3e3e990aa95 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferLifeCycleTest.java @@ -0,0 +1,676 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferClosedException; +import io.netty.buffer.api.CompositeBuffer; +import io.netty.buffer.api.internal.ResourceSupport; +import io.netty.util.internal.EmptyArrays; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.concurrent.Future; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.Supplier; + +import static io.netty.buffer.api.internal.Statics.acquire; +import static io.netty.buffer.api.internal.Statics.isOwned; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class BufferLifeCycleTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void allocateAndAccessingBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeByte((byte) 1); + buf.writeByte((byte) 2); + try (Buffer inner = acquire((ResourceSupport) buf)) { + inner.writeByte((byte) 3); + inner.writeByte((byte) 4); + inner.writeByte((byte) 5); + inner.writeByte((byte) 6); + inner.writeByte((byte) 7); + inner.writeByte((byte) 8); + var re = assertThrows(RuntimeException.class, () -> inner.writeByte((byte) 9)); + assertThat(re).hasMessageContaining("bound"); + re = assertThrows(RuntimeException.class, () -> inner.writeByte((byte) 9)); + assertThat(re).hasMessageContaining("bound"); + re = assertThrows(RuntimeException.class, () -> buf.writeByte((byte) 9)); + assertThat(re).hasMessageContaining("bound"); + } + assertEquals((byte) 1, buf.readByte()); + assertEquals((byte) 2, buf.readByte()); + assertEquals((byte) 3, 
buf.readByte()); + assertEquals((byte) 4, buf.readByte()); + assertEquals((byte) 5, buf.readByte()); + assertEquals((byte) 6, buf.readByte()); + assertEquals((byte) 7, buf.readByte()); + assertEquals((byte) 8, buf.readByte()); + var re = assertThrows(RuntimeException.class, buf::readByte); + assertThat(re).hasMessageContaining("bound"); + assertThat(toByteArray(buf)).containsExactly(1, 2, 3, 4, 5, 6, 7, 8); + } + } + + @ParameterizedTest + @MethodSource("initialCombinations") + public void allocatingZeroSizedBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Supplier supplier = allocator.constBufferSupplier(EmptyArrays.EMPTY_BYTES); + + try (Buffer empty = supplier.get()) { + assertThat(empty.capacity()).isZero(); + assertTrue(empty.readOnly()); + } + + try (Buffer empty = allocator.allocate(0)) { + assertThat(empty.capacity()).isZero(); + empty.ensureWritable(8); + assertThat(empty.capacity()).isGreaterThanOrEqualTo(8); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + void acquireOnClosedBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + var buf = allocator.allocate(8); + buf.close(); + assertThrows(BufferClosedException.class, () -> acquire((ResourceSupport) buf)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void bufferShouldNotBeAccessibleAfterClose(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Buffer buf = allocator.allocate(24); + buf.writeLong(42); + buf.close(); + verifyInaccessible(buf); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void bufferMustNotBeThreadConfined(Fixture fixture) throws Exception { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeInt(42); + Future fut = executor.submit(() -> buf.readInt()); + assertEquals(42, fut.get()); + fut = executor.submit(() -> { + buf.writeInt(32); + 
return buf.readInt(); + }); + assertEquals(32, fut.get()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithoutOffsetAndSizeMustReturnReadableRegion(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + for (byte b : new byte[] { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 }) { + buf.writeByte(b); + } + assertEquals(0x01, buf.readByte()); + buf.writerOffset(buf.writerOffset() - 1); + try (Buffer copy = buf.copy()) { + assertThat(toByteArray(copy)).containsExactly(0x02, 0x03, 0x04, 0x05, 0x06, 0x07); + assertEquals(0, copy.readerOffset()); + assertEquals(6, copy.readableBytes()); + assertEquals(6, copy.writerOffset()); + assertEquals(6, copy.capacity()); + assertEquals(0x02, copy.readByte()); + assertEquals(0x03, copy.readByte()); + assertEquals(0x04, copy.readByte()); + assertEquals(0x05, copy.readByte()); + assertEquals(0x06, copy.readByte()); + assertEquals(0x07, copy.readByte()); + assertThrows(IndexOutOfBoundsException.class, copy::readByte); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithOffsetAndSizeMustReturnGivenRegion(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + for (byte b : new byte[] { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08 }) { + buf.writeByte(b); + } + buf.readerOffset(3); // Reader and writer offsets must be ignored. 
+ buf.writerOffset(6); + try (Buffer copy = buf.copy(1, 6)) { + assertThat(toByteArray(copy)).containsExactly(0x02, 0x03, 0x04, 0x05, 0x06, 0x07); + assertEquals(0, copy.readerOffset()); + assertEquals(6, copy.readableBytes()); + assertEquals(6, copy.writerOffset()); + assertEquals(6, copy.capacity()); + assertEquals(0x02, copy.readByte()); + assertEquals(0x03, copy.readByte()); + assertEquals(0x04, copy.readByte()); + assertEquals(0x05, copy.readByte()); + assertEquals(0x06, copy.readByte()); + assertEquals(0x07, copy.readByte()); + assertThrows(IndexOutOfBoundsException.class, copy::readByte); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithoutOffsetAndSizeMustNotInfluenceOwnership(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + try (Buffer copy = buf.copy()) { + assertTrue(isOwned((ResourceSupport) buf)); + assertTrue(isOwned((ResourceSupport) copy)); + copy.send().close(); + } + assertTrue(isOwned((ResourceSupport) buf)); + buf.send().close(); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithOffsetAndSizeMustNotInfluenceOwnership(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + try (Buffer copy = buf.copy(0, 8)) { + assertTrue(isOwned((ResourceSupport) buf)); + assertTrue(isOwned((ResourceSupport) copy)); + copy.send().close(); + } + assertTrue(isOwned((ResourceSupport) buf)); + buf.send().close(); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithoutOffsetAndSizeHasSameEndianAsParent(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + try (Buffer copy = buf.copy()) { + assertEquals(0x0102030405060708L, copy.readLong()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
copyWithOffsetAndSizeHasSameEndianAsParent(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + try (Buffer copy = buf.copy(0, 8)) { + assertEquals(0x0102030405060708L, copy.readLong()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + void sendOnCopyWithoutOffsetAndSizeMustNotThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + try (Buffer copy = buf.copy()) { + assertTrue(isOwned((ResourceSupport) buf)); + copy.send().close(); + } + // Verify that the copy is closed properly afterwards. + assertTrue(isOwned((ResourceSupport) buf)); + buf.send().receive().close(); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void sendOnCopyWithOffsetAndSizeMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + try (Buffer copy = buf.copy(0, 8)) { + assertTrue(isOwned((ResourceSupport) buf)); + copy.send().close(); + } + // Verify that the copy is closed properly afterwards. + assertTrue(isOwned((ResourceSupport) buf)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithNegativeOffsetMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.copy(-1, 1)); + // Verify that the copy is closed properly afterwards. 
+ assertTrue(isOwned((ResourceSupport) buf)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithNegativeSizeMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IllegalArgumentException.class, () -> buf.copy(0, -1)); + assertThrows(IllegalArgumentException.class, () -> buf.copy(2, -1)); + // Verify that the copy is closed properly afterwards. + assertTrue(isOwned((ResourceSupport) buf)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithSizeGreaterThanCapacityMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.copy(0, 9)); + buf.copy(0, 8).close(); // This is still fine. + assertThrows(IndexOutOfBoundsException.class, () -> buf.copy(1, 8)); + // Verify that the copy is closed properly afterwards. + assertTrue(isOwned((ResourceSupport) buf)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyWithZeroSizeMustBeAllowed(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.copy(0, 0).close(); // This is fine. + // Verify that the copy is closed properly afterwards. 
+ assertTrue(isOwned((ResourceSupport) buf)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void copyMustBeOwned(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Buffer buf = allocator.allocate(8); + buf.writeInt(42); + try (Buffer copy = buf.copy()) { + assertTrue(isOwned((ResourceSupport) copy)); + assertTrue(isOwned((ResourceSupport) buf)); + buf.close(); + assertFalse(buf.isAccessible()); + assertTrue(isOwned((ResourceSupport) copy)); + try (Buffer receive = copy.send().receive()) { + assertTrue(isOwned((ResourceSupport) receive)); + assertFalse(copy.isAccessible()); + } + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void copyOfLastByte(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8).writeLong(0x0102030405060708L); + Buffer copy = buf.copy(7, 1)) { + assertThat(copy.capacity()).isOne(); + assertEquals((byte) 0x08, copy.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void pooledBuffersMustResetStateBeforeReuse(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer expected = allocator.allocate(8)) { + for (int i = 0; i < 10; i++) { + try (Buffer buf = allocator.allocate(8)) { + assertEquals(expected.capacity(), buf.capacity()); + assertEquals(expected.readableBytes(), buf.readableBytes()); + assertEquals(expected.readerOffset(), buf.readerOffset()); + assertEquals(expected.writableBytes(), buf.writableBytes()); + assertEquals(expected.writerOffset(), buf.writerOffset()); + byte[] bytes = new byte[8]; + buf.copyInto(0, bytes, 0, 8); + assertThat(bytes).containsExactly(0, 0, 0, 0, 0, 0, 0, 0); + + var tlr = ThreadLocalRandom.current(); + for (int j = 0; j < tlr.nextInt(0, 8); j++) { + buf.writeByte((byte) 1); + } + if (buf.readableBytes() > 0) { + for (int j = 0; j < tlr.nextInt(0, buf.readableBytes()); j++) { + buf.readByte(); + } + } + } + } + } 
+ } + + @ParameterizedTest + @MethodSource("allocators") + public void splitWithNegativeOffsetMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.split(0).close(); + assertThrows(IllegalArgumentException.class, () -> buf.split(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitWithOversizedOffsetMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IllegalArgumentException.class, () -> buf.split(9)); + buf.split(8).close(); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitOfNonOwnedBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeInt(1); + try (Buffer acquired = acquire((ResourceSupport) buf)) { + var exc = assertThrows(IllegalStateException.class, () -> acquired.split()); + assertThat(exc).hasMessageContaining("owned"); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitOnOffsetOfNonOwnedBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + try (Buffer acquired = acquire((ResourceSupport) buf)) { + var exc = assertThrows(IllegalStateException.class, () -> acquired.split(4)); + assertThat(exc).hasMessageContaining("owned"); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitOnOffsetMustTruncateGreaterOffsets(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeInt(0x01020304); + buf.writeByte((byte) 0x05); + buf.readInt(); + try (Buffer split = buf.split(2)) { + assertThat(buf.readerOffset()).isEqualTo(2); + assertThat(buf.writerOffset()).isEqualTo(3); + + assertThat(split.readerOffset()).isEqualTo(2); + 
assertThat(split.writerOffset()).isEqualTo(2); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitOnOffsetMustExtendLesserOffsets(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeInt(0x01020304); + buf.readInt(); + try (Buffer split = buf.split(6)) { + assertThat(buf.readerOffset()).isEqualTo(0); + assertThat(buf.writerOffset()).isEqualTo(0); + + assertThat(split.readerOffset()).isEqualTo(4); + assertThat(split.writerOffset()).isEqualTo(4); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitPartMustContainFirstHalfOfBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeLong(0x0102030405060708L); + assertThat(buf.readByte()).isEqualTo((byte) 0x01); + try (Buffer split = buf.split()) { + // Original buffer: + assertThat(buf.capacity()).isEqualTo(8); + assertThat(buf.readerOffset()).isZero(); + assertThat(buf.writerOffset()).isZero(); + assertThat(buf.readableBytes()).isZero(); + assertThrows(IndexOutOfBoundsException.class, () -> buf.readByte()); + + // Split part: + assertThat(split.capacity()).isEqualTo(8); + assertThat(split.readerOffset()).isOne(); + assertThat(split.writerOffset()).isEqualTo(8); + assertThat(split.readableBytes()).isEqualTo(7); + assertThat(split.readByte()).isEqualTo((byte) 0x02); + assertThat(split.readInt()).isEqualTo(0x03040506); + assertThat(split.readByte()).isEqualTo((byte) 0x07); + assertThat(split.readByte()).isEqualTo((byte) 0x08); + assertThrows(IndexOutOfBoundsException.class, () -> split.readByte()); + } + + // Split part does NOT return when closed: + assertThat(buf.capacity()).isEqualTo(8); + assertThat(buf.readerOffset()).isZero(); + assertThat(buf.writerOffset()).isZero(); + assertThat(buf.readableBytes()).isZero(); + assertThrows(IndexOutOfBoundsException.class, () -> buf.readByte()); + } + } + + 
@ParameterizedTest + @MethodSource("allocators") + public void splitPartsMustBeIndividuallySendable(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeLong(0x0102030405060708L); + assertThat(buf.readByte()).isEqualTo((byte) 0x01); + try (Buffer sentSplit = buf.split().send().receive()) { + try (Buffer sentBuf = buf.send().receive()) { + assertThat(sentBuf.capacity()).isEqualTo(8); + assertThat(sentBuf.readerOffset()).isZero(); + assertThat(sentBuf.writerOffset()).isZero(); + assertThat(sentBuf.readableBytes()).isZero(); + assertThrows(IndexOutOfBoundsException.class, () -> sentBuf.readByte()); + } + + assertThat(sentSplit.capacity()).isEqualTo(8); + assertThat(sentSplit.readerOffset()).isOne(); + assertThat(sentSplit.writerOffset()).isEqualTo(8); + assertThat(sentSplit.readableBytes()).isEqualTo(7); + assertThat(sentSplit.readByte()).isEqualTo((byte) 0x02); + assertThat(sentSplit.readInt()).isEqualTo(0x03040506); + assertThat(sentSplit.readByte()).isEqualTo((byte) 0x07); + assertThat(sentSplit.readByte()).isEqualTo((byte) 0x08); + assertThrows(IndexOutOfBoundsException.class, () -> sentSplit.readByte()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void mustBePossibleToSplitMoreThanOnce(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeLong(0x0102030405060708L); + try (Buffer a = buf.split()) { + a.writerOffset(4); + try (Buffer b = a.split()) { + assertEquals(0x01020304, b.readInt()); + a.writerOffset(4); + assertEquals(0x05060708, a.readInt()); + assertThrows(IndexOutOfBoundsException.class, () -> b.readByte()); + assertThrows(IndexOutOfBoundsException.class, () -> a.readByte()); + buf.writeLong(0xA1A2A3A4A5A6A7A8L); + buf.writerOffset(4); + try (Buffer c = buf.split()) { + assertEquals(0xA1A2A3A4, c.readInt()); + buf.writerOffset(4); + assertEquals(0xA5A6A7A8, 
buf.readInt()); + assertThrows(IndexOutOfBoundsException.class, () -> c.readByte()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.readByte()); + } + } + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void mustBePossibleToSplitCopies(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Buffer buf = allocator.allocate(16); + buf.writeLong(0x0102030405060708L); + try (Buffer copy = buf.copy()) { + buf.close(); + assertTrue(isOwned((ResourceSupport) copy)); + try (Buffer split = copy.split(4)) { + split.resetOffsets().ensureWritable(Long.BYTES); + copy.resetOffsets().ensureWritable(Long.BYTES); + assertThat(split.capacity()).isEqualTo(Long.BYTES); + assertThat(copy.capacity()).isEqualTo(Long.BYTES); + assertThat(split.getLong(0)).isEqualTo(0x01020304_00000000L); + assertThat(copy.getLong(0)).isEqualTo(0x05060708_00000000L); + } + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableOnSplitBuffers(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + try (Buffer a = buf.split()) { + assertEquals(0x0102030405060708L, a.readLong()); + a.ensureWritable(8); + a.writeLong(0xA1A2A3A4A5A6A7A8L); + assertEquals(0xA1A2A3A4A5A6A7A8L, a.readLong()); + + buf.ensureWritable(8); + buf.writeLong(0xA1A2A3A4A5A6A7A8L); + assertEquals(0xA1A2A3A4A5A6A7A8L, buf.readLong()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableOnSplitBuffersWithOddOffsets(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(10)) { + buf.writeLong(0x0102030405060708L); + buf.writeByte((byte) 0x09); + buf.readByte(); + try (Buffer a = buf.split()) { + assertEquals(0x0203040506070809L, a.readLong()); + a.ensureWritable(8); + a.writeLong(0xA1A2A3A4A5A6A7A8L); + assertEquals(0xA1A2A3A4A5A6A7A8L, 
a.readLong()); + + buf.ensureWritable(8); + buf.writeLong(0xA1A2A3A4A5A6A7A8L); + assertEquals(0xA1A2A3A4A5A6A7A8L, buf.readLong()); + } + } + } + + @Test + public void splitOnEmptyCompositeBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer buf = CompositeBuffer.compose(allocator)) { + verifySplitEmptyCompositeBuffer(buf); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitBuffersMustBeAccessibleInOtherThreads(Fixture fixture) throws Exception { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeInt(42); + var send = buf.split().send(); + var fut = executor.submit(() -> { + try (Buffer receive = send.receive()) { + assertEquals(42, receive.readInt()); + receive.readerOffset(0).writerOffset(0).writeInt(24); + assertEquals(24, receive.readInt()); + } + }); + fut.get(); + buf.writeInt(32); + assertEquals(32, buf.readInt()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void acquireOfReadOnlyBufferMustBeReadOnly(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly(); + try (Buffer acquire = acquire((ResourceSupport) buf)) { + assertTrue(acquire.readOnly()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void splitOfReadOnlyBufferMustBeReadOnly(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeLong(0x0102030405060708L); + buf.makeReadOnly(); + try (Buffer split = buf.split()) { + assertTrue(split.readOnly()); + assertTrue(buf.readOnly()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void allocatingOnClosedAllocatorMustThrow(Fixture fixture) { + BufferAllocator allocator = fixture.createAllocator(); + Supplier supplier = allocator.constBufferSupplier(new byte[8]); + allocator.close(); + 
assertThrows(IllegalStateException.class, () -> allocator.allocate(8)); + assertThrows(IllegalStateException.class, () -> allocator.constBufferSupplier(EmptyArrays.EMPTY_BYTES)); + assertThrows(IllegalStateException.class, () -> allocator.constBufferSupplier(new byte[8])); + // Existing const suppliers continue to work because they hold on to static memory allocation. + supplier.get().close(); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferLongOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferLongOffsettedAccessorsTest.java new file mode 100644 index 00000000000..1df9eafd400 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferLongOffsettedAccessorsTest.java @@ -0,0 +1,171 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferLongOffsettedAccessorsTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getLong(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getLong(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x0102030405060708L; + buf.writeLong(value); + assertEquals(value, buf.getLong(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x0102030405060708L; + buf.writeLong(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x1002030405060708L, buf.getLong(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer 
buf = allocator.allocate(8)) { + long value = 0x0102030405060708L; + buf.writeLong(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.getLong(1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x0102030405060708L; + buf.writeLong(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getLong(1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getLong(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getLong(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getLong(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfLongReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getLong(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfLongMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + 
try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + long value = 0x0102030405060708L; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setLong(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfLongMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + long value = 0x0102030405060708L; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setLong(1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfLongMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x0102030405060708L; + buf.setLong(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x05, buf.readByte()); + assertEquals((byte) 0x06, buf.readByte()); + assertEquals((byte) 0x07, buf.readByte()); + assertEquals((byte) 0x08, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferMediumOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferMediumOffsettedAccessorsTest.java new file mode 100644 index 00000000000..30edd9e949a --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferMediumOffsettedAccessorsTest.java @@ -0,0 +1,357 @@ +/* + * Copyright 2021 The Netty Project + * + 
* The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferMediumOffsettedAccessorsTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getMedium(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getMedium(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeMedium(value); + assertEquals(value, buf.getMedium(0)); + } + } + + 
@ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeMedium(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x100203, buf.getMedium(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeMedium(value); + buf.getMedium(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeMedium(value); + assertThrows(IndexOutOfBoundsException.class, () -> buf.getMedium(6)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeMedium(value); + buf.makeReadOnly().getMedium(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getMedium(6)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try 
(BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getMedium(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getMedium(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getMedium(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfMediumReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getMedium(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedMedium(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedMedium(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator 
allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeUnsignedMedium(value); + assertEquals(value, buf.getUnsignedMedium(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeUnsignedMedium(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x100203, buf.getUnsignedMedium(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeUnsignedMedium(value); + buf.getUnsignedMedium(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedMedium(6)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeUnsignedMedium(value); + buf.makeReadOnly().getUnsignedMedium(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + 
assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedMedium(6)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getUnsignedMedium(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedMedium(6)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getUnsignedMedium(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedMediumReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedMedium(8)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfMediumMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x010203; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setMedium(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfMediumMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x010203; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setMedium(6, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.setMedium(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedMediumMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x010203; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedMedium(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedMediumMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x010203; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedMedium(6, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.setUnsignedMedium(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferOffsetsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferOffsetsTest.java new file mode 100644 index 00000000000..c700094942f --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferOffsetsTest.java @@ -0,0 +1,181 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferOffsetsTest extends BufferTestSupport { + + @ParameterizedTest + @MethodSource("initialCombinations") + void mustThrowWhenAllocatingNegativeSizedBuffer(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + assertThrows(IllegalArgumentException.class, () -> allocator.allocate(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void setReaderOffsetMustThrowOnNegativeIndex(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.readerOffset(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void setReaderOffsetMustThrowOnOversizedIndex(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.readerOffset(1)); + buf.writeLong(0); + assertThrows(IndexOutOfBoundsException.class, () -> buf.readerOffset(9)); + + buf.readerOffset(8); + assertThrows(IndexOutOfBoundsException.class, buf::readByte); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void 
setWriterOffsetMustThrowOutsideOfWritableRegion(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + // Writer offset cannot be negative. + assertThrows(IndexOutOfBoundsException.class, () -> buf.writerOffset(-1)); + + buf.writerOffset(4); + buf.readerOffset(4); + + // Cannot set writer offset before reader offset. + assertThrows(IndexOutOfBoundsException.class, () -> buf.writerOffset(3)); + assertThrows(IndexOutOfBoundsException.class, () -> buf.writerOffset(0)); + + buf.writerOffset(buf.capacity()); + + // Cannot set writer offset beyond capacity. + assertThrows(IndexOutOfBoundsException.class, () -> buf.writerOffset(buf.capacity() + 1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void setReaderOffsetMustNotThrowWithinBounds(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThat(buf.readerOffset(0)).isSameAs(buf); + buf.writeLong(0); + assertThat(buf.readerOffset(7)).isSameAs(buf); + assertThat(buf.readerOffset(8)).isSameAs(buf); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void capacityMustBeAllocatedSize(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(8, buf.capacity()); + try (Buffer b = allocator.allocate(13)) { + assertEquals(13, b.capacity()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + void readerWriterOffsetUpdates(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(22)) { + assertEquals(0, buf.writerOffset()); + assertThat(buf.writerOffset(1)).isSameAs(buf); + assertEquals(1, buf.writerOffset()); + assertThat(buf.writeByte((byte) 7)).isSameAs(buf); + assertEquals(2, buf.writerOffset()); + assertThat(buf.writeShort((short) 3003)).isSameAs(buf); + assertEquals(4, buf.writerOffset()); + 
assertThat(buf.writeInt(0x5A55_BA55)).isSameAs(buf); + assertEquals(8, buf.writerOffset()); + assertThat(buf.writeLong(0x123456789ABCDEF0L)).isSameAs(buf); + assertEquals(16, buf.writerOffset()); + assertEquals(6, buf.writableBytes()); + assertEquals(16, buf.readableBytes()); + + assertEquals(0, buf.readerOffset()); + assertThat(buf.readerOffset(1)).isSameAs(buf); + assertEquals(1, buf.readerOffset()); + assertEquals((byte) 7, buf.readByte()); + assertEquals(2, buf.readerOffset()); + assertEquals((short) 3003, buf.readShort()); + assertEquals(4, buf.readerOffset()); + assertEquals(0x5A55_BA55, buf.readInt()); + assertEquals(8, buf.readerOffset()); + assertEquals(0x123456789ABCDEF0L, buf.readLong()); + assertEquals(16, buf.readerOffset()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void readAndWriteBoundsChecksWithIndexUpdates(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0); + + buf.readLong(); // Fine. + buf.readerOffset(1); + assertThrows(IndexOutOfBoundsException.class, buf::readLong); + + buf.readerOffset(4); + buf.readInt(); // Fine. 
+ buf.readerOffset(5); + + assertThrows(IndexOutOfBoundsException.class, buf::readInt); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void resetMustSetReaderAndWriterOffsetsToTheirInitialPositions(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeInt(0).readShort(); + buf.resetOffsets(); + assertEquals(0, buf.readerOffset()); + assertEquals(0, buf.writerOffset()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void readableBytesMustMatchWhatWasWritten(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeLong(0); + assertEquals(Long.BYTES, buf.readableBytes()); + buf.readShort(); + assertEquals(Long.BYTES - Short.BYTES, buf.readableBytes()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferPrimitiveRelativeAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferPrimitiveRelativeAccessorsTest.java new file mode 100644 index 00000000000..840eeba8a13 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferPrimitiveRelativeAccessorsTest.java @@ -0,0 +1,1221 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferPrimitiveRelativeAccessorsTest extends BufferTestSupport { + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfByteMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + byte value = 0x01; + buf.writeByte(value); + assertEquals(1, buf.readableBytes()); + assertEquals(7, buf.writableBytes()); + assertEquals(value, buf.readByte()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfByteMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + byte value = 0x01; + buf.writeByte(value); + buf.setByte(0, (byte) 0x10); + assertEquals(1, buf.readableBytes()); + assertEquals(7, buf.writableBytes()); + assertEquals(0x10, buf.readByte()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfByteMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + byte value = 0x01; + buf.writeByte(value); + buf.readerOffset(1); + assertEquals(0, buf.readableBytes()); + assertEquals(7, buf.writableBytes()); + 
assertThrows(IndexOutOfBoundsException.class, buf::readByte); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedByteMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01; + buf.writeUnsignedByte(value); + assertEquals(1, buf.readableBytes()); + assertEquals(7, buf.writableBytes()); + assertEquals(value, buf.readUnsignedByte()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedByteMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01; + buf.writeUnsignedByte(value); + buf.setByte(0, (byte) 0x10); + assertEquals(1, buf.readableBytes()); + assertEquals(7, buf.writableBytes()); + assertEquals(0x10, buf.readUnsignedByte()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedByteMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01; + buf.writeUnsignedByte(value); + buf.readerOffset(1); + assertEquals(0, buf.readableBytes()); + assertEquals(7, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.readUnsignedByte()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
relativeReadOfUnsignedByteReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01; + buf.writeUnsignedByte(value); + buf.readerOffset(1); + assertEquals(0, buf.readableBytes()); + assertEquals(7, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readUnsignedByte()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfByteMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(8); + byte value = 0x01; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeByte(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfByteMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + byte value = 0x01; + buf.writeByte(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedByteMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(8); + int value = 0x01; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeUnsignedByte(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedByteMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01; + buf.writeUnsignedByte(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfCharMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + char value = 0x0102; + buf.writeChar(value); + assertEquals(2, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertEquals(value, buf.readChar()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfCharMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + char value = 0x0102; + buf.writeChar(value); + buf.setByte(0, (byte) 0x10); + assertEquals(2, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertEquals(0x1002, buf.readChar()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
relativeReadOfCharMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + char value = 0x0102; + buf.writeChar(value); + buf.readerOffset(1); + assertEquals(1, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readChar); + assertEquals(1, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfCharReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + char value = 0x0102; + buf.writeChar(value); + buf.readerOffset(1); + assertEquals(1, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readChar()); + assertEquals(1, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfCharMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(7); + char value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeChar(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfCharMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + char value = 0x0102; + buf.writeChar(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfShortMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + short value = 0x0102; + buf.writeShort(value); + assertEquals(2, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertEquals(value, buf.readShort()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfShortMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + short value = 0x0102; + buf.writeShort(value); + buf.setByte(0, (byte) 0x10); + assertEquals(2, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertEquals(0x1002, buf.readShort()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
relativeReadOfShortMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + short value = 0x0102; + buf.writeShort(value); + buf.readerOffset(1); + assertEquals(1, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readShort); + assertEquals(1, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfShortReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + short value = 0x0102; + buf.writeShort(value); + buf.readerOffset(1); + assertEquals(1, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readShort()); + assertEquals(1, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedShortMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x0102; + buf.writeUnsignedShort(value); + assertEquals(2, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertEquals(value, buf.readUnsignedShort()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedShortMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = 
allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x0102; + buf.writeUnsignedShort(value); + buf.setByte(0, (byte) 0x10); + assertEquals(2, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertEquals(0x1002, buf.readUnsignedShort()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedShortMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x0102; + buf.writeUnsignedShort(value); + buf.readerOffset(1); + assertEquals(1, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readUnsignedShort); + assertEquals(1, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedShortReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x0102; + buf.writeUnsignedShort(value); + buf.readerOffset(1); + assertEquals(1, buf.readableBytes()); + assertEquals(6, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readUnsignedShort()); + assertEquals(1, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfShortMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(7); + 
short value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeShort(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfShortMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + short value = 0x0102; + buf.writeShort(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedShortMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(7); + int value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeUnsignedShort(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedShortMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x0102; + buf.writeUnsignedShort(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfMediumMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeMedium(value); + assertEquals(3, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertEquals(value, buf.readMedium()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeMedium(value); + buf.setByte(0, (byte) 0x10); + assertEquals(3, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertEquals(0x100203, buf.readMedium()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
relativeReadOfMediumMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeMedium(value); + buf.readerOffset(1); + assertEquals(2, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readMedium); + assertEquals(2, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfMediumReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeMedium(value); + buf.readerOffset(1); + assertEquals(2, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readMedium()); + assertEquals(2, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedMediumMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeUnsignedMedium(value); + assertEquals(3, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertEquals(value, buf.readUnsignedMedium()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedMediumMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + 
Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeUnsignedMedium(value); + buf.setByte(0, (byte) 0x10); + assertEquals(3, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertEquals(0x100203, buf.readUnsignedMedium()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedMediumMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeUnsignedMedium(value); + buf.readerOffset(1); + assertEquals(2, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readUnsignedMedium); + assertEquals(2, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedMediumReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x010203; + buf.writeUnsignedMedium(value); + buf.readerOffset(1); + assertEquals(2, buf.readableBytes()); + assertEquals(5, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readUnsignedMedium()); + assertEquals(2, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfMediumMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); 
+ buf.writerOffset(6); + int value = 0x010203; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeMedium(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeMedium(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedMediumMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(6); + int value = 0x010203; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeUnsignedMedium(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedMediumMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x010203; + buf.writeUnsignedMedium(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfIntMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01020304; + buf.writeInt(value); + assertEquals(4, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertEquals(value, buf.readInt()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01020304; + buf.writeInt(value); + buf.setByte(0, (byte) 0x10); + assertEquals(4, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertEquals(0x10020304, buf.readInt()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
relativeReadOfIntMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01020304; + buf.writeInt(value); + buf.readerOffset(1); + assertEquals(3, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readInt); + assertEquals(3, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfIntReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + int value = 0x01020304; + buf.writeInt(value); + buf.readerOffset(1); + assertEquals(3, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readInt()); + assertEquals(3, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedIntMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x01020304; + buf.writeUnsignedInt(value); + assertEquals(4, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertEquals(value, buf.readUnsignedInt()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedIntMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = 
allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x01020304; + buf.writeUnsignedInt(value); + buf.setByte(0, (byte) 0x10); + assertEquals(4, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertEquals(0x10020304, buf.readUnsignedInt()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedIntMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x01020304; + buf.writeUnsignedInt(value); + buf.readerOffset(1); + assertEquals(3, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readUnsignedInt); + assertEquals(3, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfUnsignedIntReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x01020304; + buf.writeUnsignedInt(value); + buf.readerOffset(1); + assertEquals(3, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readUnsignedInt()); + assertEquals(3, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfIntMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(5); + int 
value = 0x01020304; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeInt(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfIntMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x01020304; + buf.writeInt(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedIntMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(5); + long value = 0x01020304; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeUnsignedInt(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfUnsignedIntMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x01020304; + buf.writeUnsignedInt(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfFloatMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + assertEquals(4, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertEquals(value, buf.readFloat()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfFloatMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + buf.setByte(0, (byte) 0x10); + assertEquals(4, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertEquals(Float.intBitsToFloat(0x10020304), buf.readFloat()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + 
@MethodSource("allocators") + void relativeReadOfFloatMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + buf.readerOffset(1); + assertEquals(3, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readFloat); + assertEquals(3, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfFloatReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + buf.readerOffset(1); + assertEquals(3, buf.readableBytes()); + assertEquals(4, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readFloat()); + assertEquals(3, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfFloatMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(5); + float value = Float.intBitsToFloat(0x01020304); + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeFloat(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfFloatMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + float value = Float.intBitsToFloat(0x01020304); + buf.writeFloat(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfLongMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x0102030405060708L; + buf.writeLong(value); + assertEquals(8, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertEquals(value, buf.readLong()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfLongMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x0102030405060708L; + buf.writeLong(value); + buf.setByte(0, (byte) 0x10); + assertEquals(8, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertEquals(0x1002030405060708L, buf.readLong()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
relativeReadOfLongMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x0102030405060708L; + buf.writeLong(value); + buf.readerOffset(1); + assertEquals(7, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readLong); + assertEquals(7, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfLongReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + long value = 0x0102030405060708L; + buf.writeLong(value); + buf.readerOffset(1); + assertEquals(7, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readLong()); + assertEquals(7, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfLongMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(1); + long value = 0x0102030405060708L; + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeLong(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfLongMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + long value = 0x0102030405060708L; + buf.writeLong(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x05, buf.readByte()); + assertEquals((byte) 0x06, buf.readByte()); + assertEquals((byte) 0x07, buf.readByte()); + assertEquals((byte) 0x08, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfDoubleMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + assertEquals(8, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertEquals(value, buf.readDouble()); + assertEquals(0, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfDoubleMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + buf.setByte(0, (byte) 0x10); + assertEquals(8, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertEquals(Double.longBitsToDouble(0x1002030405060708L), buf.readDouble()); + assertEquals(0, buf.readableBytes()); + } + } + + 
@ParameterizedTest + @MethodSource("allocators") + void relativeReadOfDoubleMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + buf.readerOffset(1); + assertEquals(7, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, buf::readDouble); + assertEquals(7, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeReadOfDoubleReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsBeyondWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(0, buf.readableBytes()); + assertEquals(Long.BYTES, buf.writableBytes()); + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + buf.readerOffset(1); + assertEquals(7, buf.readableBytes()); + assertEquals(0, buf.writableBytes()); + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().readDouble()); + assertEquals(7, buf.readableBytes()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfDoubleMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + buf.writerOffset(1); + double value = Double.longBitsToDouble(0x0102030405060708L); + assertThrows(IndexOutOfBoundsException.class, () -> buf.writeDouble(value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void relativeWriteOfDoubleMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + double value = Double.longBitsToDouble(0x0102030405060708L); + buf.writeDouble(value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x03, buf.readByte()); + assertEquals((byte) 0x04, buf.readByte()); + assertEquals((byte) 0x05, buf.readByte()); + assertEquals((byte) 0x06, buf.readByte()); + assertEquals((byte) 0x07, buf.readByte()); + assertEquals((byte) 0x08, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferReadOnlyTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferReadOnlyTest.java new file mode 100644 index 00000000000..e0731c65881 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferReadOnlyTest.java @@ -0,0 +1,266 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferReadOnlyException; +import io.netty.buffer.api.CompositeBuffer; +import io.netty.buffer.api.Send; +import io.netty.buffer.api.internal.ResourceSupport; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.function.Supplier; + +import static io.netty.buffer.api.internal.Statics.isOwned; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class BufferReadOnlyTest extends BufferTestSupport { + + @ParameterizedTest + @MethodSource("allocators") + public void readOnlyBufferMustPreventWriteAccess(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + var b = buf.makeReadOnly(); + assertThat(b).isSameAs(buf); + verifyWriteInaccessible(buf, BufferReadOnlyException.class); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void closedBuffersAreNotReadOnly(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Buffer buf = allocator.allocate(8); + buf.makeReadOnly(); + buf.close(); + assertFalse(buf.readOnly()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void readOnlyBufferMustMustStayReadOnlyAfterRepeatedToggles(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertFalse(buf.readOnly()); + buf.makeReadOnly(); + assertTrue(buf.readOnly()); + verifyWriteInaccessible(buf, BufferReadOnlyException.class); + + buf.makeReadOnly(); + assertTrue(buf.readOnly()); + + verifyWriteInaccessible(buf, 
BufferReadOnlyException.class); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void readOnlyBufferMustRemainReadOnlyAfterSend(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly(); + var send = buf.send(); + try (Buffer receive = send.receive()) { + assertTrue(receive.readOnly()); + verifyWriteInaccessible(receive, BufferReadOnlyException.class); + } + } + } + + @Test + public void readOnlyBufferMustRemainReadOnlyAfterSendForEmptyCompositeBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer buf = CompositeBuffer.compose(allocator)) { + buf.makeReadOnly(); + var send = buf.send(); + try (Buffer receive = send.receive()) { + assertTrue(receive.readOnly()); + } + } + } + + @ParameterizedTest + @MethodSource("pooledAllocators") + public void readOnlyBufferMustNotBeReadOnlyAfterBeingReusedFromPool(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + for (int i = 0; i < 1000; i++) { + try (Buffer buf = allocator.allocate(8)) { + assertFalse(buf.readOnly()); + buf.makeReadOnly(); + assertTrue(buf.readOnly()); + } + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void compactOnReadOnlyBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly(); + assertThrows(BufferReadOnlyException.class, () -> buf.compact()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void ensureWritableOnReadOnlyBufferMustThrow(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly(); + assertThrows(BufferReadOnlyException.class, () -> buf.ensureWritable(1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void copyIntoOnReadOnlyBufferMustThrow(Fixture fixture) { + try 
(BufferAllocator allocator = fixture.createAllocator(); + Buffer dest = allocator.allocate(8)) { + dest.makeReadOnly(); + try (Buffer src = allocator.allocate(8)) { + assertThrows(BufferReadOnlyException.class, () -> src.copyInto(0, dest, 0, 1)); + assertThrows(BufferReadOnlyException.class, () -> src.copyInto(0, dest, 0, 0)); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void readOnlyBuffersCannotChangeWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8).makeReadOnly()) { + assertThrows(BufferReadOnlyException.class, () -> buf.writerOffset(4)); + } + } + + @ParameterizedTest + @MethodSource("initialCombinations") + public void constBufferInitialState(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.constBufferSupplier(new byte[] {1, 2, 3, 4}).get()) { + assertTrue(buf.readOnly()); + assertThat(buf.readerOffset()).isZero(); + assertThat(buf.capacity()).isEqualTo(4); + assertThat(buf.writerOffset()).isEqualTo(4); + assertTrue(isOwned((ResourceSupport) buf)); + assertTrue(buf.isAccessible()); + assertThat(buf.countComponents()).isOne(); + assertEquals((byte) 1, buf.readByte()); + assertEquals((byte) 2, buf.readByte()); + assertEquals((byte) 3, buf.readByte()); + assertEquals((byte) 4, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("initialCombinations") + public void constBuffersCanBeSplit(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Supplier supplier = allocator.constBufferSupplier(new byte[16]); + verifyConstBufferSplit(supplier); + // These shenanigans must not interfere with the parent const buffer. 
+ verifyConstBufferSplit(supplier); + } + } + + private static void verifyConstBufferSplit(Supplier supplier) { + try (Buffer a = supplier.get(); + Buffer b = a.split(8)) { + assertTrue(a.readOnly()); + assertTrue(b.readOnly()); + assertTrue(isOwned((ResourceSupport) a)); + assertTrue(isOwned((ResourceSupport) b)); + assertThat(a.capacity()).isEqualTo(8); + assertThat(b.capacity()).isEqualTo(8); + try (Buffer c = b.copy()) { + assertFalse(c.readOnly()); // Buffer copies are never read-only. + assertTrue(isOwned((ResourceSupport) c)); + assertTrue(isOwned((ResourceSupport) b)); + assertThat(c.capacity()).isEqualTo(8); + } + } + } + + @ParameterizedTest + @MethodSource("initialCombinations") + public void compactOnConstBufferMustNotImpactSiblings(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator()) { + Supplier supplier = allocator.constBufferSupplier(new byte[] {1, 2, 3, 4}); + try (Buffer a = supplier.get(); + Buffer b = supplier.get(); + Buffer c = a.copy()) { + assertEquals(1, a.readByte()); + assertEquals(2, a.readByte()); + assertThrows(BufferReadOnlyException.class, () -> a.compact()); // Can't compact read-only buffer. + assertEquals(3, a.readByte()); + assertEquals(4, a.readByte()); + + assertEquals(1, b.readByte()); + assertEquals(2, b.readByte()); + assertThrows(BufferReadOnlyException.class, () -> b.compact()); // Can't compact read-only buffer. + assertEquals(3, b.readByte()); + assertEquals(4, b.readByte()); + + assertEquals(1, c.readByte()); + assertEquals(2, c.readByte()); + c.compact(); // Copies are not read-only, so we can compact this one. 
+ assertEquals(3, c.readByte()); + assertEquals(4, c.readByte()); + } + } + } + + @ParameterizedTest + @MethodSource("initialCombinations") + public void constBuffersMustBeSendable(Fixture fixture) throws Exception { + try (BufferAllocator allocator = fixture.createAllocator()) { + Supplier supplier = allocator.constBufferSupplier(new byte[] {1, 2, 3, 4}); + try (Buffer buffer = supplier.get()) { + Send send = buffer.send(); + var future = executor.submit(() -> { + try (Buffer receive = send.receive()) { + return receive.readInt(); + } + }); + assertEquals(0x01020304, future.get()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void copyOfReadOnlyBufferIsNotReadOnly(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8).writeLong(0x0102030405060708L).makeReadOnly(); + Buffer copy = buf.copy()) { + assertFalse(copy.readOnly()); + assertReadableEquals(buf, copy); + assertEquals(8, copy.readerOffset()); + copy.setLong(0, 0xA1A2A3A4A5A6A7A8L); + assertEquals(0xA1A2A3A4A5A6A7A8L, copy.getLong(0)); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferRefTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferRefTest.java new file mode 100644 index 00000000000..d4521adce7d --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferRefTest.java @@ -0,0 +1,96 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferClosedException; +import io.netty.buffer.api.BufferRef; +import io.netty.buffer.api.Send; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.atomic.AtomicReference; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; + +class BufferRefTest { + @Test + public void closingBufRefMustCloseOwnedBuf() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + BufferRef ref; + try (Buffer b = allocator.allocate(8)) { + ref = new BufferRef(b.send()); + } + ref.content().writeInt(42); + assertThat(ref.content().readInt()).isEqualTo(42); + ref.close(); + assertThrows(BufferClosedException.class, () -> ref.content().writeInt(32)); + } + } + + @Test + public void closingBufRefMustCloseOwnedBufFromSend() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer buf = allocator.allocate(8)) { + BufferRef ref = new BufferRef(buf.send()); + ref.content().writeInt(42); + assertThat(ref.content().readInt()).isEqualTo(42); + ref.close(); + assertThrows(BufferClosedException.class, () -> ref.content().writeInt(32)); + } + } + + @Test + public void mustCloseOwnedBufferWhenReplacedFromSend() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + AtomicReference orig = new AtomicReference<>(); + BufferRef ref; + Send s = allocator.allocate(8).send(); + ref = new BufferRef(Send.sending(Buffer.class, () -> { + Buffer b = s.receive(); + orig.set(b); + return b; + })); + + orig.get().writeInt(42); + assertThat(ref.content().readInt()).isEqualTo(42); + + try (Buffer buf = allocator.allocate(8)) { + ref.replace(buf.send()); // Pass replacement via send(). 
+ } + + assertThrows(BufferClosedException.class, () -> orig.get().writeInt(32)); + ref.content().writeInt(42); + assertThat(ref.content().readInt()).isEqualTo(42); + ref.close(); + assertThrows(BufferClosedException.class, () -> ref.content().writeInt(32)); + } + } + + @Test + public void sendingRefMustSendBuffer() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + BufferRef refA = new BufferRef(allocator.allocate(8).send())) { + refA.content().writeInt(42); + Send send = refA.send(); + assertThrows(BufferClosedException.class, () -> refA.content().readInt()); + try (BufferRef refB = send.receive()) { + assertThat(refB.content().readInt()).isEqualTo(42); + } + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferSendTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferSendTest.java new file mode 100644 index 00000000000..e7c6015de39 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferSendTest.java @@ -0,0 +1,165 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferClosedException; +import io.netty.buffer.api.BufferRef; +import io.netty.buffer.api.Send; +import io.netty.buffer.api.internal.ResourceSupport; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.concurrent.ArrayBlockingQueue; +import java.util.concurrent.Future; +import java.util.concurrent.SynchronousQueue; + +import static io.netty.buffer.api.internal.Statics.acquire; +import static io.netty.buffer.api.internal.Statics.isOwned; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class BufferSendTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void allocateAndSendToThread(Fixture fixture) throws Exception { + try (BufferAllocator allocator = fixture.createAllocator()) { + ArrayBlockingQueue> queue = new ArrayBlockingQueue<>(10); + Future future = executor.submit(() -> { + try (Buffer byteBuf = queue.take().receive()) { + return byteBuf.readByte(); + } + }); + + try (Buffer buf = allocator.allocate(8)) { + buf.writeByte((byte) 42); + assertTrue(queue.offer(buf.send())); + } + + assertEquals((byte) 42, future.get().byteValue()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void allocateAndSendToThreadViaSyncQueue(Fixture fixture) throws Exception { + SynchronousQueue> queue = new SynchronousQueue<>(); + Future future = executor.submit(() -> { + try (Buffer byteBuf = queue.take().receive()) { + return byteBuf.readByte(); + } + }); + + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + 
assertThat(buf.writeByte((byte) 42)).isSameAs(buf); + queue.put(buf.send()); + } + + assertEquals((byte) 42, future.get().byteValue()); + } + + @ParameterizedTest + @MethodSource("allocators") + void sendMustThrowWhenBufIsAcquired(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + try (Buffer ignored = acquire((ResourceSupport) buf)) { + assertFalse(isOwned((ResourceSupport) buf)); + assertThrows(IllegalStateException.class, buf::send); + } + // Now send() should work again. + assertTrue(isOwned((ResourceSupport) buf)); + buf.send().receive().close(); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void originalBufferMustNotBeAccessibleAfterSend(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer orig = allocator.allocate(24)) { + orig.writeLong(42); + var send = orig.send(); + verifyInaccessible(orig); + try (Buffer receive = send.receive()) { + assertEquals(42, receive.readLong()); + } + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void cannotSendMoreThanOnce(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + var send = buf.send(); + var exc = assertThrows(BufferClosedException.class, () -> buf.send()); + send.receive().close(); + assertThat(exc).hasMessageContaining("closed"); + } + } + + @ParameterizedTest + @MethodSource("allocators") + public void sendMustNotMakeSplitBuffersInaccessible(Fixture fixture) throws Exception { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(16)) { + buf.writeInt(64); + var splitA = buf.split(); + buf.writeInt(42); + var send = buf.split().send(); + buf.writeInt(72); + var splitB = buf.split(); + var fut = executor.submit(() -> { + try (Buffer receive = send.receive()) { + assertEquals(42, receive.readInt()); + } + }); + fut.get(); + buf.writeInt(32); + 
assertEquals(32, buf.readInt()); + assertEquals(64, splitA.readInt()); + assertEquals(72, splitB.readInt()); + } + } + + @Test + public void isSendOfMustCheckObjectTypes() { + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled()) { + Send bufferSend = allocator.allocate(8).send(); + Send bufferRefSend = new BufferRef(allocator.allocate(8).send()).send(); + try { + assertTrue(Send.isSendOf(Buffer.class, bufferSend)); + assertFalse(Send.isSendOf(BufferRef.class, bufferSend)); + assertFalse(Send.isSendOf(Buffer.class, bufferRefSend)); + assertTrue(Send.isSendOf(BufferRef.class, bufferRefSend)); + assertFalse(Send.isSendOf(Buffer.class, new Object())); + assertFalse(Send.isSendOf(Object.class, new Object())); + } finally { + bufferSend.close(); + bufferRefSend.close(); + } + // Type checks must still pass after the sends have been received. + assertTrue(Send.isSendOf(Buffer.class, bufferSend)); + assertTrue(Send.isSendOf(BufferRef.class, bufferRefSend)); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferShortOffsettedAccessorsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferShortOffsettedAccessorsTest.java new file mode 100644 index 00000000000..d9eb8c89e33 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferShortOffsettedAccessorsTest.java @@ -0,0 +1,355 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BufferShortOffsettedAccessorsTest extends BufferTestSupport { + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getShort(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getShort(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + short value = 0x0102; + buf.writeShort(value); + assertEquals(value, buf.getShort(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + short value = 0x0102; + buf.writeShort(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x1002, buf.getShort(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = 
allocator.allocate(8)) { + short value = 0x0102; + buf.writeShort(value); + buf.getShort(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + short value = 0x0102; + buf.writeShort(value); + buf.makeReadOnly().getShort(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getShort(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = 
fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getShort(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfShortReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedShort(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortReadOnlyMustBoundsCheckOnNegativeOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedShort(-1)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortMustNotBoundsCheckWhenReadOffsetAndSizeIsEqualToWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x0102; + buf.writeUnsignedShort(value); + assertEquals(value, buf.getUnsignedShort(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortMustReadWithDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x0102; + buf.writeUnsignedShort(value); + buf.setByte(0, (byte) 0x10); + assertEquals(0x1002, buf.getUnsignedShort(0)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void 
offsettedGetOfUnsignedShortMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x0102; + buf.writeUnsignedShort(value); + buf.getUnsignedShort(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortReadOnlyMustNotBoundsCheckWhenReadOffsetAndSizeIsGreaterThanWriteOffset( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x0102; + buf.writeUnsignedShort(value); + buf.makeReadOnly().getUnsignedShort(1); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortReadOnlyMustBoundsCheckWhenReadOffsetAndSizeIsGreaterThanCapacity( + Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.getUnsignedShort(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + 
assertThrows(IndexOutOfBoundsException.class, () -> buf.getUnsignedShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortReadOnlyMustNotBoundsCheckWhenReadOffsetIsGreaterThanWriteOffset(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.makeReadOnly().getUnsignedShort(0); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedGetOfUnsignedShortReadOnlyMustBoundsCheckWhenReadOffsetIsGreaterThanCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertThrows(IndexOutOfBoundsException.class, () -> buf.makeReadOnly().getUnsignedShort(7)); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfShortMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + short value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setShort(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfShortMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + short value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setShort(7, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfShortMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + short value = 0x0102; + buf.setShort(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedShortMustBoundsCheckWhenWriteOffsetIsNegative(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedShort(-1, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. + assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedShortMustBoundsCheckWhenWriteOffsetAndSizeIsBeyondCapacity(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + assertEquals(Long.BYTES, buf.capacity()); + int value = 0x0102; + assertThrows(IndexOutOfBoundsException.class, () -> buf.setUnsignedShort(7, value)); + buf.writerOffset(Long.BYTES); + // Verify contents are unchanged. 
+ assertEquals(0, buf.readLong()); + } + } + + @ParameterizedTest + @MethodSource("allocators") + void offsettedSetOfUnsignedShortMustHaveDefaultEndianByteOrder(Fixture fixture) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + int value = 0x0102; + buf.setUnsignedShort(0, value); + buf.writerOffset(Long.BYTES); + assertEquals((byte) 0x01, buf.readByte()); + assertEquals((byte) 0x02, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferTestSupport.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferTestSupport.java new file mode 100644 index 00000000000..ca24c9005e5 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferTestSupport.java @@ -0,0 +1,818 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.BufferClosedException; +import io.netty.buffer.api.CompositeBuffer; +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.internal.ResourceSupport; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.BeforeAll; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.nio.ReadOnlyBufferException; +import java.text.ParseException; +import java.time.Instant; +import java.time.temporal.ChronoUnit; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.ServiceConfigurationError; +import java.util.SplittableRandom; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Executors; +import java.util.concurrent.ThreadFactory; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Function; +import java.util.function.Predicate; +import java.util.function.Supplier; +import java.util.stream.Collectors; +import java.util.stream.Stream; +import java.util.stream.Stream.Builder; + +import static io.netty.buffer.api.internal.Statics.acquire; +import static io.netty.buffer.api.tests.Fixture.Properties.DIRECT; +import static io.netty.buffer.api.tests.Fixture.Properties.HEAP; +import static io.netty.buffer.api.tests.Fixture.Properties.POOLED; +import static java.nio.ByteOrder.BIG_ENDIAN; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public abstract class BufferTestSupport { + private static final InternalLogger logger = 
InternalLoggerFactory.getInstance(BufferTestSupport.class); + public static ExecutorService executor; + + private static final Memoize INITIAL_COMBINATIONS = new Memoize<>( + () -> initialFixturesForEachImplementation().toArray(Fixture[]::new)); + private static final Memoize ALL_COMBINATIONS = new Memoize<>( + () -> fixtureCombinations(initialFixturesForEachImplementation()).toArray(Fixture[]::new)); + private static final Memoize ALL_ALLOCATORS = new Memoize<>( + () -> Arrays.stream(ALL_COMBINATIONS.get()) + .toArray(Fixture[]::new)); + private static final Memoize NON_COMPOSITE = new Memoize<>( + () -> Arrays.stream(ALL_COMBINATIONS.get()) + .filter(f -> !f.isComposite()) + .toArray(Fixture[]::new)); + private static final Memoize HEAP_ALLOCS = new Memoize<>( + () -> Arrays.stream(ALL_COMBINATIONS.get()) + .filter(f -> f.isHeap()) + .toArray(Fixture[]::new)); + private static final Memoize DIRECT_ALLOCS = new Memoize<>( + () -> Arrays.stream(ALL_COMBINATIONS.get()) + .filter(f -> f.isDirect()) + .toArray(Fixture[]::new)); + private static final Memoize POOLED_ALLOCS = new Memoize<>( + () -> Arrays.stream(ALL_COMBINATIONS.get()) + .filter(f -> f.isPooled()) + .toArray(Fixture[]::new)); + + protected static Predicate filterOfTheDay(int percentage) { + Instant today = Instant.now().truncatedTo(ChronoUnit.DAYS); // New seed every day. 
+ SplittableRandom rng = new SplittableRandom(today.hashCode()); + AtomicInteger counter = new AtomicInteger(); + return fixture -> counter.getAndIncrement() < 1 || rng.nextInt(0, 100) < percentage; + } + + static Fixture[] allocators() { + return ALL_ALLOCATORS.get(); + } + + static Fixture[] nonCompositeAllocators() { + return NON_COMPOSITE.get(); + } + + static Fixture[] heapAllocators() { + return HEAP_ALLOCS.get(); + } + + static Fixture[] directAllocators() { + return DIRECT_ALLOCS.get(); + } + + static Fixture[] pooledAllocators() { + return POOLED_ALLOCS.get(); + } + + static Fixture[] initialCombinations() { + return INITIAL_COMBINATIONS.get(); + } + + static List initialAllocators() { + return List.of( + new Fixture("heap", BufferAllocator::onHeapUnpooled, HEAP), + new Fixture("direct", BufferAllocator::offHeapUnpooled, DIRECT), + new Fixture("pooledHeap", BufferAllocator::onHeapPooled, POOLED, HEAP), + new Fixture("pooledDirect", BufferAllocator::offHeapPooled, POOLED, DIRECT)); + } + + static List initialFixturesForEachImplementation() { + List initFixtures = initialAllocators(); + + // Multiply by all MemoryManagers. 
+ List failedManagers = new ArrayList<>(); + List loadableManagers = new ArrayList<>(); + MemoryManager.availableManagers().forEach(provider -> { + try { + loadableManagers.add(provider.get()); + } catch (ServiceConfigurationError | Exception e) { + logger.debug("Could not load implementation for testing", e); + failedManagers.add(e); + } + }); + if (loadableManagers.isEmpty()) { + AssertionError error = new AssertionError("Failed to load any memory managers implementations."); + for (Throwable failure : failedManagers) { + error.addSuppressed(failure); + } + throw error; + } + initFixtures = initFixtures.stream().flatMap(f -> { + Builder builder = Stream.builder(); + for (MemoryManager managers : loadableManagers) { + char[] chars = managers.implementationName().toCharArray(); + for (int i = 1, j = 1; i < chars.length; i++) { + if (Character.isUpperCase(chars[i])) { + chars[j++] = chars[i]; + } + } + String managersName = String.valueOf(chars, 0, 2); + builder.add(new Fixture(f + "/" + managersName, + () -> MemoryManager.using(managers, f), f.getProperties())); + } + return builder.build(); + }).collect(Collectors.toList()); + return initFixtures; + } + + private abstract static class TestAllocator implements BufferAllocator { + @Override + public Supplier constBufferSupplier(byte[] bytes) { + Buffer base = allocate(bytes.length).writeBytes(bytes).makeReadOnly(); + return () -> base; // Technically off-spec. + } + } + + static Stream fixtureCombinations(List initFixtures) { + Builder builder = Stream.builder(); + initFixtures.forEach(builder); + + // Add 2-way composite buffers of all combinations. 
+ for (Fixture first : initFixtures) { + for (Fixture second : initFixtures) { + builder.add(new Fixture("compose(" + first + ", " + second + ')', () -> { + return new TestAllocator() { + final BufferAllocator a = first.get(); + final BufferAllocator b = second.get(); + @Override + public Buffer allocate(int size) { + int half = size / 2; + try (Buffer firstHalf = a.allocate(half); + Buffer secondHalf = b.allocate(size - half)) { + return CompositeBuffer.compose(a, firstHalf.send(), secondHalf.send()); + } + } + + @Override + public void close() { + a.close(); + b.close(); + } + }; + }, Fixture.Properties.COMPOSITE)); + } + } + + // Also add a 3-way composite buffer. + builder.add(new Fixture("compose(heap,heap,heap)", () -> { + return new TestAllocator() { + final BufferAllocator alloc = BufferAllocator.onHeapUnpooled(); + @Override + public Buffer allocate(int size) { + int part = size / 3; + try (Buffer a = alloc.allocate(part); + Buffer b = alloc.allocate(part); + Buffer c = alloc.allocate(size - part * 2)) { + return CompositeBuffer.compose(alloc, a.send(), b.send(), c.send()); + } + } + + @Override + public void close() { + alloc.close(); + } + }; + }, Fixture.Properties.COMPOSITE)); + + for (Fixture fixture : initFixtures) { + builder.add(new Fixture(fixture + ".ensureWritable", () -> { + return new TestAllocator() { + final BufferAllocator allocator = fixture.createAllocator(); + @Override + public Buffer allocate(int size) { + if (size < 2) { + return allocator.allocate(size); + } + var buf = allocator.allocate(size - 1); + buf.ensureWritable(size); + return buf; + } + + @Override + public void close() { + allocator.close(); + } + }; + }, fixture.getProperties())); + builder.add(new Fixture(fixture + ".compose.ensureWritable", () -> { + return new TestAllocator() { + final BufferAllocator allocator = fixture.createAllocator(); + @Override + public Buffer allocate(int size) { + if (size < 2) { + return allocator.allocate(size); + } + var buf = 
CompositeBuffer.compose(allocator); + buf.ensureWritable(size); + return buf; + } + + @Override + public void close() { + allocator.close(); + } + }; + }, Fixture.Properties.COMPOSITE)); + } + + var stream = builder.build(); + return stream.flatMap(BufferTestSupport::injectSplits); + } + + private static Stream injectSplits(Fixture f) { + Builder builder = Stream.builder(); + builder.add(f); + builder.add(new Fixture(f + ".split", () -> { + return new TestAllocator() { + final BufferAllocator allocatorBase = f.get(); + @Override + public Buffer allocate(int size) { + try (Buffer buf = allocatorBase.allocate(size + 1)) { + buf.writerOffset(size); + return buf.split().writerOffset(0); + } + } + + @Override + public void close() { + allocatorBase.close(); + } + }; + }, f.getProperties())); + return builder.build(); + } + + @BeforeAll + static void startExecutor() throws IOException, ParseException { + executor = Executors.newSingleThreadExecutor(new ThreadFactory() { + @Override + public Thread newThread(Runnable r) { + Thread thread = Executors.defaultThreadFactory().newThread(r); + thread.setName("BufferTest-" + thread.getName()); + thread.setDaemon(true); // Do not prevent shut down of test runner. 
+ return thread; + } + }); + } + + @AfterAll + static void stopExecutor() throws IOException { + executor.shutdown(); + } + + public static void verifyInaccessible(Buffer buf) { + verifyReadInaccessible(buf); + + verifyWriteInaccessible(buf, BufferClosedException.class); + + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer target = allocator.allocate(24)) { + assertThrows(BufferClosedException.class, () -> buf.copyInto(0, target, 0, 1)); + assertThrows(BufferClosedException.class, () -> buf.copyInto(0, new byte[1], 0, 1)); + assertThrows(BufferClosedException.class, () -> buf.copyInto(0, ByteBuffer.allocate(1), 0, 1)); + if (CompositeBuffer.isComposite(buf)) { + assertThrows(BufferClosedException.class, () -> ((CompositeBuffer) buf).extendWith(target.send())); + } + } + + assertThrows(BufferClosedException.class, () -> buf.split()); + assertThrows(BufferClosedException.class, () -> buf.send()); + assertThrows(BufferClosedException.class, () -> acquire((ResourceSupport) buf)); + assertThrows(BufferClosedException.class, () -> buf.copy()); + assertThrows(BufferClosedException.class, () -> buf.openCursor()); + assertThrows(BufferClosedException.class, () -> buf.openCursor(0, 0)); + assertThrows(BufferClosedException.class, () -> buf.openReverseCursor()); + assertThrows(BufferClosedException.class, () -> buf.openReverseCursor(0, 0)); + } + + public static void verifyReadInaccessible(Buffer buf) { + assertThrows(BufferClosedException.class, () -> buf.readByte()); + assertThrows(BufferClosedException.class, () -> buf.readUnsignedByte()); + assertThrows(BufferClosedException.class, () -> buf.readChar()); + assertThrows(BufferClosedException.class, () -> buf.readShort()); + assertThrows(BufferClosedException.class, () -> buf.readUnsignedShort()); + assertThrows(BufferClosedException.class, () -> buf.readMedium()); + assertThrows(BufferClosedException.class, () -> buf.readUnsignedMedium()); + assertThrows(BufferClosedException.class, () -> 
buf.readInt()); + assertThrows(BufferClosedException.class, () -> buf.readUnsignedInt()); + assertThrows(BufferClosedException.class, () -> buf.readFloat()); + assertThrows(BufferClosedException.class, () -> buf.readLong()); + assertThrows(BufferClosedException.class, () -> buf.readDouble()); + + assertThrows(BufferClosedException.class, () -> buf.getByte(0)); + assertThrows(BufferClosedException.class, () -> buf.getUnsignedByte(0)); + assertThrows(BufferClosedException.class, () -> buf.getChar(0)); + assertThrows(BufferClosedException.class, () -> buf.getShort(0)); + assertThrows(BufferClosedException.class, () -> buf.getUnsignedShort(0)); + assertThrows(BufferClosedException.class, () -> buf.getMedium(0)); + assertThrows(BufferClosedException.class, () -> buf.getUnsignedMedium(0)); + assertThrows(BufferClosedException.class, () -> buf.getInt(0)); + assertThrows(BufferClosedException.class, () -> buf.getUnsignedInt(0)); + assertThrows(BufferClosedException.class, () -> buf.getFloat(0)); + assertThrows(BufferClosedException.class, () -> buf.getLong(0)); + assertThrows(BufferClosedException.class, () -> buf.getDouble(0)); + } + + public static void verifyWriteInaccessible(Buffer buf, Class expected) { + assertThrows(expected, () -> buf.writeByte((byte) 32)); + assertThrows(expected, () -> buf.writeUnsignedByte(32)); + assertThrows(expected, () -> buf.writeChar('3')); + assertThrows(expected, () -> buf.writeShort((short) 32)); + assertThrows(expected, () -> buf.writeUnsignedShort(32)); + assertThrows(expected, () -> buf.writeMedium(32)); + assertThrows(expected, () -> buf.writeUnsignedMedium(32)); + assertThrows(expected, () -> buf.writeInt(32)); + assertThrows(expected, () -> buf.writeUnsignedInt(32)); + assertThrows(expected, () -> buf.writeFloat(3.2f)); + assertThrows(expected, () -> buf.writeLong(32)); + assertThrows(expected, () -> buf.writeDouble(32)); + + assertThrows(expected, () -> buf.setByte(0, (byte) 32)); + assertThrows(expected, () -> 
buf.setUnsignedByte(0, 32)); + assertThrows(expected, () -> buf.setChar(0, '3')); + assertThrows(expected, () -> buf.setShort(0, (short) 32)); + assertThrows(expected, () -> buf.setUnsignedShort(0, 32)); + assertThrows(expected, () -> buf.setMedium(0, 32)); + assertThrows(expected, () -> buf.setUnsignedMedium(0, 32)); + assertThrows(expected, () -> buf.setInt(0, 32)); + assertThrows(expected, () -> buf.setUnsignedInt(0, 32)); + assertThrows(expected, () -> buf.setFloat(0, 3.2f)); + assertThrows(expected, () -> buf.setLong(0, 32)); + assertThrows(expected, () -> buf.setDouble(0, 32)); + + assertThrows(expected, () -> buf.ensureWritable(1)); + assertThrows(expected, () -> buf.fill((byte) 0)); + try (BufferAllocator allocator = BufferAllocator.onHeapUnpooled(); + Buffer source = allocator.allocate(8)) { + assertThrows(expected, () -> source.copyInto(0, buf, 0, 1)); + if (expected == BufferClosedException.class) { + assertThrows(expected, () -> buf.copyInto(0, source, 0, 1)); + } + } + } + + public static void testCopyIntoByteBuffer(Fixture fixture, Function bbAlloc) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + ByteBuffer buffer = bbAlloc.apply(8); + buf.copyInto(0, buffer, 0, buffer.capacity()); + assertEquals((byte) 0x01, buffer.get()); + assertEquals((byte) 0x02, buffer.get()); + assertEquals((byte) 0x03, buffer.get()); + assertEquals((byte) 0x04, buffer.get()); + assertEquals((byte) 0x05, buffer.get()); + assertEquals((byte) 0x06, buffer.get()); + assertEquals((byte) 0x07, buffer.get()); + assertEquals((byte) 0x08, buffer.get()); + buffer.clear(); + + buffer = bbAlloc.apply(6); + buf.copyInto(1, buffer, 1, 3); + assertEquals((byte) 0x00, buffer.get()); + assertEquals((byte) 0x02, buffer.get()); + assertEquals((byte) 0x03, buffer.get()); + assertEquals((byte) 0x04, buffer.get()); + assertEquals((byte) 0x00, buffer.get()); + assertEquals((byte) 0x00, buffer.get()); + 
buffer.clear(); + + buffer = bbAlloc.apply(6); + buffer.position(3).limit(3); + buf.copyInto(1, buffer, 1, 3); + assertEquals(3, buffer.position()); + assertEquals(3, buffer.limit()); + buffer.clear(); + assertEquals((byte) 0x00, buffer.get()); + assertEquals((byte) 0x02, buffer.get()); + assertEquals((byte) 0x03, buffer.get()); + assertEquals((byte) 0x04, buffer.get()); + assertEquals((byte) 0x00, buffer.get()); + assertEquals((byte) 0x00, buffer.get()); + + var roBuffer = bbAlloc.apply(6).asReadOnlyBuffer(); + assertThrows(ReadOnlyBufferException.class, () -> buf.copyInto(0, roBuffer, 0, 1)); + assertThrows(ReadOnlyBufferException.class, () -> buf.copyInto(0, roBuffer, 0, 0)); + } + } + + public static void testCopyIntoBuf(Fixture fixture, Function bbAlloc) { + try (BufferAllocator allocator = fixture.createAllocator(); + Buffer buf = allocator.allocate(8)) { + buf.writeLong(0x0102030405060708L); + try (Buffer buffer = bbAlloc.apply(8)) { + buffer.writerOffset(8); + buf.copyInto(0, buffer, 0, buffer.capacity()); + assertEquals((byte) 0x01, buffer.readByte()); + assertEquals((byte) 0x02, buffer.readByte()); + assertEquals((byte) 0x03, buffer.readByte()); + assertEquals((byte) 0x04, buffer.readByte()); + assertEquals((byte) 0x05, buffer.readByte()); + assertEquals((byte) 0x06, buffer.readByte()); + assertEquals((byte) 0x07, buffer.readByte()); + assertEquals((byte) 0x08, buffer.readByte()); + buffer.resetOffsets(); + } + + try (Buffer buffer = bbAlloc.apply(6)) { + buf.copyInto(1, buffer, 1, 3); + buffer.writerOffset(6); + assertEquals((byte) 0x00, buffer.readByte()); + assertEquals((byte) 0x02, buffer.readByte()); + assertEquals((byte) 0x03, buffer.readByte()); + assertEquals((byte) 0x04, buffer.readByte()); + assertEquals((byte) 0x00, buffer.readByte()); + assertEquals((byte) 0x00, buffer.readByte()); + } + + try (Buffer buffer = bbAlloc.apply(6)) { + buffer.writerOffset(3).readerOffset(3); + buf.copyInto(1, buffer, 1, 3); + assertEquals(3, 
buffer.readerOffset()); + assertEquals(3, buffer.writerOffset()); + buffer.resetOffsets(); + buffer.writerOffset(6); + assertEquals((byte) 0x00, buffer.readByte()); + assertEquals((byte) 0x02, buffer.readByte()); + assertEquals((byte) 0x03, buffer.readByte()); + assertEquals((byte) 0x04, buffer.readByte()); + assertEquals((byte) 0x00, buffer.readByte()); + assertEquals((byte) 0x00, buffer.readByte()); + } + + buf.resetOffsets(); + buf.writeLong(0x0102030405060708L); + // Testing copyInto for overlapping writes: + // + // 0x0102030405060708 + // └──â”Ŧ──â”Ŧ──┘ │ + // └─â–ļ└â”Ŧ───────┘ + // â–ŧ + // 0x0102030102030405 + buf.copyInto(0, buf, 3, 5); + assertThat(toByteArray(buf)).containsExactly(0x01, 0x02, 0x03, 0x01, 0x02, 0x03, 0x04, 0x05); + } + } + + public static void checkByteIteration(Buffer buf) { + var cursor = buf.openCursor(); + assertFalse(cursor.readByte()); + assertEquals(0, cursor.bytesLeft()); + assertEquals((byte) -1, cursor.getByte()); + + buf.writeBytes(new byte[] {1, 2, 3, 4}); + int roff = buf.readerOffset(); + int woff = buf.writerOffset(); + cursor = buf.openCursor(); + assertEquals(4, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 0x01, cursor.getByte()); + assertEquals((byte) 0x01, cursor.getByte()); + assertEquals(3, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 0x02, cursor.getByte()); + assertEquals((byte) 0x02, cursor.getByte()); + assertEquals(2, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 0x03, cursor.getByte()); + assertEquals(1, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 0x04, cursor.getByte()); + assertEquals(0, cursor.bytesLeft()); + assertFalse(cursor.readByte()); + assertEquals((byte) 0x04, cursor.getByte()); + assertEquals((byte) 0x04, cursor.getByte()); + assertEquals(roff, buf.readerOffset()); + assertEquals(woff, buf.writerOffset()); + } + + public static void checkByteIterationOfRegion(Buffer buf) { + 
assertThrows(IllegalArgumentException.class, () -> buf.openCursor(-1, 1)); + assertThrows(IllegalArgumentException.class, () -> buf.openCursor(1, -1)); + assertThrows(IllegalArgumentException.class, () -> buf.openCursor(buf.capacity(), 1)); + assertThrows(IllegalArgumentException.class, () -> buf.openCursor(buf.capacity() - 1, 2)); + assertThrows(IllegalArgumentException.class, () -> buf.openCursor(buf.capacity() - 2, 3)); + + var cursor = buf.openCursor(1, 0); + assertFalse(cursor.readByte()); + assertEquals(0, cursor.bytesLeft()); + assertEquals((byte) -1, cursor.getByte()); + + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6}); + int roff = buf.readerOffset(); + int woff = buf.writerOffset(); + cursor = buf.openCursor(buf.readerOffset() + 1, buf.readableBytes() - 2); + assertEquals(4, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 2, cursor.getByte()); + assertEquals((byte) 2, cursor.getByte()); + assertEquals(3, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 3, cursor.getByte()); + assertEquals(2, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 4, cursor.getByte()); + assertEquals(1, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 5, cursor.getByte()); + assertEquals(0, cursor.bytesLeft()); + assertEquals(0, cursor.bytesLeft()); + assertFalse(cursor.readByte()); + assertEquals(0, cursor.bytesLeft()); + assertEquals((byte) 5, cursor.getByte()); + + cursor = buf.openCursor(buf.readerOffset() + 1, 2); + assertEquals(2, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 2, cursor.getByte()); + assertEquals(1, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 3, cursor.getByte()); + assertEquals(0, cursor.bytesLeft()); + assertFalse(cursor.readByte()); + assertEquals(roff, buf.readerOffset()); + assertEquals(woff, buf.writerOffset()); + } + + public static void checkReverseByteIteration(Buffer buf) { + var 
cursor = buf.openReverseCursor(); + assertFalse(cursor.readByte()); + assertEquals(0, cursor.bytesLeft()); + assertEquals((byte) -1, cursor.getByte()); + + buf.writeBytes(new byte[] {1, 2, 3, 4}); + int roff = buf.readerOffset(); + int woff = buf.writerOffset(); + cursor = buf.openReverseCursor(); + assertEquals(4, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 4, cursor.getByte()); + assertEquals((byte) 4, cursor.getByte()); + assertEquals(3, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 3, cursor.getByte()); + assertEquals(2, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 2, cursor.getByte()); + assertEquals(1, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 1, cursor.getByte()); + assertEquals(0, cursor.bytesLeft()); + assertFalse(cursor.readByte()); + assertEquals((byte) 1, cursor.getByte()); + assertFalse(cursor.readByte()); + assertEquals(0, cursor.bytesLeft()); + assertEquals(roff, buf.readerOffset()); + assertEquals(woff, buf.writerOffset()); + } + + public static void checkReverseByteIterationOfRegion(Buffer buf) { + assertThrows(IllegalArgumentException.class, () -> buf.openReverseCursor(-1, 0)); + assertThrows(IllegalArgumentException.class, () -> buf.openReverseCursor(0, -1)); + assertThrows(IllegalArgumentException.class, () -> buf.openReverseCursor(0, 2)); + assertThrows(IllegalArgumentException.class, () -> buf.openReverseCursor(1, 3)); + assertThrows(IllegalArgumentException.class, () -> buf.openReverseCursor(buf.capacity(), 0)); + + var cursor = buf.openReverseCursor(1, 0); + assertFalse(cursor.readByte()); + assertEquals(0, cursor.bytesLeft()); + assertEquals((byte) -1, cursor.getByte()); + + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7}); + int roff = buf.readerOffset(); + int woff = buf.writerOffset(); + cursor = buf.openReverseCursor(buf.writerOffset() - 2, buf.readableBytes() - 2); + assertEquals(5, cursor.bytesLeft()); + 
assertTrue(cursor.readByte()); + assertEquals((byte) 6, cursor.getByte()); + assertEquals((byte) 6, cursor.getByte()); + assertEquals(4, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 5, cursor.getByte()); + assertEquals(3, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 4, cursor.getByte()); + assertEquals(2, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 3, cursor.getByte()); + assertEquals(1, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 2, cursor.getByte()); + assertEquals(0, cursor.bytesLeft()); + assertFalse(cursor.readByte()); + assertEquals((byte) 2, cursor.getByte()); + assertFalse(cursor.readByte()); + assertEquals(0, cursor.bytesLeft()); + + cursor = buf.openReverseCursor(buf.readerOffset() + 2, 2); + assertEquals(2, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 3, cursor.getByte()); + assertEquals(1, cursor.bytesLeft()); + assertTrue(cursor.readByte()); + assertEquals((byte) 2, cursor.getByte()); + assertEquals(0, cursor.bytesLeft()); + assertFalse(cursor.readByte()); + assertEquals(roff, buf.readerOffset()); + assertEquals(woff, buf.writerOffset()); + } + + public static void verifySplitEmptyCompositeBuffer(Buffer buf) { + try (Buffer a = buf.split()) { + a.ensureWritable(4); + buf.ensureWritable(4); + a.writeInt(1); + buf.writeInt(2); + assertEquals(1, a.readInt()); + assertEquals(2, buf.readInt()); + } + } + + public static void verifyForEachReadableSingleComponent(Fixture fixture, Buffer buf) { + buf.forEachReadable(0, (index, component) -> { + var buffer = component.readableBuffer(); + assertThat(buffer.position()).isZero(); + assertThat(buffer.limit()).isEqualTo(8); + assertThat(buffer.capacity()).isEqualTo(8); + assertEquals(0x0102030405060708L, buffer.getLong()); + + if (fixture.isDirect()) { + assertThat(component.readableNativeAddress()).isNotZero(); + } else { + 
assertThat(component.readableNativeAddress()).isZero(); + } + + if (component.hasReadableArray()) { + byte[] array = component.readableArray(); + byte[] arrayCopy = new byte[component.readableArrayLength()]; + System.arraycopy(array, component.readableArrayOffset(), arrayCopy, 0, arrayCopy.length); + if (buffer.order() == BIG_ENDIAN) { + assertThat(arrayCopy).containsExactly(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08); + } else { + assertThat(arrayCopy).containsExactly(0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01); + } + } + + assertThrows(ReadOnlyBufferException.class, () -> buffer.put(0, (byte) 0xFF)); + return true; + }); + } + + public static void verifyForEachWritableSingleComponent(Fixture fixture, Buffer buf) { + buf.forEachWritable(0, (index, component) -> { + var buffer = component.writableBuffer(); + assertThat(buffer.position()).isZero(); + assertThat(buffer.limit()).isEqualTo(8); + assertThat(buffer.capacity()).isEqualTo(8); + buffer.putLong(0x0102030405060708L); + buffer.flip(); + assertEquals(0x0102030405060708L, buffer.getLong()); + buf.writerOffset(8); + assertEquals(0x0102030405060708L, buf.getLong(0)); + + if (fixture.isDirect()) { + assertThat(component.writableNativeAddress()).isNotZero(); + } else { + assertThat(component.writableNativeAddress()).isZero(); + } + + buf.writerOffset(0); + if (component.hasWritableArray()) { + byte[] array = component.writableArray(); + int offset = component.writableArrayOffset(); + byte[] arrayCopy = new byte[component.writableArrayLength()]; + System.arraycopy(array, offset, arrayCopy, 0, arrayCopy.length); + if (buffer.order() == BIG_ENDIAN) { + assertThat(arrayCopy).containsExactly(0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08); + } else { + assertThat(arrayCopy).containsExactly(0x08, 0x07, 0x06, 0x05, 0x04, 0x03, 0x02, 0x01); + } + } + + buffer.put(0, (byte) 0xFF); + assertEquals((byte) 0xFF, buffer.get(0)); + assertEquals((byte) 0xFF, buf.getByte(0)); + return true; + }); + } + + public static byte[] 
toByteArray(Buffer buf) { + byte[] bs = new byte[buf.capacity()]; + buf.copyInto(0, bs, 0, bs.length); + return bs; + } + + public static byte[] readByteArray(Buffer buf) { + byte[] bs = new byte[buf.readableBytes()]; + buf.copyInto(buf.readerOffset(), bs, 0, bs.length); + buf.readerOffset(buf.writerOffset()); + return bs; + } + + public static void assertEquals(Buffer expected, Buffer actual) { + assertThat(toByteArray(actual)).containsExactly(toByteArray(expected)); + } + + public static void assertReadableEquals(Buffer expected, Buffer actual) { + assertThat(readByteArray(actual)).containsExactly(readByteArray(expected)); + } + + public static void assertEquals(byte expected, byte actual) { + if (expected != actual) { + fail(String.format("expected: %1$s (0x%1$X) but was: %2$s (0x%2$X)", expected, actual)); + } + } + + public static void assertEquals(char expected, char actual) { + if (expected != actual) { + fail(String.format("expected: %s (0x%X) but was: %s (0x%X)", + expected, (int) expected, actual, (int) actual)); + } + } + + public static void assertEquals(short expected, short actual) { + if (expected != actual) { + fail(String.format("expected: %1$s (0x%1$X) but was: %2$s (0x%2$X)", expected, actual)); + } + } + + public static void assertEquals(int expected, int actual) { + if (expected != actual) { + fail(String.format("expected: %1$s (0x%1$X) but was: %2$s (0x%2$X)", expected, actual)); + } + } + + public static void assertEquals(long expected, long actual) { + if (expected != actual) { + fail(String.format("expected: %1$s (0x%1$X) but was: %2$s (0x%2$X)", expected, actual)); + } + } + + public static void assertEquals(float expected, float actual) { + //noinspection FloatingPointEquality + if (expected != actual) { + fail(String.format("expected: %s (0x%X) but was: %s (0x%X)", + expected, Float.floatToRawIntBits(expected), + actual, Float.floatToRawIntBits(actual))); + } + } + + public static void assertEquals(double expected, double actual) { + 
//noinspection FloatingPointEquality + if (expected != actual) { + fail(String.format("expected: %s (0x%X) but was: %s (0x%X)", + expected, Double.doubleToRawLongBits(expected), + actual, Double.doubleToRawLongBits(actual))); + } + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/BufferWriteBytesCombinationsTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/BufferWriteBytesCombinationsTest.java new file mode 100644 index 00000000000..9eb63ced4ca --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/BufferWriteBytesCombinationsTest.java @@ -0,0 +1,62 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.Buffer; +import io.netty.buffer.api.BufferAllocator; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + +public class BufferWriteBytesCombinationsTest extends BufferTestSupport { + private static final Memoize OTHER_FIXTURES = new Memoize( + () -> Arrays.stream(allocators()).filter(filterOfTheDay(10)).toArray(Fixture[]::new)); + + @ParameterizedTest + @MethodSource("allocators") + public void writeBytesMustTransferDataAndUpdateOffsets(Fixture fixture) { + try (BufferAllocator alloc1 = fixture.createAllocator()) { + // Only test 10% of available combinations. Otherwise, this takes too long. + Fixture[] allocators = OTHER_FIXTURES.get(); + Arrays.stream(allocators).parallel().forEach(otherFixture -> { + try (BufferAllocator alloc2 = otherFixture.createAllocator(); + Buffer target = alloc1.allocate(37); + Buffer source = alloc2.allocate(35)) { + verifyWriteBytes(target, source); + } catch (Exception e) { + e.addSuppressed(new RuntimeException("other fixture was: " + otherFixture)); + throw e; + } + }); + } + } + + private static void verifyWriteBytes(Buffer target, Buffer source) { + for (int i = 0; i < 35; i++) { + source.writeByte((byte) (i + 1)); + } + target.writeBytes(source); + assertThat(target.readerOffset()).isZero(); + assertThat(target.writerOffset()).isEqualTo(35); + assertThat(source.readerOffset()).isEqualTo(35); + assertThat(source.writerOffset()).isEqualTo(35); + source.readerOffset(0); + assertReadableEquals(source, target); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/Fixture.java b/buffer/src/test/java/io/netty/buffer/api/tests/Fixture.java new file mode 100644 index 00000000000..ebbf6095e48 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/Fixture.java @@ -0,0 +1,75 @@ +/* + * Copyright 2021 The 
Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests; + +import io.netty.buffer.api.BufferAllocator; + +import java.util.Arrays; +import java.util.EnumSet; +import java.util.function.Supplier; + +public final class Fixture implements Supplier { + private final String name; + private final Supplier factory; + private final EnumSet properties; + + public Fixture(String name, Supplier factory, Properties... 
props) { + this.name = name; + this.factory = factory; + properties = EnumSet.copyOf(Arrays.asList(props)); + } + + public BufferAllocator createAllocator() { + return factory.get(); + } + + @Override + public BufferAllocator get() { + return factory.get(); + } + + @Override + public String toString() { + return name; + } + + public Properties[] getProperties() { + return properties.toArray(Properties[]::new); + } + + public boolean isHeap() { + return properties.contains(Properties.HEAP); + } + + public boolean isDirect() { + return properties.contains(Properties.DIRECT); + } + + public boolean isComposite() { + return properties.contains(Properties.COMPOSITE); + } + + public boolean isPooled() { + return properties.contains(Properties.POOLED); + } + + public enum Properties { + HEAP, + DIRECT, + COMPOSITE, + POOLED + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/Memoize.java b/buffer/src/test/java/io/netty/buffer/api/tests/Memoize.java new file mode 100644 index 00000000000..fbfda6b6ec8 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/Memoize.java @@ -0,0 +1,36 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests; + +import java.util.function.Supplier; + +final class Memoize implements Supplier { + private final Supplier supplier; + private volatile T memo; + + Memoize(Supplier supplier) { + this.supplier = supplier; + } + + @Override + public T get() { + T val = memo; + if (val == null) { + memo = val = supplier.get(); + } + return val; + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/AbstractByteBufTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/AbstractByteBufTest.java new file mode 100644 index 00000000000..f45da25b651 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/AbstractByteBufTest.java @@ -0,0 +1,4976 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests.adaptor; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.util.ByteProcessor; +import io.netty.util.CharsetUtil; +import io.netty.util.IllegalReferenceCountException; +import io.netty.util.internal.PlatformDependent; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.nio.ByteBuffer; +import java.nio.ByteOrder; +import java.nio.CharBuffer; +import java.nio.ReadOnlyBufferException; +import java.nio.channels.Channels; +import java.nio.channels.FileChannel; +import java.nio.channels.GatheringByteChannel; +import java.nio.channels.ScatteringByteChannel; +import java.nio.channels.WritableByteChannel; +import java.nio.charset.Charset; +import java.time.Duration; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Random; +import java.util.Set; +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.CyclicBarrier; +import java.util.concurrent.ThreadLocalRandom; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicReference; + +import static io.netty.buffer.Unpooled.LITTLE_ENDIAN; +import static io.netty.buffer.Unpooled.buffer; +import static io.netty.buffer.Unpooled.copiedBuffer; +import static io.netty.buffer.Unpooled.directBuffer; +import static io.netty.buffer.Unpooled.unreleasableBuffer; +import static io.netty.buffer.Unpooled.wrappedBuffer; +import static io.netty.util.internal.EmptyArrays.EMPTY_BYTES; +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static 
org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTimeoutPreemptively; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; +import static org.junit.jupiter.api.Assumptions.assumeFalse; +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +/** + * An abstract test class for channel buffers + */ +public abstract class AbstractByteBufTest { + + private static final int CAPACITY = 4096; // Must be even + private static final int BLOCK_SIZE = 128; + private static final int JAVA_BYTEBUFFER_CONSISTENCY_ITERATIONS = 100; + + private long seed; + private Random random; + private ByteBuf buffer; + + protected final ByteBuf newBuffer(int capacity) { + return newBuffer(capacity, Integer.MAX_VALUE); + } + + protected abstract ByteBuf newBuffer(int capacity, int maxCapacity); + + protected boolean discardReadBytesDoesNotMoveWritableBytes() { + return true; + } + + @BeforeEach + public void init() { + buffer = newBuffer(CAPACITY); + seed = System.currentTimeMillis(); + random = new Random(seed); + } + + @AfterEach + public void dispose() { + if (buffer != null) { + assertTrue(buffer.release()); + assertEquals(0, buffer.refCnt()); + + try { + buffer.release(); + } catch (Exception e) { + // Ignore. 
+ } + buffer = null; + } + } + + @Test + public void comparableInterfaceNotViolated() { + assumeFalse(buffer.isReadOnly()); + buffer.writerIndex(buffer.readerIndex()); + assumeTrue(buffer.writableBytes() >= 4); + + buffer.writeLong(0); + ByteBuf buffer2 = newBuffer(CAPACITY); + assumeFalse(buffer2.isReadOnly()); + buffer2.writerIndex(buffer2.readerIndex()); + // Write an unsigned integer that will cause buffer.getUnsignedInt() - buffer2.getUnsignedInt() to underflow the + // int type and wrap around on the negative side. + buffer2.writeLong(0xF0000000L); + assertTrue(buffer.compareTo(buffer2) < 0); + assertTrue(buffer2.compareTo(buffer) > 0); + buffer2.release(); + } + + @Test + public void initialState() { + assertEquals(CAPACITY, buffer.capacity()); + assertEquals(0, buffer.readerIndex()); + } + + @Test + public void readerIndexBoundaryCheck1() { + buffer.writerIndex(0); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.readerIndex(-1)); + } + + @Test + public void readerIndexBoundaryCheck2() { + buffer.writerIndex(buffer.capacity()); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.readerIndex(buffer.capacity() + 1)); + } + + @Test + public void readerIndexBoundaryCheck3() { + buffer.writerIndex(CAPACITY / 2); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.readerIndex(CAPACITY * 3 / 2)); + } + + @Test + public void readerIndexBoundaryCheck4() { + buffer.writerIndex(0); + buffer.readerIndex(0); + buffer.writerIndex(buffer.capacity()); + buffer.readerIndex(buffer.capacity()); + } + + @Test + public void writerIndexBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.writerIndex(-1)); + } + + @Test + public void writerIndexBoundaryCheck2() { + buffer.writerIndex(CAPACITY); + buffer.readerIndex(CAPACITY); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.writerIndex(buffer.capacity() + 1)); + } + + @Test + public void writerIndexBoundaryCheck3() { + buffer.writerIndex(CAPACITY); + 
buffer.readerIndex(CAPACITY / 2); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.writerIndex(CAPACITY / 4)); + } + + @Test + public void writerIndexBoundaryCheck4() { + buffer.writerIndex(0); + buffer.readerIndex(0); + buffer.writerIndex(CAPACITY); + + buffer.writeBytes(ByteBuffer.wrap(EMPTY_BYTES)); + } + + @Test + public void getBooleanBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBoolean(-1)); + } + + @Test + public void getBooleanBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBoolean(buffer.capacity())); + } + + @Test + public void getByteBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getByte(-1)); + } + + @Test + public void getByteBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getByte(buffer.capacity())); + } + + @Test + public void getShortBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getShort(-1)); + } + + @Test + public void getShortBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getShort(buffer.capacity() - 1)); + } + + @Test + public void getMediumBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getMedium(-1)); + } + + @Test + public void getMediumBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getMedium(buffer.capacity() - 2)); + } + + @Test + public void getIntBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getInt(-1)); + } + + @Test + public void getIntBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getInt(buffer.capacity() - 3)); + } + + @Test + public void getLongBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getLong(-1)); + } + + @Test + public void getLongBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getLong(buffer.capacity() - 7)); + } + + 
@Test + public void getByteArrayBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, EMPTY_BYTES)); + } + + @Test + public void getByteArrayBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, EMPTY_BYTES, 0, 0)); + } + + @Test + public void getByteArrayBoundaryCheck3() { + byte[] dst = new byte[4]; + buffer.setInt(0, 0x01020304); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(0, dst, -1, 4)); + + // No partial copy is expected. + assertEquals(0, dst[0]); + assertEquals(0, dst[1]); + assertEquals(0, dst[2]); + assertEquals(0, dst[3]); + } + + @Test + public void getByteArrayBoundaryCheck4() { + byte[] dst = new byte[4]; + buffer.setInt(0, 0x01020304); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(0, dst, 1, 4)); + + // No partial copy is expected. + assertEquals(0, dst[0]); + assertEquals(0, dst[1]); + assertEquals(0, dst[2]); + assertEquals(0, dst[3]); + } + + @Test + public void getByteBufferBoundaryCheck() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, ByteBuffer.allocate(0))); + } + + @Test + public void copyBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(-1, 0)); + } + + @Test + public void copyBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(0, buffer.capacity() + 1)); + } + + @Test + public void copyBoundaryCheck3() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(buffer.capacity() + 1, 0)); + } + + @Test + public void copyBoundaryCheck4() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.copy(buffer.capacity(), 1)); + } + + @Test + public void setIndexBoundaryCheck1() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.setIndex(-1, CAPACITY)); + } + + @Test + public void setIndexBoundaryCheck2() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.setIndex(CAPACITY / 2, CAPACITY / 
4)); + } + + @Test + public void setIndexBoundaryCheck3() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.setIndex(0, CAPACITY + 1)); + } + + @Test + public void getByteBufferState() { + ByteBuffer dst = ByteBuffer.allocate(4); + dst.position(1); + dst.limit(3); + + buffer.setByte(0, (byte) 1); + buffer.setByte(1, (byte) 2); + buffer.setByte(2, (byte) 3); + buffer.setByte(3, (byte) 4); + buffer.getBytes(1, dst); + + assertEquals(3, dst.position()); + assertEquals(3, dst.limit()); + + dst.clear(); + assertEquals(0, dst.get(0)); + assertEquals(2, dst.get(1)); + assertEquals(3, dst.get(2)); + assertEquals(0, dst.get(3)); + } + + @Test + public void getDirectByteBufferBoundaryCheck() { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.getBytes(-1, ByteBuffer.allocateDirect(0))); + } + + @Test + public void getDirectByteBufferState() { + ByteBuffer dst = ByteBuffer.allocateDirect(4); + dst.position(1); + dst.limit(3); + + buffer.setByte(0, (byte) 1); + buffer.setByte(1, (byte) 2); + buffer.setByte(2, (byte) 3); + buffer.setByte(3, (byte) 4); + buffer.getBytes(1, dst); + + assertEquals(3, dst.position()); + assertEquals(3, dst.limit()); + + dst.clear(); + assertEquals(0, dst.get(0)); + assertEquals(2, dst.get(1)); + assertEquals(3, dst.get(2)); + assertEquals(0, dst.get(3)); + } + + @Test + public void testRandomByteAccess() { + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + buffer.setByte(i, value); + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + assertEquals(value, buffer.getByte(i)); + } + } + + @Test + public void testRandomUnsignedByteAccess() { + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + buffer.setByte(i, value); + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i ++) { + int value = random.nextInt() & 0xFF; + assertEquals(value, buffer.getUnsignedByte(i)); + 
} + } + + @Test + public void testRandomShortAccess() { + testRandomShortAccess(true); + } + @Test + public void testRandomShortLEAccess() { + testRandomShortAccess(false); + } + + private void testRandomShortAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 1; i += 2) { + short value = (short) random.nextInt(); + if (testBigEndian) { + buffer.setShort(i, value); + } else { + buffer.setShortLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 1; i += 2) { + short value = (short) random.nextInt(); + if (testBigEndian) { + assertEquals(value, buffer.getShort(i)); + } else { + assertEquals(value, buffer.getShortLE(i)); + } + } + } + + @Test + public void testShortConsistentWithByteBuffer() { + testShortConsistentWithByteBuffer(true, true); + testShortConsistentWithByteBuffer(true, false); + testShortConsistentWithByteBuffer(false, true); + testShortConsistentWithByteBuffer(false, false); + } + + private void testShortConsistentWithByteBuffer(boolean direct, boolean testBigEndian) { + for (int i = 0; i < JAVA_BYTEBUFFER_CONSISTENCY_ITERATIONS; ++i) { + ByteBuffer javaBuffer = direct ? ByteBuffer.allocateDirect(buffer.capacity()) + : ByteBuffer.allocate(buffer.capacity()); + if (!testBigEndian) { + javaBuffer = javaBuffer.order(ByteOrder.LITTLE_ENDIAN); + } + + short expected = (short) (random.nextInt() & 0xFFFF); + javaBuffer.putShort(expected); + + final int bufferIndex = buffer.capacity() - 2; + if (testBigEndian) { + buffer.setShort(bufferIndex, expected); + } else { + buffer.setShortLE(bufferIndex, expected); + } + javaBuffer.flip(); + + short javaActual = javaBuffer.getShort(); + assertEquals(expected, javaActual); + assertEquals(javaActual, testBigEndian ? 
buffer.getShort(bufferIndex) + : buffer.getShortLE(bufferIndex)); + } + } + + @Test + public void testRandomUnsignedShortAccess() { + testRandomUnsignedShortAccess(true); + } + + @Test + public void testRandomUnsignedShortLEAccess() { + testRandomUnsignedShortAccess(false); + } + + private void testRandomUnsignedShortAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 1; i += 2) { + short value = (short) random.nextInt(); + if (testBigEndian) { + buffer.setShort(i, value); + } else { + buffer.setShortLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 1; i += 2) { + int value = random.nextInt() & 0xFFFF; + if (testBigEndian) { + assertEquals(value, buffer.getUnsignedShort(i)); + } else { + assertEquals(value, buffer.getUnsignedShortLE(i)); + } + } + } + + @Test + public void testRandomMediumAccess() { + testRandomMediumAccess(true); + } + + @Test + public void testRandomMediumLEAccess() { + testRandomMediumAccess(false); + } + + private void testRandomMediumAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 2; i += 3) { + int value = random.nextInt(); + if (testBigEndian) { + buffer.setMedium(i, value); + } else { + buffer.setMediumLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 2; i += 3) { + int value = random.nextInt() << 8 >> 8; + if (testBigEndian) { + assertEquals(value, buffer.getMedium(i)); + } else { + assertEquals(value, buffer.getMediumLE(i)); + } + } + } + + @Test + public void testRandomUnsignedMediumAccess() { + testRandomUnsignedMediumAccess(true); + } + + @Test + public void testRandomUnsignedMediumLEAccess() { + testRandomUnsignedMediumAccess(false); + } + + private void testRandomUnsignedMediumAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 2; i += 3) { + int value = random.nextInt(); + if (testBigEndian) { + buffer.setMedium(i, value); + } else { + buffer.setMediumLE(i, value); + } + } + + 
random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 2; i += 3) { + int value = random.nextInt() & 0x00FFFFFF; + if (testBigEndian) { + assertEquals(value, buffer.getUnsignedMedium(i)); + } else { + assertEquals(value, buffer.getUnsignedMediumLE(i)); + } + } + } + + @Test + public void testMediumConsistentWithByteBuffer() { + testMediumConsistentWithByteBuffer(true, true); + testMediumConsistentWithByteBuffer(true, false); + testMediumConsistentWithByteBuffer(false, true); + testMediumConsistentWithByteBuffer(false, false); + } + + private void testMediumConsistentWithByteBuffer(boolean direct, boolean testBigEndian) { + for (int i = 0; i < JAVA_BYTEBUFFER_CONSISTENCY_ITERATIONS; ++i) { + ByteBuffer javaBuffer = direct ? ByteBuffer.allocateDirect(buffer.capacity()) + : ByteBuffer.allocate(buffer.capacity()); + if (!testBigEndian) { + javaBuffer = javaBuffer.order(ByteOrder.LITTLE_ENDIAN); + } + + int expected = random.nextInt() & 0x00FFFFFF; + javaBuffer.putInt(expected); + + final int bufferIndex = buffer.capacity() - 3; + if (testBigEndian) { + buffer.setMedium(bufferIndex, expected); + } else { + buffer.setMediumLE(bufferIndex, expected); + } + javaBuffer.flip(); + + int javaActual = javaBuffer.getInt(); + assertEquals(expected, javaActual); + assertEquals(javaActual, testBigEndian ? 
buffer.getUnsignedMedium(bufferIndex) + : buffer.getUnsignedMediumLE(bufferIndex)); + } + } + + @Test + public void testRandomIntAccess() { + testRandomIntAccess(true); + } + + @Test + public void testRandomIntLEAccess() { + testRandomIntAccess(false); + } + + private void testRandomIntAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 3; i += 4) { + int value = random.nextInt(); + if (testBigEndian) { + buffer.setInt(i, value); + } else { + buffer.setIntLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 3; i += 4) { + int value = random.nextInt(); + if (testBigEndian) { + assertEquals(value, buffer.getInt(i)); + } else { + assertEquals(value, buffer.getIntLE(i)); + } + } + } + + @Test + public void testIntConsistentWithByteBuffer() { + testIntConsistentWithByteBuffer(true, true); + testIntConsistentWithByteBuffer(true, false); + testIntConsistentWithByteBuffer(false, true); + testIntConsistentWithByteBuffer(false, false); + } + + private void testIntConsistentWithByteBuffer(boolean direct, boolean testBigEndian) { + for (int i = 0; i < JAVA_BYTEBUFFER_CONSISTENCY_ITERATIONS; ++i) { + ByteBuffer javaBuffer = direct ? ByteBuffer.allocateDirect(buffer.capacity()) + : ByteBuffer.allocate(buffer.capacity()); + if (!testBigEndian) { + javaBuffer = javaBuffer.order(ByteOrder.LITTLE_ENDIAN); + } + + int expected = random.nextInt(); + javaBuffer.putInt(expected); + + final int bufferIndex = buffer.capacity() - 4; + if (testBigEndian) { + buffer.setInt(bufferIndex, expected); + } else { + buffer.setIntLE(bufferIndex, expected); + } + javaBuffer.flip(); + + int javaActual = javaBuffer.getInt(); + assertEquals(expected, javaActual); + assertEquals(javaActual, testBigEndian ? 
buffer.getInt(bufferIndex) + : buffer.getIntLE(bufferIndex)); + } + } + + @Test + public void testRandomUnsignedIntAccess() { + testRandomUnsignedIntAccess(true); + } + + @Test + public void testRandomUnsignedIntLEAccess() { + testRandomUnsignedIntAccess(false); + } + + private void testRandomUnsignedIntAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 3; i += 4) { + int value = random.nextInt(); + if (testBigEndian) { + buffer.setInt(i, value); + } else { + buffer.setIntLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 3; i += 4) { + long value = random.nextInt() & 0xFFFFFFFFL; + if (testBigEndian) { + assertEquals(value, buffer.getUnsignedInt(i)); + } else { + assertEquals(value, buffer.getUnsignedIntLE(i)); + } + } + } + + @Test + public void testRandomLongAccess() { + testRandomLongAccess(true); + } + + @Test + public void testRandomLongLEAccess() { + testRandomLongAccess(false); + } + + private void testRandomLongAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 7; i += 8) { + long value = random.nextLong(); + if (testBigEndian) { + buffer.setLong(i, value); + } else { + buffer.setLongLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 7; i += 8) { + long value = random.nextLong(); + if (testBigEndian) { + assertEquals(value, buffer.getLong(i)); + } else { + assertEquals(value, buffer.getLongLE(i)); + } + } + } + + @Test + public void testLongConsistentWithByteBuffer() { + testLongConsistentWithByteBuffer(true, true); + testLongConsistentWithByteBuffer(true, false); + testLongConsistentWithByteBuffer(false, true); + testLongConsistentWithByteBuffer(false, false); + } + + private void testLongConsistentWithByteBuffer(boolean direct, boolean testBigEndian) { + for (int i = 0; i < JAVA_BYTEBUFFER_CONSISTENCY_ITERATIONS; ++i) { + ByteBuffer javaBuffer = direct ? 
ByteBuffer.allocateDirect(buffer.capacity()) + : ByteBuffer.allocate(buffer.capacity()); + if (!testBigEndian) { + javaBuffer = javaBuffer.order(ByteOrder.LITTLE_ENDIAN); + } + + long expected = random.nextLong(); + javaBuffer.putLong(expected); + + final int bufferIndex = buffer.capacity() - 8; + if (testBigEndian) { + buffer.setLong(bufferIndex, expected); + } else { + buffer.setLongLE(bufferIndex, expected); + } + javaBuffer.flip(); + + long javaActual = javaBuffer.getLong(); + assertEquals(expected, javaActual); + assertEquals(javaActual, testBigEndian ? buffer.getLong(bufferIndex) + : buffer.getLongLE(bufferIndex)); + } + } + + @Test + public void testRandomFloatAccess() { + testRandomFloatAccess(true); + } + + @Test + public void testRandomFloatLEAccess() { + testRandomFloatAccess(false); + } + + private void testRandomFloatAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 7; i += 8) { + float value = random.nextFloat(); + if (testBigEndian) { + buffer.setFloat(i, value); + } else { + buffer.setFloatLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 7; i += 8) { + float expected = random.nextFloat(); + float actual = testBigEndian? buffer.getFloat(i) : buffer.getFloatLE(i); + assertEquals(expected, actual, 0.01); + } + } + + @Test + public void testRandomDoubleAccess() { + testRandomDoubleAccess(true); + } + + @Test + public void testRandomDoubleLEAccess() { + testRandomDoubleAccess(false); + } + + private void testRandomDoubleAccess(boolean testBigEndian) { + for (int i = 0; i < buffer.capacity() - 7; i += 8) { + double value = random.nextDouble(); + if (testBigEndian) { + buffer.setDouble(i, value); + } else { + buffer.setDoubleLE(i, value); + } + } + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() - 7; i += 8) { + double expected = random.nextDouble(); + double actual = testBigEndian? 
buffer.getDouble(i) : buffer.getDoubleLE(i); + assertEquals(expected, actual, 0.01); + } + } + + @Test + public void testSetZero() { + buffer.clear(); + while (buffer.isWritable()) { + buffer.writeByte((byte) 0xFF); + } + + for (int i = 0; i < buffer.capacity();) { + int length = Math.min(buffer.capacity() - i, random.nextInt(32)); + buffer.setZero(i, length); + i += length; + } + + for (int i = 0; i < buffer.capacity(); i ++) { + assertEquals(0, buffer.getByte(i)); + } + } + + @Test + public void testSequentialByteAccess() { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + buffer.writeByte(value); + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isWritable()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + assertEquals(value, buffer.readByte()); + } + + assertEquals(buffer.capacity(), buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isReadable()); + assertFalse(buffer.isWritable()); + } + + @Test + public void testSequentialUnsignedByteAccess() { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + buffer.writeByte(value); + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isWritable()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i ++) { + int value = random.nextInt() & 0xFF; + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + assertEquals(value, buffer.readUnsignedByte()); + } + + 
assertEquals(buffer.capacity(), buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isReadable()); + assertFalse(buffer.isWritable()); + } + + @Test + public void testSequentialShortAccess() { + testSequentialShortAccess(true); + } + + @Test + public void testSequentialShortLEAccess() { + testSequentialShortAccess(false); + } + + private void testSequentialShortAccess(boolean testBigEndian) { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i += 2) { + short value = (short) random.nextInt(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + if (testBigEndian) { + buffer.writeShort(value); + } else { + buffer.writeShortLE(value); + } + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isWritable()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i += 2) { + short value = (short) random.nextInt(); + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + if (testBigEndian) { + assertEquals(value, buffer.readShort()); + } else { + assertEquals(value, buffer.readShortLE()); + } + } + + assertEquals(buffer.capacity(), buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isReadable()); + assertFalse(buffer.isWritable()); + } + + @Test + public void testSequentialUnsignedShortAccess() { + testSequentialUnsignedShortAccess(true); + } + + @Test + public void testSequentialUnsignedShortLEAccess() { + testSequentialUnsignedShortAccess(true); + } + + private void testSequentialUnsignedShortAccess(boolean testBigEndian) { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i += 2) { + short value = (short) random.nextInt(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + if (testBigEndian) { + buffer.writeShort(value); + } else { + buffer.writeShortLE(value); + } + } + + 
assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isWritable()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i += 2) { + int value = random.nextInt() & 0xFFFF; + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + if (testBigEndian) { + assertEquals(value, buffer.readUnsignedShort()); + } else { + assertEquals(value, buffer.readUnsignedShortLE()); + } + } + + assertEquals(buffer.capacity(), buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isReadable()); + assertFalse(buffer.isWritable()); + } + + @Test + public void testSequentialMediumAccess() { + testSequentialMediumAccess(true); + } + @Test + public void testSequentialMediumLEAccess() { + testSequentialMediumAccess(false); + } + + private void testSequentialMediumAccess(boolean testBigEndian) { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() / 3 * 3; i += 3) { + int value = random.nextInt(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + if (testBigEndian) { + buffer.writeMedium(value); + } else { + buffer.writeMediumLE(value); + } + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity() / 3 * 3, buffer.writerIndex()); + assertEquals(buffer.capacity() % 3, buffer.writableBytes()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() / 3 * 3; i += 3) { + int value = random.nextInt() << 8 >> 8; + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + if (testBigEndian) { + assertEquals(value, buffer.readMedium()); + } else { + assertEquals(value, buffer.readMediumLE()); + } + } + + assertEquals(buffer.capacity() / 3 * 3, buffer.readerIndex()); + assertEquals(buffer.capacity() / 3 * 3, buffer.writerIndex()); + assertEquals(0, buffer.readableBytes()); + assertEquals(buffer.capacity() % 3, buffer.writableBytes()); + } + + @Test + public void 
testSequentialUnsignedMediumAccess() { + testSequentialUnsignedMediumAccess(true); + } + + @Test + public void testSequentialUnsignedMediumLEAccess() { + testSequentialUnsignedMediumAccess(false); + } + + private void testSequentialUnsignedMediumAccess(boolean testBigEndian) { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() / 3 * 3; i += 3) { + int value = random.nextInt() & 0x00FFFFFF; + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + if (testBigEndian) { + buffer.writeMedium(value); + } else { + buffer.writeMediumLE(value); + } + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity() / 3 * 3, buffer.writerIndex()); + assertEquals(buffer.capacity() % 3, buffer.writableBytes()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity() / 3 * 3; i += 3) { + int value = random.nextInt() & 0x00FFFFFF; + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + if (testBigEndian) { + assertEquals(value, buffer.readUnsignedMedium()); + } else { + assertEquals(value, buffer.readUnsignedMediumLE()); + } + } + + assertEquals(buffer.capacity() / 3 * 3, buffer.readerIndex()); + assertEquals(buffer.capacity() / 3 * 3, buffer.writerIndex()); + assertEquals(0, buffer.readableBytes()); + assertEquals(buffer.capacity() % 3, buffer.writableBytes()); + } + + @Test + public void testSequentialIntAccess() { + testSequentialIntAccess(true); + } + + @Test + public void testSequentialIntLEAccess() { + testSequentialIntAccess(false); + } + + private void testSequentialIntAccess(boolean testBigEndian) { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i += 4) { + int value = random.nextInt(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + if (testBigEndian) { + buffer.writeInt(value); + } else { + buffer.writeIntLE(value); + } + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + 
assertFalse(buffer.isWritable()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i += 4) { + int value = random.nextInt(); + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + if (testBigEndian) { + assertEquals(value, buffer.readInt()); + } else { + assertEquals(value, buffer.readIntLE()); + } + } + + assertEquals(buffer.capacity(), buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isReadable()); + assertFalse(buffer.isWritable()); + } + + @Test + public void testSequentialUnsignedIntAccess() { + testSequentialUnsignedIntAccess(true); + } + + @Test + public void testSequentialUnsignedIntLEAccess() { + testSequentialUnsignedIntAccess(false); + } + + private void testSequentialUnsignedIntAccess(boolean testBigEndian) { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i += 4) { + int value = random.nextInt(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + if (testBigEndian) { + buffer.writeInt(value); + } else { + buffer.writeIntLE(value); + } + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isWritable()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i += 4) { + long value = random.nextInt() & 0xFFFFFFFFL; + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + if (testBigEndian) { + assertEquals(value, buffer.readUnsignedInt()); + } else { + assertEquals(value, buffer.readUnsignedIntLE()); + } + } + + assertEquals(buffer.capacity(), buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isReadable()); + assertFalse(buffer.isWritable()); + } + + @Test + public void testSequentialLongAccess() { + testSequentialLongAccess(true); + } + + @Test + public void testSequentialLongLEAccess() { + testSequentialLongAccess(false); + } + + private void 
testSequentialLongAccess(boolean testBigEndian) { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i += 8) { + long value = random.nextLong(); + assertEquals(i, buffer.writerIndex()); + assertTrue(buffer.isWritable()); + if (testBigEndian) { + buffer.writeLong(value); + } else { + buffer.writeLongLE(value); + } + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isWritable()); + + random.setSeed(seed); + for (int i = 0; i < buffer.capacity(); i += 8) { + long value = random.nextLong(); + assertEquals(i, buffer.readerIndex()); + assertTrue(buffer.isReadable()); + if (testBigEndian) { + assertEquals(value, buffer.readLong()); + } else { + assertEquals(value, buffer.readLongLE()); + } + } + + assertEquals(buffer.capacity(), buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + assertFalse(buffer.isReadable()); + assertFalse(buffer.isWritable()); + } + + @Test + public void testByteArrayTransfer() { + byte[] value = new byte[BLOCK_SIZE * 2]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(value); + buffer.setBytes(i, value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE); + } + + random.setSeed(seed); + byte[] expectedValue = new byte[BLOCK_SIZE * 2]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValue); + int valueOffset = random.nextInt(BLOCK_SIZE); + buffer.getBytes(i, value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue[j], value[j]); + } + } + } + + @Test + public void testRandomByteArrayTransfer1() { + byte[] value = new byte[BLOCK_SIZE]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(value); + buffer.setBytes(i, value); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE]; + ByteBuf expectedValue = 
wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + buffer.getBytes(i, value); + for (int j = 0; j < BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value[j]); + } + } + } + + @Test + public void testRandomByteArrayTransfer2() { + byte[] value = new byte[BLOCK_SIZE * 2]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(value); + buffer.setBytes(i, value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + buffer.getBytes(i, value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value[j]); + } + } + } + + @Test + public void testRandomHeapBufferTransfer1() { + byte[] valueContent = new byte[BLOCK_SIZE]; + ByteBuf value = wrappedBuffer(valueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + value.setIndex(0, BLOCK_SIZE); + buffer.setBytes(i, value); + assertEquals(BLOCK_SIZE, value.readerIndex()); + assertEquals(BLOCK_SIZE, value.writerIndex()); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + value.clear(); + buffer.getBytes(i, value); + assertEquals(0, value.readerIndex()); + assertEquals(BLOCK_SIZE, value.writerIndex()); + for (int j = 0; j < BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + } 
+ } + + @Test + public void testRandomHeapBufferTransfer2() { + byte[] valueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf value = wrappedBuffer(valueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + buffer.setBytes(i, value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + buffer.getBytes(i, value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + } + } + + @Test + public void testRandomDirectBufferTransfer() { + byte[] tmp = new byte[BLOCK_SIZE * 2]; + ByteBuf value = directBuffer(BLOCK_SIZE * 2); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(tmp); + value.setBytes(0, tmp, 0, value.capacity()); + buffer.setBytes(i, value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE); + } + + random.setSeed(seed); + ByteBuf expectedValue = directBuffer(BLOCK_SIZE * 2); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(tmp); + expectedValue.setBytes(0, tmp, 0, expectedValue.capacity()); + int valueOffset = random.nextInt(BLOCK_SIZE); + buffer.getBytes(i, value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + } + value.release(); + expectedValue.release(); + } + + @Test + public void testRandomByteBufferTransfer() { + ByteBuffer value = ByteBuffer.allocate(BLOCK_SIZE * 2); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(value.array()); + 
value.clear().position(random.nextInt(BLOCK_SIZE)); + value.limit(value.position() + BLOCK_SIZE); + buffer.setBytes(i, value); + } + + random.setSeed(seed); + ByteBuffer expectedValue = ByteBuffer.allocate(BLOCK_SIZE * 2); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValue.array()); + int valueOffset = random.nextInt(BLOCK_SIZE); + value.clear().position(valueOffset).limit(valueOffset + BLOCK_SIZE); + buffer.getBytes(i, value); + assertEquals(valueOffset + BLOCK_SIZE, value.position()); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.get(j), value.get(j)); + } + } + } + + @Test + public void testSequentialByteArrayTransfer1() { + byte[] value = new byte[BLOCK_SIZE]; + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(value); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + buffer.writeBytes(value); + } + + random.setSeed(seed); + byte[] expectedValue = new byte[BLOCK_SIZE]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValue); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + buffer.readBytes(value); + for (int j = 0; j < BLOCK_SIZE; j ++) { + assertEquals(expectedValue[j], value[j]); + } + } + } + + @Test + public void testSequentialByteArrayTransfer2() { + byte[] value = new byte[BLOCK_SIZE * 2]; + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(value); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + int readerIndex = random.nextInt(BLOCK_SIZE); + buffer.writeBytes(value, readerIndex, BLOCK_SIZE); + } + + random.setSeed(seed); + byte[] expectedValue = new byte[BLOCK_SIZE * 2]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + 
random.nextBytes(expectedValue); + int valueOffset = random.nextInt(BLOCK_SIZE); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + buffer.readBytes(value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue[j], value[j]); + } + } + } + + @Test + public void testSequentialHeapBufferTransfer1() { + byte[] valueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf value = wrappedBuffer(valueContent); + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + buffer.writeBytes(value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE); + assertEquals(0, value.readerIndex()); + assertEquals(valueContent.length, value.writerIndex()); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + buffer.readBytes(value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + assertEquals(0, value.readerIndex()); + assertEquals(valueContent.length, value.writerIndex()); + } + } + + @Test + public void testSequentialHeapBufferTransfer2() { + byte[] valueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf value = wrappedBuffer(valueContent); + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + int readerIndex = 
random.nextInt(BLOCK_SIZE); + value.readerIndex(readerIndex); + value.writerIndex(readerIndex + BLOCK_SIZE); + buffer.writeBytes(value); + assertEquals(readerIndex + BLOCK_SIZE, value.writerIndex()); + assertEquals(value.writerIndex(), value.readerIndex()); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + value.readerIndex(valueOffset); + value.writerIndex(valueOffset); + buffer.readBytes(value, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + assertEquals(valueOffset, value.readerIndex()); + assertEquals(valueOffset + BLOCK_SIZE, value.writerIndex()); + } + } + + @Test + public void testSequentialDirectBufferTransfer1() { + byte[] valueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf value = directBuffer(BLOCK_SIZE * 2); + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + value.setBytes(0, valueContent); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + buffer.writeBytes(value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE); + assertEquals(0, value.readerIndex()); + assertEquals(0, value.writerIndex()); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + value.setBytes(0, valueContent); + assertEquals(i, buffer.readerIndex()); + 
assertEquals(CAPACITY, buffer.writerIndex()); + buffer.readBytes(value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + assertEquals(0, value.readerIndex()); + assertEquals(0, value.writerIndex()); + } + value.release(); + expectedValue.release(); + } + + @Test + public void testSequentialDirectBufferTransfer2() { + byte[] valueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf value = directBuffer(BLOCK_SIZE * 2); + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + value.setBytes(0, valueContent); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + int readerIndex = random.nextInt(BLOCK_SIZE); + value.readerIndex(0); + value.writerIndex(readerIndex + BLOCK_SIZE); + value.readerIndex(readerIndex); + buffer.writeBytes(value); + assertEquals(readerIndex + BLOCK_SIZE, value.writerIndex()); + assertEquals(value.writerIndex(), value.readerIndex()); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + value.setBytes(0, valueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + value.readerIndex(valueOffset); + value.writerIndex(valueOffset); + buffer.readBytes(value, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + assertEquals(valueOffset, value.readerIndex()); + assertEquals(valueOffset + BLOCK_SIZE, value.writerIndex()); + } + value.release(); + expectedValue.release(); + } + + @Test + public void 
testSequentialByteBufferBackedHeapBufferTransfer1() { + byte[] valueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf value = wrappedBuffer(ByteBuffer.allocate(BLOCK_SIZE * 2)); + value.writerIndex(0); + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + value.setBytes(0, valueContent); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + buffer.writeBytes(value, random.nextInt(BLOCK_SIZE), BLOCK_SIZE); + assertEquals(0, value.readerIndex()); + assertEquals(0, value.writerIndex()); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + value.setBytes(0, valueContent); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + buffer.readBytes(value, valueOffset, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + assertEquals(0, value.readerIndex()); + assertEquals(0, value.writerIndex()); + } + } + + @Test + public void testSequentialByteBufferBackedHeapBufferTransfer2() { + byte[] valueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf value = wrappedBuffer(ByteBuffer.allocate(BLOCK_SIZE * 2)); + value.writerIndex(0); + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(valueContent); + value.setBytes(0, valueContent); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + int readerIndex = random.nextInt(BLOCK_SIZE); + value.readerIndex(0); + value.writerIndex(readerIndex + BLOCK_SIZE); + value.readerIndex(readerIndex); + buffer.writeBytes(value); + assertEquals(readerIndex + 
BLOCK_SIZE, value.writerIndex()); + assertEquals(value.writerIndex(), value.readerIndex()); + } + + random.setSeed(seed); + byte[] expectedValueContent = new byte[BLOCK_SIZE * 2]; + ByteBuf expectedValue = wrappedBuffer(expectedValueContent); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValueContent); + value.setBytes(0, valueContent); + int valueOffset = random.nextInt(BLOCK_SIZE); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + value.readerIndex(valueOffset); + value.writerIndex(valueOffset); + buffer.readBytes(value, BLOCK_SIZE); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.getByte(j), value.getByte(j)); + } + assertEquals(valueOffset, value.readerIndex()); + assertEquals(valueOffset + BLOCK_SIZE, value.writerIndex()); + } + } + + @Test + public void testSequentialByteBufferTransfer() { + buffer.writerIndex(0); + ByteBuffer value = ByteBuffer.allocate(BLOCK_SIZE * 2); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(value.array()); + value.clear().position(random.nextInt(BLOCK_SIZE)); + value.limit(value.position() + BLOCK_SIZE); + buffer.writeBytes(value); + } + + random.setSeed(seed); + ByteBuffer expectedValue = ByteBuffer.allocate(BLOCK_SIZE * 2); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValue.array()); + int valueOffset = random.nextInt(BLOCK_SIZE); + value.clear().position(valueOffset).limit(valueOffset + BLOCK_SIZE); + buffer.readBytes(value); + assertEquals(valueOffset + BLOCK_SIZE, value.position()); + for (int j = valueOffset; j < valueOffset + BLOCK_SIZE; j ++) { + assertEquals(expectedValue.get(j), value.get(j)); + } + } + } + + @Test + public void testSequentialCopiedBufferTransfer1() { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + 
byte[] value = new byte[BLOCK_SIZE]; + random.nextBytes(value); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + buffer.writeBytes(value); + } + + random.setSeed(seed); + byte[] expectedValue = new byte[BLOCK_SIZE]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValue); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + ByteBuf actualValue = buffer.readBytes(BLOCK_SIZE); + assertEquals(wrappedBuffer(expectedValue), actualValue); + + // Make sure if it is a copied buffer. + actualValue.setByte(0, (byte) (actualValue.getByte(0) + 1)); + assertFalse(buffer.getByte(i) == actualValue.getByte(0)); + actualValue.release(); + } + } + + @Test + public void testSequentialSlice1() { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + byte[] value = new byte[BLOCK_SIZE]; + random.nextBytes(value); + assertEquals(0, buffer.readerIndex()); + assertEquals(i, buffer.writerIndex()); + buffer.writeBytes(value); + } + + random.setSeed(seed); + byte[] expectedValue = new byte[BLOCK_SIZE]; + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + random.nextBytes(expectedValue); + assertEquals(i, buffer.readerIndex()); + assertEquals(CAPACITY, buffer.writerIndex()); + ByteBuf actualValue = buffer.readSlice(BLOCK_SIZE); + assertEquals(buffer.order(), actualValue.order()); + assertEquals(wrappedBuffer(expectedValue), actualValue); + + // Make sure if it is a sliced buffer. 
+ actualValue.setByte(0, (byte) (actualValue.getByte(0) + 1)); + assertEquals(buffer.getByte(i), actualValue.getByte(0)); + } + } + + @Test + public void testWriteZero() { + assertThrows(IllegalArgumentException.class, () -> buffer.writeZero(-1)); + + buffer.clear(); + while (buffer.isWritable()) { + buffer.writeByte((byte) 0xFF); + } + + buffer.clear(); + for (int i = 0; i < buffer.capacity();) { + int length = Math.min(buffer.capacity() - i, random.nextInt(32)); + buffer.writeZero(length); + i += length; + } + + assertEquals(0, buffer.readerIndex()); + assertEquals(buffer.capacity(), buffer.writerIndex()); + + for (int i = 0; i < buffer.capacity(); i ++) { + assertEquals(0, buffer.getByte(i)); + } + } + + @Test + public void testDiscardReadBytes() { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i += 4) { + buffer.writeInt(i); + } + ByteBuf copy = copiedBuffer(buffer); + + // Make sure there's no effect if called when readerIndex is 0. + buffer.readerIndex(CAPACITY / 4); + int readerIndex = buffer.readerIndex(); + buffer.writerIndex(CAPACITY / 3); + int writerIndex = buffer.writerIndex(); + buffer.readerIndex(0); + buffer.writerIndex(CAPACITY / 2); + buffer.discardReadBytes(); + + assertEquals(0, buffer.readerIndex()); + assertEquals(CAPACITY / 2, buffer.writerIndex()); + assertEquals(copy.slice(0, CAPACITY / 2), buffer.slice(0, CAPACITY / 2)); + buffer.readerIndex(readerIndex); + assertEquals(CAPACITY / 4, buffer.readerIndex()); + buffer.writerIndex(writerIndex); + assertEquals(CAPACITY / 3, buffer.writerIndex()); + + // Make sure bytes after writerIndex is not copied. 
+ buffer.readerIndex(1); + buffer.writerIndex(CAPACITY / 2); + buffer.discardReadBytes(); + + assertEquals(0, buffer.readerIndex()); + assertEquals(CAPACITY / 2 - 1, buffer.writerIndex()); + assertEquals(copy.slice(1, CAPACITY / 2 - 1), buffer.slice(0, CAPACITY / 2 - 1)); + + if (discardReadBytesDoesNotMoveWritableBytes()) { + // If writable bytes were copied, the test should fail to avoid unnecessary memory bandwidth consumption. + assertFalse(copy.slice(CAPACITY / 2, CAPACITY / 2).equals(buffer.slice(CAPACITY / 2 - 1, CAPACITY / 2))); + } else { + assertEquals(copy.slice(CAPACITY / 2, CAPACITY / 2), buffer.slice(CAPACITY / 2 - 1, CAPACITY / 2)); + } + + copy.release(); + } + + /** + * The similar test case with {@link #testDiscardReadBytes()} but this one + * discards a large chunk at once. + */ + @Test + public void testDiscardReadBytes2() { + buffer.writerIndex(0); + for (int i = 0; i < buffer.capacity(); i ++) { + buffer.writeByte((byte) i); + } + ByteBuf copy = copiedBuffer(buffer); + + // Discard the first (CAPACITY / 2 - 1) bytes. 
+ buffer.setIndex(CAPACITY / 2 - 1, CAPACITY - 1); + buffer.discardReadBytes(); + assertEquals(0, buffer.readerIndex()); + assertEquals(CAPACITY / 2, buffer.writerIndex()); + for (int i = 0; i < CAPACITY / 2; i ++) { + assertEquals(copy.slice(CAPACITY / 2 - 1 + i, CAPACITY / 2 - i), buffer.slice(i, CAPACITY / 2 - i)); + } + copy.release(); + } + + @Test + public void testStreamTransfer1() throws Exception { + byte[] expected = new byte[buffer.capacity()]; + random.nextBytes(expected); + + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + ByteArrayInputStream in = new ByteArrayInputStream(expected, i, BLOCK_SIZE); + assertEquals(BLOCK_SIZE, buffer.setBytes(i, in, BLOCK_SIZE)); + assertEquals(-1, buffer.setBytes(i, in, 0)); + } + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + buffer.getBytes(i, out, BLOCK_SIZE); + } + + assertTrue(Arrays.equals(expected, out.toByteArray())); + } + + @Test + public void testStreamTransfer2() throws Exception { + byte[] expected = new byte[buffer.capacity()]; + random.nextBytes(expected); + buffer.clear(); + + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + ByteArrayInputStream in = new ByteArrayInputStream(expected, i, BLOCK_SIZE); + assertEquals(i, buffer.writerIndex()); + buffer.writeBytes(in, BLOCK_SIZE); + assertEquals(i + BLOCK_SIZE, buffer.writerIndex()); + } + + ByteArrayOutputStream out = new ByteArrayOutputStream(); + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + assertEquals(i, buffer.readerIndex()); + buffer.readBytes(out, BLOCK_SIZE); + assertEquals(i + BLOCK_SIZE, buffer.readerIndex()); + } + + assertTrue(Arrays.equals(expected, out.toByteArray())); + } + + @Test + public void testCopy() { + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + buffer.setByte(i, value); + } + + final int readerIndex = CAPACITY / 
3; + final int writerIndex = CAPACITY * 2 / 3; + buffer.setIndex(readerIndex, writerIndex); + + // Make sure all properties are copied. + ByteBuf copy = buffer.copy(); + assertEquals(0, copy.readerIndex()); + assertEquals(buffer.readableBytes(), copy.writerIndex()); + assertEquals(buffer.readableBytes(), copy.capacity()); + assertSame(buffer.order(), copy.order()); + for (int i = 0; i < copy.capacity(); i ++) { + assertEquals(buffer.getByte(i + readerIndex), copy.getByte(i)); + } + + // Make sure the buffer content is independent from each other. + buffer.setByte(readerIndex, (byte) (buffer.getByte(readerIndex) + 1)); + assertTrue(buffer.getByte(readerIndex) != copy.getByte(0)); + copy.setByte(1, (byte) (copy.getByte(1) + 1)); + assertTrue(buffer.getByte(readerIndex + 1) != copy.getByte(1)); + copy.release(); + } + + @Test + public void testDuplicate() { + for (int i = 0; i < buffer.capacity(); i ++) { + byte value = (byte) random.nextInt(); + buffer.setByte(i, value); + } + + final int readerIndex = CAPACITY / 3; + final int writerIndex = CAPACITY * 2 / 3; + buffer.setIndex(readerIndex, writerIndex); + + // Make sure all properties are copied. + ByteBuf duplicate = buffer.duplicate(); + assertSame(buffer.order(), duplicate.order()); + assertEquals(buffer.readableBytes(), duplicate.readableBytes()); + assertEquals(0, buffer.compareTo(duplicate)); + + // Make sure the buffer content is shared. 
+ buffer.setByte(readerIndex, (byte) (buffer.getByte(readerIndex) + 1)); + assertEquals(buffer.getByte(readerIndex), duplicate.getByte(duplicate.readerIndex())); + duplicate.setByte(duplicate.readerIndex(), (byte) (duplicate.getByte(duplicate.readerIndex()) + 1)); + assertEquals(buffer.getByte(readerIndex), duplicate.getByte(duplicate.readerIndex())); + } + + @Test + public void testSliceEndianness() throws Exception { + assertEquals(buffer.order(), buffer.slice(0, buffer.capacity()).order()); + assertEquals(buffer.order(), buffer.slice(0, buffer.capacity() - 1).order()); + assertEquals(buffer.order(), buffer.slice(1, buffer.capacity() - 1).order()); + assertEquals(buffer.order(), buffer.slice(1, buffer.capacity() - 2).order()); + } + + @Test + public void testSliceIndex() throws Exception { + assertEquals(0, buffer.slice(0, buffer.capacity()).readerIndex()); + assertEquals(0, buffer.slice(0, buffer.capacity() - 1).readerIndex()); + assertEquals(0, buffer.slice(1, buffer.capacity() - 1).readerIndex()); + assertEquals(0, buffer.slice(1, buffer.capacity() - 2).readerIndex()); + + assertEquals(buffer.capacity(), buffer.slice(0, buffer.capacity()).writerIndex()); + assertEquals(buffer.capacity() - 1, buffer.slice(0, buffer.capacity() - 1).writerIndex()); + assertEquals(buffer.capacity() - 1, buffer.slice(1, buffer.capacity() - 1).writerIndex()); + assertEquals(buffer.capacity() - 2, buffer.slice(1, buffer.capacity() - 2).writerIndex()); + } + + @Test + public void testRetainedSliceIndex() throws Exception { + ByteBuf retainedSlice = buffer.retainedSlice(0, buffer.capacity()); + assertEquals(0, retainedSlice.readerIndex()); + retainedSlice.release(); + + retainedSlice = buffer.retainedSlice(0, buffer.capacity() - 1); + assertEquals(0, retainedSlice.readerIndex()); + retainedSlice.release(); + + retainedSlice = buffer.retainedSlice(1, buffer.capacity() - 1); + assertEquals(0, retainedSlice.readerIndex()); + retainedSlice.release(); + + retainedSlice = 
buffer.retainedSlice(1, buffer.capacity() - 2); + assertEquals(0, retainedSlice.readerIndex()); + retainedSlice.release(); + + retainedSlice = buffer.retainedSlice(0, buffer.capacity()); + assertEquals(buffer.capacity(), retainedSlice.writerIndex()); + retainedSlice.release(); + + retainedSlice = buffer.retainedSlice(0, buffer.capacity() - 1); + assertEquals(buffer.capacity() - 1, retainedSlice.writerIndex()); + retainedSlice.release(); + + retainedSlice = buffer.retainedSlice(1, buffer.capacity() - 1); + assertEquals(buffer.capacity() - 1, retainedSlice.writerIndex()); + retainedSlice.release(); + + retainedSlice = buffer.retainedSlice(1, buffer.capacity() - 2); + assertEquals(buffer.capacity() - 2, retainedSlice.writerIndex()); + retainedSlice.release(); + } + + @Test + @SuppressWarnings("ObjectEqualsNull") + public void testEquals() { + assertFalse(buffer.equals(null)); + assertFalse(buffer.equals(new Object())); + + byte[] value = new byte[32]; + buffer.setIndex(0, value.length); + random.nextBytes(value); + buffer.setBytes(0, value); + + assertEquals(buffer, wrappedBuffer(value)); + assertEquals(buffer, wrappedBuffer(value).order(LITTLE_ENDIAN)); + + value[0] ++; + assertNotEquals(buffer, wrappedBuffer(value)); + assertNotEquals(buffer, wrappedBuffer(value).order(LITTLE_ENDIAN)); + } + + @Test + public void testCompareTo() { + assertThrows(NullPointerException.class, () -> buffer.compareTo(null)); + + // Fill the random stuff + byte[] value = new byte[32]; + random.nextBytes(value); + // Prevent overflow / underflow + if (value[0] == 0) { + value[0] ++; + } else if (value[0] == -1) { + value[0] --; + } + + buffer.setIndex(0, value.length); + buffer.setBytes(0, value); + + assertEquals(0, buffer.compareTo(wrappedBuffer(value))); + assertEquals(0, buffer.compareTo(wrappedBuffer(value).order(LITTLE_ENDIAN))); + + value[0] ++; + assertTrue(buffer.compareTo(wrappedBuffer(value)) < 0); + assertTrue(buffer.compareTo(wrappedBuffer(value).order(LITTLE_ENDIAN)) < 0); + 
value[0] -= 2; + assertTrue(buffer.compareTo(wrappedBuffer(value)) > 0); + assertTrue(buffer.compareTo(wrappedBuffer(value).order(LITTLE_ENDIAN)) > 0); + value[0] ++; + + assertTrue(buffer.compareTo(wrappedBuffer(value, 0, 31)) > 0); + assertTrue(buffer.compareTo(wrappedBuffer(value, 0, 31).order(LITTLE_ENDIAN)) > 0); + assertTrue(buffer.slice(0, 31).compareTo(wrappedBuffer(value)) < 0); + assertTrue(buffer.slice(0, 31).compareTo(wrappedBuffer(value).order(LITTLE_ENDIAN)) < 0); + + ByteBuf retainedSlice = buffer.retainedSlice(0, 31); + assertTrue(retainedSlice.compareTo(wrappedBuffer(value)) < 0); + retainedSlice.release(); + + retainedSlice = buffer.retainedSlice(0, 31); + assertTrue(retainedSlice.compareTo(wrappedBuffer(value).order(LITTLE_ENDIAN)) < 0); + retainedSlice.release(); + } + + @Test + public void testCompareTo2() { + byte[] bytes = {1, 2, 3, 4}; + byte[] bytesReversed = {4, 3, 2, 1}; + + ByteBuf buf1 = newBuffer(4).clear().writeBytes(bytes).order(ByteOrder.LITTLE_ENDIAN); + ByteBuf buf2 = newBuffer(4).clear().writeBytes(bytesReversed).order(ByteOrder.LITTLE_ENDIAN); + ByteBuf buf3 = newBuffer(4).clear().writeBytes(bytes).order(ByteOrder.BIG_ENDIAN); + ByteBuf buf4 = newBuffer(4).clear().writeBytes(bytesReversed).order(ByteOrder.BIG_ENDIAN); + try { + assertEquals(buf1.compareTo(buf2), buf3.compareTo(buf4)); + assertEquals(buf2.compareTo(buf1), buf4.compareTo(buf3)); + assertEquals(buf1.compareTo(buf3), buf2.compareTo(buf4)); + assertEquals(buf3.compareTo(buf1), buf4.compareTo(buf2)); + } finally { + buf1.release(); + buf2.release(); + buf3.release(); + buf4.release(); + } + } + + @Test + public void testToString() { + ByteBuf copied = copiedBuffer("Hello, World!", CharsetUtil.ISO_8859_1); + buffer.clear(); + buffer.writeBytes(copied); + assertEquals("Hello, World!", buffer.toString(CharsetUtil.ISO_8859_1)); + copied.release(); + } + + @Test + public void testToStringMultipleThreads() throws Throwable { + buffer.clear(); + buffer.writeBytes("Hello, 
World!".getBytes(CharsetUtil.ISO_8859_1)); + + final AtomicInteger counter = new AtomicInteger(30000); + final AtomicReference errorRef = new AtomicReference<>(); + List threads = new ArrayList<>(); + for (int i = 0; i < 10; i++) { + Thread thread = new Thread(() -> { + try { + while (errorRef.get() == null && counter.decrementAndGet() > 0) { + assertEquals("Hello, World!", buffer.toString(CharsetUtil.ISO_8859_1)); + } + } catch (Throwable cause) { + errorRef.compareAndSet(null, cause); + } + }); + threads.add(thread); + } + for (Thread thread : threads) { + thread.start(); + } + + assertTimeoutPreemptively(Duration.ofSeconds(10), () -> { + for (Thread thread : threads) { + thread.join(); + } + }); + + Throwable error = errorRef.get(); + if (error != null) { + throw error; + } + } + + @Test + public void testSWARIndexOf() { + ByteBuf buffer = newBuffer(16); + buffer.clear(); + // Ensure the buffer is completely zeroed. + buffer.setZero(0, buffer.capacity()); + buffer.writeByte((byte) 0); // 0 + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); // 7 + + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 0); + buffer.writeByte((byte) 1); // 11 + buffer.writeByte((byte) 2); + buffer.writeByte((byte) 3); + buffer.writeByte((byte) 4); + buffer.writeByte((byte) 1); + assertEquals(11, buffer.indexOf(0, 12, (byte) 1)); + assertEquals(12, buffer.indexOf(0, 16, (byte) 2)); + assertEquals(-1, buffer.indexOf(0, 11, (byte) 1)); + assertEquals(11, buffer.indexOf(0, 16, (byte) 1)); + buffer.release(); + } + + @Test + public void testIndexOf() { + buffer.clear(); + // Ensure the buffer is completely zeroed. 
+ buffer.setZero(0, buffer.capacity()); + + buffer.writeByte((byte) 1); + buffer.writeByte((byte) 2); + buffer.writeByte((byte) 3); + buffer.writeByte((byte) 2); + buffer.writeByte((byte) 1); + + assertEquals(-1, buffer.indexOf(1, 4, (byte) 1)); + assertEquals(-1, buffer.indexOf(4, 1, (byte) 1)); + assertEquals(1, buffer.indexOf(1, 4, (byte) 2)); + assertEquals(3, buffer.indexOf(4, 1, (byte) 2)); + + assertThrows(IndexOutOfBoundsException.class, () -> buffer.indexOf(0, buffer.capacity() + 1, (byte) 0)); + assertThrows(IndexOutOfBoundsException.class, () -> buffer.indexOf(buffer.capacity(), -1, (byte) 0)); + + assertEquals(4, buffer.indexOf(buffer.capacity() + 1, 0, (byte) 1)); + assertEquals(0, buffer.indexOf(-1, buffer.capacity(), (byte) 1)); + } + + @Test + public void testIndexOfReleaseBuffer() { + ByteBuf buffer = releasedBuffer(); + if (buffer.capacity() != 0) { + assertThrows(IllegalReferenceCountException.class, () -> buffer.indexOf(0, 1, (byte) 1)); + } else { + assertEquals(-1, buffer.indexOf(0, 1, (byte) 1)); + } + } + + @Test + public void testNioBuffer1() { + assumeTrue(buffer.nioBufferCount() == 1); + + byte[] value = new byte[buffer.capacity()]; + random.nextBytes(value); + buffer.clear(); + buffer.writeBytes(value); + + assertRemainingEquals(ByteBuffer.wrap(value), buffer.nioBuffer()); + } + + @Test + public void testToByteBuffer2() { + assumeTrue(buffer.nioBufferCount() == 1); + + byte[] value = new byte[buffer.capacity()]; + random.nextBytes(value); + buffer.clear(); + buffer.writeBytes(value); + + for (int i = 0; i < buffer.capacity() - BLOCK_SIZE + 1; i += BLOCK_SIZE) { + assertRemainingEquals(ByteBuffer.wrap(value, i, BLOCK_SIZE), buffer.nioBuffer(i, BLOCK_SIZE)); + } + } + + private static void assertRemainingEquals(ByteBuffer expected, ByteBuffer actual) { + int remaining = expected.remaining(); + int remaining2 = actual.remaining(); + + assertEquals(remaining, remaining2); + byte[] array1 = new byte[remaining]; + byte[] array2 = new 
byte[remaining2]; + expected.get(array1); + actual.get(array2); + assertArrayEquals(array1, array2); + } + + @Test + public void testToByteBuffer3() { + assumeTrue(buffer.nioBufferCount() == 1); + + assertEquals(buffer.order(), buffer.nioBuffer().order()); + } + + @Test + public void testSkipBytes1() { + buffer.setIndex(CAPACITY / 4, CAPACITY / 2); + + buffer.skipBytes(CAPACITY / 4); + assertEquals(CAPACITY / 4 * 2, buffer.readerIndex()); + + assertThrows(IndexOutOfBoundsException.class, () -> buffer.skipBytes(CAPACITY / 4 + 1)); + + // Should remain unchanged. + assertEquals(CAPACITY / 4 * 2, buffer.readerIndex()); + } + + @Test + public void testHashCode() { + ByteBuf elemA = buffer(15); + ByteBuf elemB = directBuffer(15); + elemA.writeBytes(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 }); + elemB.writeBytes(new byte[] { 6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9 }); + + Set set = new HashSet<>(); + set.add(elemA); + set.add(elemB); + + assertEquals(2, set.size()); + ByteBuf elemACopy = elemA.copy(); + assertTrue(set.contains(elemACopy)); + + ByteBuf elemBCopy = elemB.copy(); + assertTrue(set.contains(elemBCopy)); + + buffer.clear(); + buffer.writeBytes(elemA.duplicate()); + + assertTrue(set.remove(buffer)); + assertFalse(set.contains(elemA)); + assertEquals(1, set.size()); + + buffer.clear(); + buffer.writeBytes(elemB.duplicate()); + assertTrue(set.remove(buffer)); + assertFalse(set.contains(elemB)); + assertEquals(0, set.size()); + elemA.release(); + elemB.release(); + elemACopy.release(); + elemBCopy.release(); + } + + // Test case for https://github.com/netty/netty/issues/325 + @Test + public void testDiscardAllReadBytes() { + buffer.writerIndex(buffer.capacity()); + buffer.readerIndex(buffer.writerIndex()); + buffer.discardReadBytes(); + } + + @Test + public void testForEachByte() { + buffer.clear(); + for (int i = 0; i < CAPACITY; i ++) { + buffer.writeByte(i + 1); + } + + final AtomicInteger lastIndex = new AtomicInteger(); + 
buffer.setIndex(CAPACITY / 4, CAPACITY * 3 / 4); + assertEquals(-1, buffer.forEachByte(new ByteProcessor() { + int i = CAPACITY / 4; + + @Override + public boolean process(byte value) { + assertEquals((byte) (i + 1), value); + lastIndex.set(i); + i ++; + return true; + } + })); + + assertEquals(CAPACITY * 3 / 4 - 1, lastIndex.get()); + } + + @Test + public void testForEachByteAbort() { + buffer.clear(); + for (int i = 0; i < CAPACITY; i ++) { + buffer.writeByte(i + 1); + } + + final int stop = CAPACITY / 2; + assertEquals(stop, buffer.forEachByte(CAPACITY / 3, CAPACITY / 3, new ByteProcessor() { + int i = CAPACITY / 3; + + @Override + public boolean process(byte value) { + assertEquals((byte) (i + 1), value); + if (i == stop) { + return false; + } + + i++; + return true; + } + })); + } + + @Test + public void testForEachByteDesc() { + buffer.clear(); + for (int i = 0; i < CAPACITY; i ++) { + buffer.writeByte(i + 1); + } + + final AtomicInteger lastIndex = new AtomicInteger(); + assertEquals(-1, buffer.forEachByteDesc(CAPACITY / 4, CAPACITY * 2 / 4, new ByteProcessor() { + int i = CAPACITY * 3 / 4 - 1; + + @Override + public boolean process(byte value) { + assertEquals((byte) (i + 1), value); + lastIndex.set(i); + i --; + return true; + } + })); + + assertEquals(CAPACITY / 4, lastIndex.get()); + } + + @Test + public void testInternalNioBuffer() { + testInternalNioBuffer(128); + testInternalNioBuffer(1024); + testInternalNioBuffer(4 * 1024); + testInternalNioBuffer(64 * 1024); + testInternalNioBuffer(32 * 1024 * 1024); + testInternalNioBuffer(64 * 1024 * 1024); + } + + private void testInternalNioBuffer(int a) { + ByteBuf buffer = newBuffer(2); + ByteBuffer buf = buffer.internalNioBuffer(buffer.readerIndex(), 1); + assertEquals(1, buf.remaining()); + + byte[] data = new byte[a]; + ThreadLocalRandom.current().nextBytes(data); + buffer.writeBytes(data); + + buf = buffer.internalNioBuffer(buffer.readerIndex(), a); + assertEquals(a, buf.remaining()); + + for (int i = 0; 
i < a; i++) { + assertEquals(data[i], buf.get()); + } + assertFalse(buf.hasRemaining()); + buffer.release(); + } + + @Test + public void testDuplicateReadGatheringByteChannelMultipleThreads() throws Exception { + testReadGatheringByteChannelMultipleThreads(false); + } + + @Test + public void testSliceReadGatheringByteChannelMultipleThreads() throws Exception { + testReadGatheringByteChannelMultipleThreads(true); + } + + private void testReadGatheringByteChannelMultipleThreads(final boolean slice) throws Exception { + final byte[] bytes = new byte[8]; + random.nextBytes(bytes); + + final ByteBuf buffer = newBuffer(8); + buffer.writeBytes(bytes); + final CountDownLatch latch = new CountDownLatch(60000); + final CyclicBarrier barrier = new CyclicBarrier(11); + for (int i = 0; i < 10; i++) { + new Thread(() -> { + while (latch.getCount() > 0) { + ByteBuf buf; + if (slice) { + buf = buffer.slice(); + } else { + buf = buffer.duplicate(); + } + TestGatheringByteChannel channel = new TestGatheringByteChannel(); + + while (buf.isReadable()) { + try { + buf.readBytes(channel, buf.readableBytes()); + } catch (IOException e) { + // Never happens + return; + } + } + assertArrayEquals(bytes, channel.writtenBytes()); + latch.countDown(); + } + try { + barrier.await(); + } catch (Exception e) { + // ignore + } + }).start(); + } + latch.await(10, TimeUnit.SECONDS); + barrier.await(5, TimeUnit.SECONDS); + buffer.release(); + } + + @Test + public void testDuplicateReadOutputStreamMultipleThreads() throws Exception { + testReadOutputStreamMultipleThreads(false); + } + + @Test + public void testSliceReadOutputStreamMultipleThreads() throws Exception { + testReadOutputStreamMultipleThreads(true); + } + + private void testReadOutputStreamMultipleThreads(final boolean slice) throws Exception { + final byte[] bytes = new byte[8]; + random.nextBytes(bytes); + + final ByteBuf buffer = newBuffer(8); + buffer.writeBytes(bytes); + final CountDownLatch latch = new CountDownLatch(60000); + final 
CyclicBarrier barrier = new CyclicBarrier(11); + for (int i = 0; i < 10; i++) { + new Thread(() -> { + while (latch.getCount() > 0) { + ByteBuf buf; + if (slice) { + buf = buffer.slice(); + } else { + buf = buffer.duplicate(); + } + ByteArrayOutputStream out = new ByteArrayOutputStream(); + + while (buf.isReadable()) { + try { + buf.readBytes(out, buf.readableBytes()); + } catch (IOException e) { + // Never happens + return; + } + } + assertArrayEquals(bytes, out.toByteArray()); + latch.countDown(); + } + try { + barrier.await(); + } catch (Exception e) { + // ignore + } + }).start(); + } + latch.await(10, TimeUnit.SECONDS); + barrier.await(5, TimeUnit.SECONDS); + buffer.release(); + } + + @Test + public void testDuplicateBytesInArrayMultipleThreads() throws Exception { + testBytesInArrayMultipleThreads(false); + } + + @Test + public void testSliceBytesInArrayMultipleThreads() throws Exception { + testBytesInArrayMultipleThreads(true); + } + + private void testBytesInArrayMultipleThreads(final boolean slice) throws Exception { + final byte[] bytes = new byte[8]; + random.nextBytes(bytes); + + final ByteBuf buffer = newBuffer(8); + buffer.writeBytes(bytes); + final AtomicReference cause = new AtomicReference<>(); + final CountDownLatch latch = new CountDownLatch(60000); + final CyclicBarrier barrier = new CyclicBarrier(11); + for (int i = 0; i < 10; i++) { + new Thread(() -> { + while (cause.get() == null && latch.getCount() > 0) { + ByteBuf buf; + if (slice) { + buf = buffer.slice(); + } else { + buf = buffer.duplicate(); + } + + byte[] array = new byte[8]; + buf.readBytes(array); + + assertArrayEquals(bytes, array); + + Arrays.fill(array, (byte) 0); + buf.getBytes(0, array); + assertArrayEquals(bytes, array); + + latch.countDown(); + } + try { + barrier.await(); + } catch (Exception e) { + // ignore + } + }).start(); + } + latch.await(10, TimeUnit.SECONDS); + barrier.await(5, TimeUnit.SECONDS); + assertNull(cause.get()); + buffer.release(); + } + + @Test + public 
void readByteThrowsIndexOutOfBoundsException() { + assertThrows(IndexOutOfBoundsException.class, () -> { + final ByteBuf buffer = newBuffer(8); + try { + buffer.writeByte(0); + assertEquals((byte) 0, buffer.readByte()); + buffer.readByte(); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testNioBufferExposeOnlyRegion() { + final ByteBuf buffer = newBuffer(8); + byte[] data = new byte[8]; + random.nextBytes(data); + buffer.writeBytes(data); + + ByteBuffer nioBuf = buffer.nioBuffer(1, data.length - 2); + assertEquals(0, nioBuf.position()); + assertEquals(6, nioBuf.remaining()); + + for (int i = 1; nioBuf.hasRemaining(); i++) { + assertEquals(data[i], nioBuf.get()); + } + buffer.release(); + } + + @Test + public void ensureWritableWithForceDoesNotThrow() { + ensureWritableDoesNotThrow(true); + } + + @Test + public void ensureWritableWithOutForceDoesNotThrow() { + ensureWritableDoesNotThrow(false); + } + + private void ensureWritableDoesNotThrow(boolean force) { + final ByteBuf buffer = newBuffer(8); + buffer.writerIndex(buffer.capacity()); + buffer.ensureWritable(8, force); + buffer.release(); + } + + // See: + // - https://github.com/netty/netty/issues/2587 + // - https://github.com/netty/netty/issues/2580 + @Test + public void testLittleEndianWithExpand() { + ByteBuf buffer = newBuffer(0).order(LITTLE_ENDIAN); + buffer.writeInt(0x12345678); + assertEquals("78563412", ByteBufUtil.hexDump(buffer)); + buffer.release(); + } + + private ByteBuf releasedBuffer() { + ByteBuf buffer = newBuffer(8); + // Clear the buffer so we are sure the reader and writer indices are 0. + // This is important as we may return a slice from newBuffer(...). 
+ buffer.clear(); + assertTrue(buffer.release()); + return buffer; + } + + @Test + public void testDiscardReadBytesAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().discardReadBytes()); + } + + @Test + public void testDiscardSomeReadBytesAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().discardSomeReadBytes()); + } + + @Test + public void testEnsureWritableAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().ensureWritable(16)); + } + + @Test + public void testGetBooleanAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBoolean(0)); + } + + @Test + public void testGetByteAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getByte(0)); + } + + @Test + public void testGetUnsignedByteAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedByte(0)); + } + + @Test + public void testGetShortAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getShort(0)); + } + + @Test + public void testGetShortLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getShortLE(0)); + } + + @Test + public void testGetUnsignedShortAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedShort(0)); + } + + @Test + public void testGetUnsignedShortLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedShortLE(0)); + } + + @Test + public void testGetMediumAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getMedium(0)); + } + + @Test + public void testGetMediumLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getMediumLE(0)); + } + + @Test + public void 
testGetUnsignedMediumAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedMedium(0)); + } + + @Test + public void testGetIntAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getInt(0)); + } + + @Test + public void testGetIntLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getIntLE(0)); + } + + @Test + public void testGetUnsignedIntAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedInt(0)); + } + + @Test + public void testGetUnsignedIntLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getUnsignedIntLE(0)); + } + + @Test + public void testGetLongAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getLong(0)); + } + + @Test + public void testGetLongLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getLongLE(0)); + } + + @Test + public void testGetCharAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getChar(0)); + } + + @Test + public void testGetFloatAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getFloat(0)); + } + + @Test + public void testGetFloatLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getFloatLE(0)); + } + + @Test + public void testGetDoubleAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getDouble(0)); + } + + @Test + public void testGetDoubleLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getDoubleLE(0)); + } + + @Test + public void testGetBytesAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(8); + try { + releasedBuffer().getBytes(0, buffer); + } finally { + 
buffer.release(); + } + }); + } + + @Test + public void testGetBytesAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(); + try { + releasedBuffer().getBytes(0, buffer, 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testGetBytesAfterRelease3() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(); + try { + releasedBuffer().getBytes(0, buffer, 0, 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testGetBytesAfterRelease4() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, new byte[8])); + } + + @Test + public void testGetBytesAfterRelease5() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, new byte[8], 0, 1)); + } + + @Test + public void testGetBytesAfterRelease6() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().getBytes(0, ByteBuffer.allocate(8))); + } + + @Test + public void testGetBytesAfterRelease7() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().getBytes(0, new ByteArrayOutputStream(), 1)); + } + + @Test + public void testGetBytesAfterRelease8() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().getBytes(0, new DevNullGatheringByteChannel(), 1)); + } + + @Test + public void testSetBooleanAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBoolean(0, true)); + } + + @Test + public void testSetByteAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setByte(0, 1)); + } + + @Test + public void testSetShortAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setShort(0, 1)); + } + + @Test + public void testSetShortLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> 
releasedBuffer().setShortLE(0, 1)); + } + + @Test + public void testSetMediumAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setMedium(0, 1)); + } + + @Test + public void testSetMediumLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setMediumLE(0, 1)); + } + + @Test + public void testSetIntAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setInt(0, 1)); + } + + @Test + public void testSetIntLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setIntLE(0, 1)); + } + + @Test + public void testSetLongAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setLong(0, 1)); + } + + @Test + public void testSetLongLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setLongLE(0, 1)); + } + + @Test + public void testSetCharAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setChar(0, 1)); + } + + @Test + public void testSetFloatAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setFloat(0, 1)); + } + + @Test + public void testSetDoubleAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setDouble(0, 1)); + } + + @Test + public void testSetBytesAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(); + try { + releasedBuffer().setBytes(0, buffer); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testSetBytesAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(); + try { + releasedBuffer().setBytes(0, buffer, 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testSetBytesAfterRelease3() { + assertThrows(IllegalReferenceCountException.class, () -> { + 
ByteBuf buffer = buffer(); + try { + releasedBuffer().setBytes(0, buffer, 0, 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testSetUsAsciiCharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> testSetCharSequenceAfterRelease0(CharsetUtil.US_ASCII)); + } + + @Test + public void testSetIso88591CharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> testSetCharSequenceAfterRelease0(CharsetUtil.ISO_8859_1)); + } + + @Test + public void testSetUtf8CharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> testSetCharSequenceAfterRelease0(CharsetUtil.UTF_8)); + } + + @Test + public void testSetUtf16CharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> testSetCharSequenceAfterRelease0(CharsetUtil.UTF_16)); + } + + private void testSetCharSequenceAfterRelease0(Charset charset) { + releasedBuffer().setCharSequence(0, "x", charset); + } + + @Test + public void testSetBytesAfterRelease4() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, new byte[8])); + } + + @Test + public void testSetBytesAfterRelease5() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, new byte[8], 0, 1)); + } + + @Test + public void testSetBytesAfterRelease6() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setBytes(0, ByteBuffer.allocate(8))); + } + + @Test + public void testSetBytesAfterRelease7() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().setBytes(0, new ByteArrayInputStream(new byte[8]), 1)); + } + + @Test + public void testSetBytesAfterRelease8() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().setBytes(0, new TestScatteringByteChannel(), 1)); + } + + @Test + public void testSetZeroAfterRelease() { + 
assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().setZero(0, 1)); + } + + @Test + public void testReadBooleanAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBoolean()); + } + + @Test + public void testReadByteAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readByte()); + } + + @Test + public void testReadUnsignedByteAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedByte()); + } + + @Test + public void testReadShortAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readShort()); + } + + @Test + public void testReadShortLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readShortLE()); + } + + @Test + public void testReadUnsignedShortAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedShort()); + } + + @Test + public void testReadUnsignedShortLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedShortLE()); + } + + @Test + public void testReadMediumAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readMedium()); + } + + @Test + public void testReadMediumLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readMediumLE()); + } + + @Test + public void testReadUnsignedMediumAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedMedium()); + } + + @Test + public void testReadUnsignedMediumLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedMediumLE()); + } + + @Test + public void testReadIntAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readInt()); + } + + @Test + public void 
testReadIntLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readIntLE()); + } + + @Test + public void testReadUnsignedIntAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedInt()); + } + + @Test + public void testReadUnsignedIntLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readUnsignedIntLE()); + } + + @Test + public void testReadLongAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readLong()); + } + + @Test + public void testReadLongLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readLongLE()); + } + + @Test + public void testReadCharAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readChar()); + } + + @Test + public void testReadFloatAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readFloat()); + } + + @Test + public void testReadFloatLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readFloatLE()); + } + + @Test + public void testReadDoubleAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readDouble()); + } + + @Test + public void testReadDoubleLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readDoubleLE()); + } + + @Test + public void testReadBytesAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(1)); + } + + @Test + public void testReadBytesAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(8); + try { + releasedBuffer().readBytes(buffer); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testReadBytesAfterRelease3() { + assertThrows(IllegalReferenceCountException.class, () -> { 
+ ByteBuf buffer = buffer(8); + try { + releasedBuffer().readBytes(buffer); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testReadBytesAfterRelease4() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(8); + try { + releasedBuffer().readBytes(buffer, 0, 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testReadBytesAfterRelease5() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(new byte[8])); + } + + @Test + public void testReadBytesAfterRelease6() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(new byte[8], 0, 1)); + } + + @Test + public void testReadBytesAfterRelease7() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().readBytes(ByteBuffer.allocate(8))); + } + + @Test + public void testReadBytesAfterRelease8() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().readBytes(new ByteArrayOutputStream(), 1)); + } + + @Test + public void testReadBytesAfterRelease9() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().readBytes(new ByteArrayOutputStream(), 1)); + } + + @Test + public void testReadBytesAfterRelease10() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().readBytes(new DevNullGatheringByteChannel(), 1)); + } + + @Test + public void testWriteBooleanAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBoolean(true)); + } + + @Test + public void testWriteByteAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeByte(1)); + } + + @Test + public void testWriteShortAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeShort(1)); + } + + @Test + public void testWriteShortLEAfterRelease() { + 
assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeShortLE(1)); + } + + @Test + public void testWriteMediumAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeMedium(1)); + } + + @Test + public void testWriteMediumLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeMediumLE(1)); + } + + @Test + public void testWriteIntAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeInt(1)); + } + + @Test + public void testWriteIntLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeIntLE(1)); + } + + @Test + public void testWriteLongAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeLong(1)); + } + + @Test + public void testWriteLongLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeLongLE(1)); + } + + @Test + public void testWriteCharAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeChar(1)); + } + + @Test + public void testWriteFloatAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeFloat(1)); + } + + @Test + public void testWriteFloatLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeFloatLE(1)); + } + + @Test + public void testWriteDoubleAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeDouble(1)); + } + + @Test + public void testWriteDoubleLEAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeDoubleLE(1)); + } + + @Test + public void testWriteBytesAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(8); + try { + releasedBuffer().writeBytes(buffer); + } finally { + buffer.release(); + 
} + }); + } + + @Test + public void testWriteBytesAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = copiedBuffer(new byte[8]); + try { + releasedBuffer().writeBytes(buffer, 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testWriteBytesAfterRelease3() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf buffer = buffer(8); + try { + releasedBuffer().writeBytes(buffer, 0, 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testWriteBytesAfterRelease4() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(new byte[8])); + } + + @Test + public void testWriteBytesAfterRelease5() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(new byte[8], 0 , 1)); + } + + @Test + public void testWriteBytesAfterRelease6() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeBytes(ByteBuffer.allocate(8))); + } + + @Test + public void testWriteBytesAfterRelease7() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().writeBytes(new ByteArrayInputStream(new byte[8]), 1)); + } + + @Test + public void testWriteBytesAfterRelease8() throws IOException { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().writeBytes(new TestScatteringByteChannel(), 1)); + } + + @Test + public void testWriteZeroAfterRelease() throws IOException { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().writeZero(1)); + } + + @Test + public void testWriteUsAsciiCharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> testWriteCharSequenceAfterRelease0(CharsetUtil.US_ASCII)); + } + + @Test + public void testWriteIso88591CharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> testWriteCharSequenceAfterRelease0(CharsetUtil.ISO_8859_1)); + } 
+ + @Test + public void testWriteUtf8CharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> testWriteCharSequenceAfterRelease0(CharsetUtil.UTF_8)); + } + + @Test + public void testWriteUtf16CharSequenceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> testWriteCharSequenceAfterRelease0(CharsetUtil.UTF_16)); + } + + private void testWriteCharSequenceAfterRelease0(Charset charset) { + releasedBuffer().writeCharSequence("x", charset); + } + + @Test + public void testForEachByteAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByte(new TestByteProcessor())); + } + + @Test + public void testForEachByteAfterRelease1() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByte(0, 1, new TestByteProcessor())); + } + + @Test + public void testForEachByteDescAfterRelease() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByteDesc(new TestByteProcessor())); + } + + @Test + public void testForEachByteDescAfterRelease1() { + assertThrows(IllegalReferenceCountException.class, + () -> releasedBuffer().forEachByteDesc(0, 1, new TestByteProcessor())); + } + + @Test + public void testCopyAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().copy()); + } + + @Test + public void testCopyAfterRelease1() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().copy()); + } + + @Test + public void testNioBufferAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffer()); + } + + @Test + public void testNioBufferAfterRelease1() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffer(0, 1)); + } + + @Test + public void testInternalNioBufferAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> { + ByteBuf releasedBuffer = releasedBuffer(); + 
releasedBuffer.internalNioBuffer(releasedBuffer.readerIndex(), 1); + }); + } + + @Test + public void testNioBuffersAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffers()); + } + + @Test + public void testNioBuffersAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().nioBuffers(0, 1)); + } + + @Test + public void testArrayAfterRelease() { + ByteBuf buf = releasedBuffer(); + if (buf.hasArray()) { + assertThrows(IllegalReferenceCountException.class, () -> buf.array()); + } + } + + @Test + public void testMemoryAddressAfterRelease() { + ByteBuf buf = releasedBuffer(); + if (buf.hasMemoryAddress()) { + assertThrows(IllegalReferenceCountException.class, () -> buf.memoryAddress()); + } + } + + @Test + public void testSliceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().slice()); + } + + @Test + public void testSliceAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().slice(0, 1)); + } + + private static void assertSliceFailAfterRelease(ByteBuf... 
bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + assertEquals(0, buf.refCnt()); + assertThrows(IllegalReferenceCountException.class, () -> buf.slice()); + } + } + + @Test + public void testSliceAfterReleaseRetainedSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + assertSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testSliceAfterReleaseRetainedSliceDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.duplicate(); + assertSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testSliceAfterReleaseRetainedSliceRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.retainedDuplicate(); + assertSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testSliceAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testSliceAfterReleaseRetainedDuplicateSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + ByteBuf buf3 = buf2.slice(0, 1); + assertSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testRetainedSliceAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().retainedSlice()); + } + + @Test + public void testRetainedSliceAfterRelease2() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().retainedSlice(0, 1)); + } + + private static void assertRetainedSliceFailAfterRelease(ByteBuf... 
bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + assertEquals(0, buf.refCnt()); + assertThrows(IllegalReferenceCountException.class, () -> buf.retainedSlice()); + } + } + + @Test + public void testRetainedSliceAfterReleaseRetainedSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + assertRetainedSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedSliceDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.duplicate(); + assertRetainedSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedSliceRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.retainedDuplicate(); + assertRetainedSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertRetainedSliceFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedSliceAfterReleaseRetainedDuplicateSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + ByteBuf buf3 = buf2.slice(0, 1); + assertRetainedSliceFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testDuplicateAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().duplicate()); + } + + @Test + public void testRetainedDuplicateAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().retainedDuplicate()); + } + + @Test + public void testReleaseAfterRelease() { + assertThrows(IllegalReferenceCountException.class, () -> releasedBuffer().release()); + } + + @Test + public void overReleasingMustNotCloseBuffer() { + ByteBuf buf = newBuffer(1); + assertThrows(IllegalReferenceCountException.class, () -> 
buf.release(10)); + assertThrows(IllegalReferenceCountException.class, () -> buf.release(2)); + assertThat(buf.refCnt()).isNotZero(); + assertTrue(buf.release()); + } + + private static void assertDuplicateFailAfterRelease(ByteBuf... bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + assertEquals(0, buf.refCnt()); + assertThrows(IllegalReferenceCountException.class, () -> buf.duplicate()); + } + } + + @Test + public void testDuplicateAfterReleaseRetainedSliceDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + ByteBuf buf3 = buf2.duplicate(); + assertDuplicateFailAfterRelease(buf, buf2, buf3); + } + + @Test + public void testDuplicateAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertDuplicateFailAfterRelease(buf, buf2); + } + + @Test + public void testDuplicateAfterReleaseRetainedDuplicateSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + ByteBuf buf3 = buf2.slice(0, 1); + assertDuplicateFailAfterRelease(buf, buf2, buf3); + } + + private static void assertRetainedDuplicateFailAfterRelease(ByteBuf... 
bufs) { + for (ByteBuf buf : bufs) { + if (buf.refCnt() > 0) { + buf.release(); + } + } + for (ByteBuf buf : bufs) { + assertEquals(0, buf.refCnt()); + assertThrows(IllegalReferenceCountException.class, () -> buf.retainedDuplicate()); + } + } + + @Test + public void testRetainedDuplicateAfterReleaseRetainedDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedDuplicate(); + assertRetainedDuplicateFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedDuplicateAfterReleaseDuplicate() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.duplicate(); + assertRetainedDuplicateFailAfterRelease(buf, buf2); + } + + @Test + public void testRetainedDuplicateAfterReleaseRetainedSlice() { + ByteBuf buf = newBuffer(1); + ByteBuf buf2 = buf.retainedSlice(0, 1); + assertRetainedDuplicateFailAfterRelease(buf, buf2); + } + + @Test + public void testSliceRelease() { + ByteBuf buf = newBuffer(8); + assertEquals(1, buf.refCnt()); + assertTrue(buf.slice().release()); + assertEquals(0, buf.refCnt()); + } + + @Test + public void testReadSliceOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testReadSliceOutOfBounds(false)); + } + + @Test + public void testReadRetainedSliceOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testReadSliceOutOfBounds(true)); + } + + private void testReadSliceOutOfBounds(boolean retainedSlice) { + ByteBuf buf = newBuffer(100); + try { + buf.writeZero(50); + if (retainedSlice) { + buf.readRetainedSlice(51); + } else { + buf.readSlice(51); + } + fail(); + } finally { + buf.release(); + } + } + + @Test + public void testWriteUsAsciiCharSequenceExpand() { + testWriteCharSequenceExpand(CharsetUtil.US_ASCII); + } + + @Test + public void testWriteUtf8CharSequenceExpand() { + testWriteCharSequenceExpand(CharsetUtil.UTF_8); + } + + @Test + public void testWriteIso88591CharSequenceExpand() { + testWriteCharSequenceExpand(CharsetUtil.ISO_8859_1); + } + @Test + public void 
testWriteUtf16CharSequenceExpand() { + testWriteCharSequenceExpand(CharsetUtil.UTF_16); + } + + private void testWriteCharSequenceExpand(Charset charset) { + ByteBuf buf = newBuffer(1); + try { + int writerIndex = buf.capacity() - 1; + buf.writerIndex(writerIndex); + int written = buf.writeCharSequence("AB", charset); + assertEquals(writerIndex, buf.writerIndex() - written); + } finally { + buf.release(); + } + } + + @Test + public void testSetUsAsciiCharSequenceNoExpand() { + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.US_ASCII)); + } + + @Test + public void testSetUtf8CharSequenceNoExpand() { + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.UTF_8)); + } + + @Test + public void testSetIso88591CharSequenceNoExpand() { + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.ISO_8859_1)); + } + + @Test + public void testSetUtf16CharSequenceNoExpand() { + assertThrows(IndexOutOfBoundsException.class, () -> testSetCharSequenceNoExpand(CharsetUtil.UTF_16)); + } + + private void testSetCharSequenceNoExpand(Charset charset) { + ByteBuf buf = newBuffer(1); + try { + buf.setCharSequence(0, "AB", charset); + } finally { + buf.release(); + } + } + + @Test + public void testSetUsAsciiCharSequence() { + testSetGetCharSequence(CharsetUtil.US_ASCII); + } + + @Test + public void testSetUtf8CharSequence() { + testSetGetCharSequence(CharsetUtil.UTF_8); + } + + @Test + public void testSetIso88591CharSequence() { + testSetGetCharSequence(CharsetUtil.ISO_8859_1); + } + + @Test + public void testSetUtf16CharSequence() { + testSetGetCharSequence(CharsetUtil.UTF_16); + } + + private static final CharBuffer EXTENDED_ASCII_CHARS, ASCII_CHARS; + + static { + char[] chars = new char[256]; + for (char c = 0; c < chars.length; c++) { + chars[c] = c; + } + EXTENDED_ASCII_CHARS = CharBuffer.wrap(chars); + ASCII_CHARS = CharBuffer.wrap(chars, 0, 128); + } + + private 
void testSetGetCharSequence(Charset charset) { + ByteBuf buf = newBuffer(1024); + CharBuffer sequence = CharsetUtil.US_ASCII.equals(charset) + ? ASCII_CHARS : EXTENDED_ASCII_CHARS; + int bytes = buf.setCharSequence(1, sequence, charset); + assertEquals(sequence, CharBuffer.wrap(buf.getCharSequence(1, bytes, charset))); + buf.release(); + } + + @Test + public void testWriteReadUsAsciiCharSequence() { + testWriteReadCharSequence(CharsetUtil.US_ASCII); + } + + @Test + public void testWriteReadUtf8CharSequence() { + testWriteReadCharSequence(CharsetUtil.UTF_8); + } + + @Test + public void testWriteReadIso88591CharSequence() { + testWriteReadCharSequence(CharsetUtil.ISO_8859_1); + } + + @Test + public void testWriteReadUtf16CharSequence() { + testWriteReadCharSequence(CharsetUtil.UTF_16); + } + + private void testWriteReadCharSequence(Charset charset) { + ByteBuf buf = newBuffer(1024); + CharBuffer sequence = CharsetUtil.US_ASCII.equals(charset) + ? ASCII_CHARS : EXTENDED_ASCII_CHARS; + buf.writerIndex(1); + int bytes = buf.writeCharSequence(sequence, charset); + buf.readerIndex(1); + assertEquals(sequence, CharBuffer.wrap(buf.readCharSequence(bytes, charset))); + buf.release(); + } + + @Test + public void testRetainedSliceIndexOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, true, true)); + } + + @Test + public void testRetainedSliceLengthOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, true, false)); + } + + @Test + public void testMixedSliceAIndexOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, false, true)); + } + + @Test + public void testMixedSliceALengthOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(true, false, false)); + } + + @Test + public void testMixedSliceBIndexOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(false, true, true)); 
+ } + + @Test + public void testMixedSliceBLengthOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(false, true, false)); + } + + @Test + public void testSliceIndexOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(false, false, true)); + } + + @Test + public void testSliceLengthOutOfBounds() { + assertThrows(IndexOutOfBoundsException.class, () -> testSliceOutOfBounds(false, false, false)); + } + + @Test + public void testRetainedSliceAndRetainedDuplicateContentIsExpected() { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(6).writerIndex(0); + ByteBuf expected2 = newBuffer(5).writerIndex(0); + ByteBuf expected3 = newBuffer(4).writerIndex(0); + ByteBuf expected4 = newBuffer(3).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected1.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}); + expected2.writeBytes(new byte[] {3, 4, 5, 6, 7}); + expected3.writeBytes(new byte[] {4, 5, 6, 7}); + expected4.writeBytes(new byte[] {5, 6, 7}); + + ByteBuf slice1 = buf.retainedSlice(buf.readerIndex() + 1, 6); + assertEquals(0, slice1.compareTo(expected1)); + assertEquals(0, slice1.compareTo(buf.slice(buf.readerIndex() + 1, 6))); + // Simulate a handler that releases the original buffer, and propagates a slice. + buf.release(); + + // Advance the reader index on the slice. + slice1.readByte(); + + ByteBuf dup1 = slice1.retainedDuplicate(); + assertEquals(0, dup1.compareTo(expected2)); + assertEquals(0, dup1.compareTo(slice1.duplicate())); + + // Advance the reader index on dup1. + dup1.readByte(); + + ByteBuf dup2 = dup1.duplicate(); + assertEquals(0, dup2.compareTo(expected3)); + + // Advance the reader index on dup2. 
+ dup2.readByte(); + + ByteBuf slice2 = dup2.retainedSlice(dup2.readerIndex(), 3); + assertEquals(0, slice2.compareTo(expected4)); + assertEquals(0, slice2.compareTo(dup2.slice(dup2.readerIndex(), 3))); + + // Cleanup the expected buffers used for testing. + assertTrue(expected1.release()); + assertTrue(expected2.release()); + assertTrue(expected3.release()); + assertTrue(expected4.release()); + + slice2.release(); + dup2.release(); + + assertEquals(slice2.refCnt(), dup2.refCnt()); + assertEquals(dup2.refCnt(), dup1.refCnt()); + + // The handler is now done with the original slice + assertTrue(slice1.release()); + + // Reference counting may be shared, or may be independently tracked, but at this point all buffers should + // be deallocated and have a reference count of 0. + assertEquals(0, buf.refCnt()); + assertEquals(0, slice1.refCnt()); + assertEquals(0, slice2.refCnt()); + assertEquals(0, dup1.refCnt()); + assertEquals(0, dup2.refCnt()); + } + + @Test + public void testRetainedDuplicateAndRetainedSliceContentIsExpected() { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(6).writerIndex(0); + ByteBuf expected2 = newBuffer(5).writerIndex(0); + ByteBuf expected3 = newBuffer(4).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected1.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}); + expected2.writeBytes(new byte[] {3, 4, 5, 6, 7}); + expected3.writeBytes(new byte[] {5, 6, 7}); + + ByteBuf dup1 = buf.retainedDuplicate(); + assertEquals(0, dup1.compareTo(buf)); + assertEquals(0, dup1.compareTo(buf.slice())); + // Simulate a handler that releases the original buffer, and propagates a slice. + buf.release(); + + // Advance the reader index on the dup. + dup1.readByte(); + + ByteBuf slice1 = dup1.retainedSlice(dup1.readerIndex(), 6); + assertEquals(0, slice1.compareTo(expected1)); + assertEquals(0, slice1.compareTo(slice1.duplicate())); + + // Advance the reader index on slice1. 
+ slice1.readByte(); + + ByteBuf dup2 = slice1.duplicate(); + assertEquals(0, dup2.compareTo(slice1)); + + // Advance the reader index on dup2. + dup2.readByte(); + + ByteBuf slice2 = dup2.retainedSlice(dup2.readerIndex() + 1, 3); + assertEquals(0, slice2.compareTo(expected3)); + assertEquals(0, slice2.compareTo(dup2.slice(dup2.readerIndex() + 1, 3))); + + // Cleanup the expected buffers used for testing. + assertTrue(expected1.release()); + assertTrue(expected2.release()); + assertTrue(expected3.release()); + + slice2.release(); + slice1.release(); + + assertEquals(slice2.refCnt(), dup2.refCnt()); + assertEquals(dup2.refCnt(), slice1.refCnt()); + + // The handler is now done with the original slice + assertTrue(dup1.release()); + + // Reference counting may be shared, or may be independently tracked, but at this point all buffers should + // be deallocated and have a reference count of 0. + assertEquals(0, buf.refCnt()); + assertEquals(0, slice1.refCnt()); + assertEquals(0, slice2.refCnt()); + assertEquals(0, dup1.refCnt()); + assertEquals(0, dup2.refCnt()); + } + + @Test + public void testRetainedSliceContents() { + testSliceContents(true); + } + + @Test + public void testMultipleLevelRetainedSlice1() { + testMultipleLevelRetainedSliceWithNonRetained(true, true); + } + + @Test + public void testMultipleLevelRetainedSlice2() { + testMultipleLevelRetainedSliceWithNonRetained(true, false); + } + + @Test + public void testMultipleLevelRetainedSlice3() { + testMultipleLevelRetainedSliceWithNonRetained(false, true); + } + + @Test + public void testMultipleLevelRetainedSlice4() { + testMultipleLevelRetainedSliceWithNonRetained(false, false); + } + + @Test + public void testRetainedSliceReleaseOriginal1() { + testSliceReleaseOriginal(true, true); + } + + @Test + public void testRetainedSliceReleaseOriginal2() { + testSliceReleaseOriginal(true, false); + } + + @Test + public void testRetainedSliceReleaseOriginal3() { + testSliceReleaseOriginal(false, true); + } + + @Test 
+ public void testRetainedSliceReleaseOriginal4() { + testSliceReleaseOriginal(false, false); + } + + @Test + public void testRetainedDuplicateReleaseOriginal1() { + testDuplicateReleaseOriginal(true, true); + } + + @Test + public void testRetainedDuplicateReleaseOriginal2() { + testDuplicateReleaseOriginal(true, false); + } + + @Test + public void testRetainedDuplicateReleaseOriginal3() { + testDuplicateReleaseOriginal(false, true); + } + + @Test + public void testRetainedDuplicateReleaseOriginal4() { + testDuplicateReleaseOriginal(false, false); + } + + @Test + public void testMultipleRetainedSliceReleaseOriginal1() { + testMultipleRetainedSliceReleaseOriginal(true, true); + } + + @Test + public void testMultipleRetainedSliceReleaseOriginal2() { + testMultipleRetainedSliceReleaseOriginal(true, false); + } + + @Test + public void testMultipleRetainedSliceReleaseOriginal3() { + testMultipleRetainedSliceReleaseOriginal(false, true); + } + + @Test + public void testMultipleRetainedSliceReleaseOriginal4() { + testMultipleRetainedSliceReleaseOriginal(false, false); + } + + @Test + public void testMultipleRetainedDuplicateReleaseOriginal1() { + testMultipleRetainedDuplicateReleaseOriginal(true, true); + } + + @Test + public void testMultipleRetainedDuplicateReleaseOriginal2() { + testMultipleRetainedDuplicateReleaseOriginal(true, false); + } + + @Test + public void testMultipleRetainedDuplicateReleaseOriginal3() { + testMultipleRetainedDuplicateReleaseOriginal(false, true); + } + + @Test + public void testMultipleRetainedDuplicateReleaseOriginal4() { + testMultipleRetainedDuplicateReleaseOriginal(false, false); + } + + @Test + public void testSliceContents() { + testSliceContents(false); + } + + @Test + public void testRetainedDuplicateContents() { + testDuplicateContents(true); + } + + @Test + public void testDuplicateContents() { + testDuplicateContents(false); + } + + @Test + public void testDuplicateCapacityChange() { + testDuplicateCapacityChange(false); + } + + 
@Test + public void testRetainedDuplicateCapacityChange() { + testDuplicateCapacityChange(true); + } + + @Test + public void testSliceCapacityChange() { + assertThrows(UnsupportedOperationException.class, () -> testSliceCapacityChange(false)); + } + + @Test + public void testRetainedSliceCapacityChange() { + assertThrows(UnsupportedOperationException.class, () -> testSliceCapacityChange(true)); + } + + @Test + public void testRetainedSliceUnreleasable1() { + testRetainedSliceUnreleasable(true, true); + } + + @Test + public void testRetainedSliceUnreleasable2() { + testRetainedSliceUnreleasable(true, false); + } + + @Test + public void testRetainedSliceUnreleasable3() { + testRetainedSliceUnreleasable(false, true); + } + + @Test + public void testRetainedSliceUnreleasable4() { + testRetainedSliceUnreleasable(false, false); + } + + @Test + public void testReadRetainedSliceUnreleasable1() { + testReadRetainedSliceUnreleasable(true, true); + } + + @Test + public void testReadRetainedSliceUnreleasable2() { + testReadRetainedSliceUnreleasable(true, false); + } + + @Test + public void testReadRetainedSliceUnreleasable3() { + testReadRetainedSliceUnreleasable(false, true); + } + + @Test + public void testReadRetainedSliceUnreleasable4() { + testReadRetainedSliceUnreleasable(false, false); + } + + @Test + public void testRetainedDuplicateUnreleasable1() { + testRetainedDuplicateUnreleasable(true, true); + } + + @Test + public void testRetainedDuplicateUnreleasable2() { + testRetainedDuplicateUnreleasable(true, false); + } + + @Test + public void testRetainedDuplicateUnreleasable3() { + testRetainedDuplicateUnreleasable(false, true); + } + + @Test + public void testRetainedDuplicateUnreleasable4() { + testRetainedDuplicateUnreleasable(false, false); + } + + private void testRetainedSliceUnreleasable(boolean initRetainedSlice, boolean finalRetainedSlice) { + ByteBuf buf = newBuffer(8); + ByteBuf buf1 = initRetainedSlice ? 
buf.retainedSlice() : buf.slice().retain(); + ByteBuf buf2 = unreleasableBuffer(buf1); + ByteBuf buf3 = finalRetainedSlice ? buf2.retainedSlice() : buf2.slice().retain(); + assertFalse(buf3.release()); + assertFalse(buf2.release()); + buf1.release(); + assertTrue(buf.release()); + assertEquals(0, buf1.refCnt()); + assertEquals(0, buf.refCnt()); + } + + private void testReadRetainedSliceUnreleasable(boolean initRetainedSlice, boolean finalRetainedSlice) { + ByteBuf buf = newBuffer(8); + ByteBuf buf1 = initRetainedSlice ? buf.retainedSlice() : buf.slice().retain(); + ByteBuf buf2 = unreleasableBuffer(buf1); + ByteBuf buf3 = finalRetainedSlice ? buf2.readRetainedSlice(buf2.readableBytes()) + : buf2.readSlice(buf2.readableBytes()).retain(); + assertFalse(buf3.release()); + assertFalse(buf2.release()); + buf1.release(); + assertTrue(buf.release()); + assertEquals(0, buf1.refCnt()); + assertEquals(0, buf.refCnt()); + } + + private void testRetainedDuplicateUnreleasable(boolean initRetainedDuplicate, boolean finalRetainedDuplicate) { + ByteBuf buf = newBuffer(8); + ByteBuf buf1 = initRetainedDuplicate ? buf.retainedDuplicate() : buf.duplicate().retain(); + ByteBuf buf2 = unreleasableBuffer(buf1); + ByteBuf buf3 = finalRetainedDuplicate ? buf2.retainedDuplicate() : buf2.duplicate().retain(); + assertFalse(buf3.release()); + assertFalse(buf2.release()); + buf1.release(); + assertTrue(buf.release()); + assertEquals(0, buf1.refCnt()); + assertEquals(0, buf.refCnt()); + } + + private void testDuplicateCapacityChange(boolean retainedDuplicate) { + ByteBuf buf = newBuffer(8); + ByteBuf dup = retainedDuplicate ? 
buf.retainedDuplicate() : buf.duplicate(); + try { + dup.capacity(10); + assertEquals(buf.capacity(), dup.capacity()); + dup.capacity(5); + assertEquals(buf.capacity(), dup.capacity()); + } finally { + if (retainedDuplicate) { + dup.release(); + } + buf.release(); + } + } + + private void testSliceCapacityChange(boolean retainedSlice) { + ByteBuf buf = newBuffer(8); + ByteBuf slice = retainedSlice ? buf.retainedSlice(buf.readerIndex() + 1, 3) + : buf.slice(buf.readerIndex() + 1, 3); + try { + slice.capacity(10); + } finally { + if (retainedSlice) { + slice.release(); + } + buf.release(); + } + } + + private void testSliceOutOfBounds(boolean initRetainedSlice, boolean finalRetainedSlice, boolean indexOutOfBounds) { + ByteBuf buf = newBuffer(8); + ByteBuf slice = initRetainedSlice ? buf.retainedSlice(buf.readerIndex() + 1, 2) + : buf.slice(buf.readerIndex() + 1, 2); + try { + assertEquals(2, slice.capacity()); + assertEquals(2, slice.maxCapacity()); + final int index = indexOutOfBounds ? 3 : 0; + final int length = indexOutOfBounds ? 0 : 3; + if (finalRetainedSlice) { + // This is expected to fail ... so no need to release. + slice.retainedSlice(index, length); + } else { + slice.slice(index, length); + } + } finally { + if (initRetainedSlice) { + slice.release(); + } + buf.release(); + } + } + + private void testSliceContents(boolean retainedSlice) { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected = newBuffer(3).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected.writeBytes(new byte[] {4, 5, 6}); + ByteBuf slice = retainedSlice ? 
buf.retainedSlice(buf.readerIndex() + 3, 3) + : buf.slice(buf.readerIndex() + 3, 3); + try { + assertEquals(0, slice.compareTo(expected)); + assertEquals(0, slice.compareTo(slice.duplicate())); + ByteBuf b = slice.retainedDuplicate(); + assertEquals(0, slice.compareTo(b)); + b.release(); + assertEquals(0, slice.compareTo(slice.slice(0, slice.capacity()))); + } finally { + if (retainedSlice) { + slice.release(); + } + buf.release(); + expected.release(); + } + } + + private void testSliceReleaseOriginal(boolean retainedSlice1, boolean retainedSlice2) { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(3).writerIndex(0); + ByteBuf expected2 = newBuffer(2).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected1.writeBytes(new byte[] {6, 7, 8}); + expected2.writeBytes(new byte[] {7, 8}); + ByteBuf slice1 = retainedSlice1 ? buf.retainedSlice(buf.readerIndex() + 5, 3) + : buf.slice(buf.readerIndex() + 5, 3).retain(); + assertEquals(0, slice1.compareTo(expected1)); + // Simulate a handler that releases the original buffer, and propagates a slice. + buf.release(); + + ByteBuf slice2 = retainedSlice2 ? slice1.retainedSlice(slice1.readerIndex() + 1, 2) + : slice1.slice(slice1.readerIndex() + 1, 2).retain(); + assertEquals(0, slice2.compareTo(expected2)); + + // Cleanup the expected buffers used for testing. + assertTrue(expected1.release()); + assertTrue(expected2.release()); + + // The handler created a slice of the slice and is now done with it. + slice2.release(); + + // The handler is now done with the original slice + assertTrue(slice1.release()); + + // Reference counting may be shared, or may be independently tracked, but at this point all buffers should + // be deallocated and have a reference count of 0. 
+ assertEquals(0, buf.refCnt()); + assertEquals(0, slice1.refCnt()); + assertEquals(0, slice2.refCnt()); + } + + private void testMultipleLevelRetainedSliceWithNonRetained(boolean doSlice1, boolean doSlice2) { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(6).writerIndex(0); + ByteBuf expected2 = newBuffer(4).writerIndex(0); + ByteBuf expected3 = newBuffer(2).writerIndex(0); + ByteBuf expected4SliceSlice = newBuffer(1).writerIndex(0); + ByteBuf expected4DupSlice = newBuffer(1).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected1.writeBytes(new byte[] {2, 3, 4, 5, 6, 7}); + expected2.writeBytes(new byte[] {3, 4, 5, 6}); + expected3.writeBytes(new byte[] {4, 5}); + expected4SliceSlice.writeBytes(new byte[] {5}); + expected4DupSlice.writeBytes(new byte[] {4}); + + ByteBuf slice1 = buf.retainedSlice(buf.readerIndex() + 1, 6); + assertEquals(0, slice1.compareTo(expected1)); + // Simulate a handler that releases the original buffer, and propagates a slice. + buf.release(); + + ByteBuf slice2 = slice1.retainedSlice(slice1.readerIndex() + 1, 4); + assertEquals(0, slice2.compareTo(expected2)); + assertEquals(0, slice2.compareTo(slice2.duplicate())); + assertEquals(0, slice2.compareTo(slice2.slice())); + + ByteBuf tmpBuf = slice2.retainedDuplicate(); + assertEquals(0, slice2.compareTo(tmpBuf)); + tmpBuf.release(); + tmpBuf = slice2.retainedSlice(); + assertEquals(0, slice2.compareTo(tmpBuf)); + tmpBuf.release(); + + ByteBuf slice3 = doSlice1 ? slice2.slice(slice2.readerIndex() + 1, 2) : slice2.duplicate(); + if (doSlice1) { + assertEquals(0, slice3.compareTo(expected3)); + } else { + assertEquals(0, slice3.compareTo(expected2)); + } + + ByteBuf slice4 = doSlice2 ? 
slice3.slice(slice3.readerIndex() + 1, 1) : slice3.duplicate(); + if (doSlice1 && doSlice2) { + assertEquals(0, slice4.compareTo(expected4SliceSlice)); + } else if (doSlice2) { + assertEquals(0, slice4.compareTo(expected4DupSlice)); + } else { + assertEquals(0, slice3.compareTo(slice4)); + } + + // Cleanup the expected buffers used for testing. + assertTrue(expected1.release()); + assertTrue(expected2.release()); + assertTrue(expected3.release()); + assertTrue(expected4SliceSlice.release()); + assertTrue(expected4DupSlice.release()); + + // Slice 4, 3, and 2 should effectively "share" a reference count. + slice4.release(); + assertEquals(slice3.refCnt(), slice2.refCnt()); + assertEquals(slice3.refCnt(), slice4.refCnt()); + + // Slice 1 should also release the original underlying buffer without throwing exceptions + assertTrue(slice1.release()); + + // Reference counting may be shared, or may be independently tracked, but at this point all buffers should + // be deallocated and have a reference count of 0. + assertEquals(0, buf.refCnt()); + assertEquals(0, slice1.refCnt()); + assertEquals(0, slice2.refCnt()); + assertEquals(0, slice3.refCnt()); + } + + private void testDuplicateReleaseOriginal(boolean retainedDuplicate1, boolean retainedDuplicate2) { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected = newBuffer(8).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected.writeBytes(buf, buf.readerIndex(), buf.readableBytes()); + ByteBuf dup1 = retainedDuplicate1 ? buf.retainedDuplicate() + : buf.duplicate().retain(); + assertEquals(0, dup1.compareTo(expected)); + // Simulate a handler that releases the original buffer, and propagates a slice. + buf.release(); + + ByteBuf dup2 = retainedDuplicate2 ? dup1.retainedDuplicate() + : dup1.duplicate().retain(); + assertEquals(0, dup2.compareTo(expected)); + + // Cleanup the expected buffers used for testing. 
+ assertTrue(expected.release()); + + // The handler created a slice of the slice and is now done with it. + dup2.release(); + + // The handler is now done with the original slice + assertTrue(dup1.release()); + + // Reference counting may be shared, or may be independently tracked, but at this point all buffers should + // be deallocated and have a reference count of 0. + assertEquals(0, buf.refCnt()); + assertEquals(0, dup1.refCnt()); + assertEquals(0, dup2.refCnt()); + } + + private void testMultipleRetainedSliceReleaseOriginal(boolean retainedSlice1, boolean retainedSlice2) { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected1 = newBuffer(3).writerIndex(0); + ByteBuf expected2 = newBuffer(2).writerIndex(0); + ByteBuf expected3 = newBuffer(2).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected1.writeBytes(new byte[] {6, 7, 8}); + expected2.writeBytes(new byte[] {7, 8}); + expected3.writeBytes(new byte[] {6, 7}); + ByteBuf slice1 = retainedSlice1 ? buf.retainedSlice(buf.readerIndex() + 5, 3) + : buf.slice(buf.readerIndex() + 5, 3).retain(); + assertEquals(0, slice1.compareTo(expected1)); + // Simulate a handler that releases the original buffer, and propagates a slice. + buf.release(); + + ByteBuf slice2 = retainedSlice2 ? slice1.retainedSlice(slice1.readerIndex() + 1, 2) + : slice1.slice(slice1.readerIndex() + 1, 2).retain(); + assertEquals(0, slice2.compareTo(expected2)); + + // The handler created a slice of the slice and is now done with it. + slice2.release(); + + ByteBuf slice3 = slice1.retainedSlice(slice1.readerIndex(), 2); + assertEquals(0, slice3.compareTo(expected3)); + + // The handler created another slice of the slice and is now done with it. + slice3.release(); + + // The handler is now done with the original slice + assertTrue(slice1.release()); + + // Cleanup the expected buffers used for testing. 
+ assertTrue(expected1.release()); + assertTrue(expected2.release()); + assertTrue(expected3.release()); + + // Reference counting may be shared, or may be independently tracked, but at this point all buffers should + // be deallocated and have a reference count of 0. + assertEquals(0, buf.refCnt()); + assertEquals(0, slice1.refCnt()); + assertEquals(0, slice2.refCnt()); + assertEquals(0, slice3.refCnt()); + } + + private void testMultipleRetainedDuplicateReleaseOriginal(boolean retainedDuplicate1, boolean retainedDuplicate2) { + ByteBuf buf = newBuffer(8).writerIndex(0); + ByteBuf expected = newBuffer(8).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + expected.writeBytes(buf, buf.readerIndex(), buf.readableBytes()); + ByteBuf dup1 = retainedDuplicate1 ? buf.retainedDuplicate() + : buf.duplicate().retain(); + assertEquals(0, dup1.compareTo(expected)); + // Simulate a handler that releases the original buffer, and propagates a slice. + buf.release(); + + ByteBuf dup2 = retainedDuplicate2 ? dup1.retainedDuplicate() + : dup1.duplicate().retain(); + assertEquals(0, dup2.compareTo(expected)); + assertEquals(0, dup2.compareTo(dup2.duplicate())); + assertEquals(0, dup2.compareTo(dup2.slice())); + + ByteBuf tmpBuf = dup2.retainedDuplicate(); + assertEquals(0, dup2.compareTo(tmpBuf)); + tmpBuf.release(); + tmpBuf = dup2.retainedSlice(); + assertEquals(0, dup2.compareTo(tmpBuf)); + tmpBuf.release(); + + // The handler created a slice of the slice and is now done with it. + dup2.release(); + + ByteBuf dup3 = dup1.retainedDuplicate(); + assertEquals(0, dup3.compareTo(expected)); + + // The handler created another slice of the slice and is now done with it. + dup3.release(); + + // The handler is now done with the original slice + assertTrue(dup1.release()); + + // Cleanup the expected buffers used for testing. 
+ assertTrue(expected.release()); + + // Reference counting may be shared, or may be independently tracked, but at this point all buffers should + // be deallocated and have a reference count of 0. + assertEquals(0, buf.refCnt()); + assertEquals(0, dup1.refCnt()); + assertEquals(0, dup2.refCnt()); + assertEquals(0, dup3.refCnt()); + } + + private void testDuplicateContents(boolean retainedDuplicate) { + ByteBuf buf = newBuffer(8).writerIndex(0); + buf.writeBytes(new byte[] {1, 2, 3, 4, 5, 6, 7, 8}); + ByteBuf dup = retainedDuplicate ? buf.retainedDuplicate() : buf.duplicate(); + try { + assertEquals(0, dup.compareTo(buf)); + assertEquals(0, dup.compareTo(dup.duplicate())); + ByteBuf b = dup.retainedDuplicate(); + assertEquals(0, dup.compareTo(b)); + b.release(); + assertEquals(0, dup.compareTo(dup.slice(dup.readerIndex(), dup.readableBytes()))); + } finally { + if (retainedDuplicate) { + dup.release(); + } + buf.release(); + } + } + + @Test + public void testDuplicateRelease() { + ByteBuf buf = newBuffer(8); + assertEquals(1, buf.refCnt()); + assertTrue(buf.duplicate().release()); + assertEquals(0, buf.refCnt()); + } + + // Test-case trying to reproduce: + // https://github.com/netty/netty/issues/2843 + @Test + public void testRefCnt() throws Exception { + testRefCnt0(false); + } + + // Test-case trying to reproduce: + // https://github.com/netty/netty/issues/2843 + @Test + public void testRefCnt2() throws Exception { + testRefCnt0(true); + } + + @Test + public void testEmptyNioBuffers() throws Exception { + ByteBuf buffer = newBuffer(8); + buffer.clear(); + assertFalse(buffer.isReadable()); + ByteBuffer[] nioBuffers = buffer.nioBuffers(); + assertEquals(1, nioBuffers.length); + assertFalse(nioBuffers[0].hasRemaining()); + buffer.release(); + } + + @Test + public void testGetReadOnlyDirectDst() { + testGetReadOnlyDst(true); + } + + @Test + public void testGetReadOnlyHeapDst() { + testGetReadOnlyDst(false); + } + + private void testGetReadOnlyDst(boolean direct) { + 
byte[] bytes = { 'a', 'b', 'c', 'd' }; + + ByteBuf buffer = newBuffer(bytes.length); + buffer.writeBytes(bytes); + + ByteBuffer dst = direct ? ByteBuffer.allocateDirect(bytes.length) : ByteBuffer.allocate(bytes.length); + ByteBuffer readOnlyDst = dst.asReadOnlyBuffer(); + assertThrows(ReadOnlyBufferException.class, () -> buffer.getBytes(0, readOnlyDst)); + assertEquals(0, readOnlyDst.position()); + buffer.release(); + } + + @Test + public void testReadBytesAndWriteBytesWithFileChannel() throws IOException { + File file = PlatformDependent.createTempFile("file-channel", ".tmp", null); + RandomAccessFile randomAccessFile = null; + try { + randomAccessFile = new RandomAccessFile(file, "rw"); + FileChannel channel = randomAccessFile.getChannel(); + // channelPosition should never be changed + long channelPosition = channel.position(); + + byte[] bytes = {'a', 'b', 'c', 'd'}; + int len = bytes.length; + ByteBuf buffer = newBuffer(len).writerIndex(0); + buffer.writeBytes(bytes); + + int oldReaderIndex = buffer.readerIndex(); + assertEquals(len, buffer.readBytes(channel, 10, len)); + assertEquals(oldReaderIndex + len, buffer.readerIndex()); + assertEquals(channelPosition, channel.position()); + + ByteBuf buffer2 = newBuffer(len).writerIndex(0); + int oldWriterIndex = buffer2.writerIndex(); + assertEquals(len, buffer2.writeBytes(channel, 10, len)); + assertEquals(channelPosition, channel.position()); + assertEquals(oldWriterIndex + len, buffer2.writerIndex()); + assertEquals('a', buffer2.getByte(0)); + assertEquals('b', buffer2.getByte(1)); + assertEquals('c', buffer2.getByte(2)); + assertEquals('d', buffer2.getByte(3)); + buffer.release(); + buffer2.release(); + } finally { + if (randomAccessFile != null) { + randomAccessFile.close(); + } + file.delete(); + } + } + + @Test + public void testGetBytesAndSetBytesWithFileChannel() throws IOException { + File file = PlatformDependent.createTempFile("file-channel", ".tmp", null); + RandomAccessFile randomAccessFile = null; + 
try { + randomAccessFile = new RandomAccessFile(file, "rw"); + FileChannel channel = randomAccessFile.getChannel(); + // channelPosition should never be changed + long channelPosition = channel.position(); + + byte[] bytes = {'a', 'b', 'c', 'd'}; + int len = bytes.length; + ByteBuf buffer = newBuffer(len).writerIndex(0); + buffer.writeBytes(bytes); + + int oldReaderIndex = buffer.readerIndex(); + assertEquals(len, buffer.getBytes(oldReaderIndex, channel, 10, len)); + assertEquals(oldReaderIndex, buffer.readerIndex()); + assertEquals(channelPosition, channel.position()); + + ByteBuf buffer2 = newBuffer(len).writerIndex(0); + int oldWriterIndex = buffer2.writerIndex(); + assertEquals(buffer2.setBytes(oldWriterIndex, channel, 10, len), len); + assertEquals(channelPosition, channel.position()); + + assertEquals(oldWriterIndex, buffer2.writerIndex()); + assertEquals('a', buffer2.getByte(oldWriterIndex)); + assertEquals('b', buffer2.getByte(oldWriterIndex + 1)); + assertEquals('c', buffer2.getByte(oldWriterIndex + 2)); + assertEquals('d', buffer2.getByte(oldWriterIndex + 3)); + + buffer.release(); + buffer2.release(); + } finally { + if (randomAccessFile != null) { + randomAccessFile.close(); + } + file.delete(); + } + } + + @Test + public void testReadBytes() { + ByteBuf buffer = newBuffer(8); + byte[] bytes = new byte[8]; + buffer.writeBytes(bytes); + + ByteBuf buffer2 = buffer.readBytes(4); + assertSame(buffer.alloc(), buffer2.alloc()); + assertEquals(4, buffer.readerIndex()); + assertTrue(buffer.release()); + assertEquals(0, buffer.refCnt()); + assertTrue(buffer2.release()); + assertEquals(0, buffer2.refCnt()); + } + + @Test + public void testForEachByteDesc2() { + byte[] expected = {1, 2, 3, 4}; + ByteBuf buf = newBuffer(expected.length); + try { + buf.writeBytes(expected); + final byte[] bytes = new byte[expected.length]; + int i = buf.forEachByteDesc(new ByteProcessor() { + private int index = bytes.length - 1; + + @Override + public boolean process(byte value) { 
+ bytes[index--] = value; + return true; + } + }); + assertEquals(-1, i); + assertArrayEquals(expected, bytes); + } finally { + buf.release(); + } + } + + @Test + public void testForEachByte2() { + byte[] expected = {1, 2, 3, 4}; + ByteBuf buf = newBuffer(expected.length); + try { + buf.writeBytes(expected); + final byte[] bytes = new byte[expected.length]; + int i = buf.forEachByte(new ByteProcessor() { + private int index; + + @Override + public boolean process(byte value) { + bytes[index++] = value; + return true; + } + }); + assertEquals(-1, i); + assertArrayEquals(expected, bytes); + } finally { + buf.release(); + } + } + + @Test + public void testGetBytesByteBuffer() { + assertThrows(IndexOutOfBoundsException.class, () -> { + byte[] bytes = {'a', 'b', 'c', 'd', 'e', 'f', 'g'}; + // Ensure destination buffer is bigger then what is in the ByteBuf. + ByteBuffer nioBuffer = ByteBuffer.allocate(bytes.length + 1); + ByteBuf buffer = newBuffer(bytes.length); + try { + buffer.writeBytes(bytes); + buffer.getBytes(buffer.readerIndex(), nioBuffer); + } finally { + buffer.release(); + } + }); + } + + private void testRefCnt0(final boolean parameter) throws Exception { + for (int i = 0; i < 10; i++) { + final CountDownLatch latch = new CountDownLatch(1); + final CountDownLatch innerLatch = new CountDownLatch(1); + + final ByteBuf buffer = newBuffer(4); + assertEquals(1, buffer.refCnt()); + final AtomicInteger cnt = new AtomicInteger(Integer.MAX_VALUE); + Thread t1 = new Thread(() -> { + boolean released; + if (parameter) { + released = buffer.release(buffer.refCnt()); + } else { + released = buffer.release(); + } + assertTrue(released); + Thread t2 = new Thread(() -> { + cnt.set(buffer.refCnt()); + latch.countDown(); + }); + t2.start(); + try { + // Keep Thread alive a bit so the ThreadLocal caches are not freed + innerLatch.await(); + } catch (InterruptedException ignore) { + // ignore + } + }); + t1.start(); + + latch.await(); + assertEquals(0, cnt.get()); + 
innerLatch.countDown(); + } + } + + public static final class TestGatheringByteChannel implements GatheringByteChannel { + private final ByteArrayOutputStream out = new ByteArrayOutputStream(); + private final WritableByteChannel channel = Channels.newChannel(out); + private final int limit; + public TestGatheringByteChannel(int limit) { + this.limit = limit; + } + + public TestGatheringByteChannel() { + this(Integer.MAX_VALUE); + } + + @Override + public long write(ByteBuffer[] srcs, int offset, int length) throws IOException { + long written = 0; + for (; offset < length; offset++) { + written += write(srcs[offset]); + if (written >= limit) { + break; + } + } + return written; + } + + @Override + public long write(ByteBuffer[] srcs) throws IOException { + return write(srcs, 0, srcs.length); + } + + @Override + public int write(ByteBuffer src) throws IOException { + int oldLimit = src.limit(); + if (limit < src.remaining()) { + src.limit(src.position() + limit); + } + int w = channel.write(src); + src.limit(oldLimit); + return w; + } + + @Override + public boolean isOpen() { + return channel.isOpen(); + } + + @Override + public void close() throws IOException { + channel.close(); + } + + public byte[] writtenBytes() { + return out.toByteArray(); + } + } + + private static final class DevNullGatheringByteChannel implements GatheringByteChannel { + @Override + public long write(ByteBuffer[] srcs, int offset, int length) { + throw new UnsupportedOperationException(); + } + + @Override + public long write(ByteBuffer[] srcs) { + throw new UnsupportedOperationException(); + } + + @Override + public int write(ByteBuffer src) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public void close() { + throw new UnsupportedOperationException(); + } + } + + private static final class TestScatteringByteChannel implements ScatteringByteChannel { + @Override + public long read(ByteBuffer[] dsts, int 
offset, int length) { + throw new UnsupportedOperationException(); + } + + @Override + public long read(ByteBuffer[] dsts) { + throw new UnsupportedOperationException(); + } + + @Override + public int read(ByteBuffer dst) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isOpen() { + return false; + } + + @Override + public void close() { + throw new UnsupportedOperationException(); + } + } + + private static final class TestByteProcessor implements ByteProcessor { + @Override + public boolean process(byte value) { + return true; + } + } + + @Test + public void testCapacityEnforceMaxCapacity() { + assertThrows(IllegalArgumentException.class, () -> { + ByteBuf buffer = newBuffer(3, 13); + assertEquals(13, buffer.maxCapacity()); + assertEquals(3, buffer.capacity()); + try { + buffer.capacity(14); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testCapacityNegative() { + assertThrows(IllegalArgumentException.class, () -> { + ByteBuf buffer = newBuffer(3, 13); + assertEquals(13, buffer.maxCapacity()); + assertEquals(3, buffer.capacity()); + try { + buffer.capacity(-1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testCapacityDecrease() { + ByteBuf buffer = newBuffer(3, 13); + assertEquals(13, buffer.maxCapacity()); + assertEquals(3, buffer.capacity()); + try { + buffer.capacity(2); + assertEquals(2, buffer.capacity()); + assertEquals(13, buffer.maxCapacity()); + } finally { + buffer.release(); + } + } + + @Test + public void testCapacityIncrease() { + ByteBuf buffer = newBuffer(3, 13); + assertEquals(13, buffer.maxCapacity()); + assertEquals(3, buffer.capacity()); + try { + buffer.capacity(4); + assertEquals(4, buffer.capacity()); + assertEquals(13, buffer.maxCapacity()); + } finally { + buffer.release(); + } + } + + @Test + public void testReaderIndexLargerThanWriterIndex() { + assertThrows(IndexOutOfBoundsException.class, () -> { + String content1 = "hello"; + String content2 = 
"world"; + int length = content1.length() + content2.length(); + ByteBuf buffer = newBuffer(length); + buffer.setIndex(0, 0); + buffer.writeCharSequence(content1, CharsetUtil.US_ASCII); + buffer.skipBytes(content1.length()); + buffer.writeCharSequence(content2, CharsetUtil.US_ASCII); + buffer.skipBytes(content2.length()); + assertTrue(buffer.readerIndex() <= buffer.writerIndex()); + + try { + buffer.readerIndex(buffer.writerIndex() + 1); + } finally { + buffer.release(); + } + }); + } + + @Test + public void testMaxFastWritableBytes() { + ByteBuf buffer = newBuffer(150, 500).writerIndex(100); + assertEquals(50, buffer.writableBytes()); + assertEquals(150, buffer.capacity()); + assertEquals(500, buffer.maxCapacity()); + assertEquals(400, buffer.maxWritableBytes()); + // Default implementation has fast writable == writable + assertEquals(50, buffer.maxFastWritableBytes()); + buffer.release(); + } + + @Test + public void testEnsureWritableIntegerOverflow() { + ByteBuf buffer = newBuffer(CAPACITY); + buffer.writerIndex(buffer.readerIndex()); + buffer.writeByte(1); + try { + assertThrows(IndexOutOfBoundsException.class, () -> buffer.ensureWritable(Integer.MAX_VALUE)); + } finally { + buffer.release(); + } + } + + @Test + public void testEndiannessIndexOf() { + buffer.clear(); + final int v = 0x02030201; + buffer.writeIntLE(v); + buffer.writeByte(0x01); + + assertEquals(-1, buffer.indexOf(1, 4, (byte) 1)); + assertEquals(-1, buffer.indexOf(4, 1, (byte) 1)); + assertEquals(1, buffer.indexOf(1, 4, (byte) 2)); + assertEquals(3, buffer.indexOf(4, 1, (byte) 2)); + } + + @Test + public void explicitLittleEndianReadMethodsMustAlwaysUseLittleEndianByteOrder() { + buffer.clear(); + buffer.writeBytes(new byte[] {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}); + assertEquals(0x0201, buffer.readShortLE()); + buffer.readerIndex(0); + assertEquals(0x0201, buffer.readUnsignedShortLE()); + buffer.readerIndex(0); + assertEquals(0x030201, buffer.readMediumLE()); + buffer.readerIndex(0); 
+ assertEquals(0x030201, buffer.readUnsignedMediumLE()); + buffer.readerIndex(0); + assertEquals(0x04030201, buffer.readIntLE()); + buffer.readerIndex(0); + assertEquals(0x04030201, buffer.readUnsignedIntLE()); + buffer.readerIndex(0); + assertEquals(0x04030201, Float.floatToRawIntBits(buffer.readFloatLE())); + buffer.readerIndex(0); + assertEquals(0x0807060504030201L, buffer.readLongLE()); + buffer.readerIndex(0); + assertEquals(0x0807060504030201L, Double.doubleToRawLongBits(buffer.readDoubleLE())); + buffer.readerIndex(0); + } + + @Test + public void explicitLittleEndianWriteMethodsMustAlwaysUseLittleEndianByteOrder() { + buffer.clear(); + buffer.writeShortLE(0x0102); + assertEquals(0x0102, buffer.readShortLE()); + buffer.clear(); + buffer.writeMediumLE(0x010203); + assertEquals(0x010203, buffer.readMediumLE()); + buffer.clear(); + buffer.writeIntLE(0x01020304); + assertEquals(0x01020304, buffer.readIntLE()); + buffer.clear(); + buffer.writeFloatLE(Float.intBitsToFloat(0x01020304)); + assertEquals(0x01020304, Float.floatToRawIntBits(buffer.readFloatLE())); + buffer.clear(); + buffer.writeLongLE(0x0102030405060708L); + assertEquals(0x0102030405060708L, buffer.readLongLE()); + buffer.clear(); + buffer.writeDoubleLE(Double.longBitsToDouble(0x0102030405060708L)); + assertEquals(0x0102030405060708L, Double.doubleToRawLongBits(buffer.readDoubleLE())); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/ByteBufAdaptorTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/ByteBufAdaptorTest.java new file mode 100644 index 00000000000..8fd4ee91ecb --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/ByteBufAdaptorTest.java @@ -0,0 +1,99 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests.adaptor; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.api.BufferAllocator; +import io.netty.buffer.api.MemoryManager; +import io.netty.buffer.api.adaptor.ByteBufAllocatorAdaptor; +import org.junit.jupiter.api.AfterAll; +import org.junit.jupiter.api.Disabled; + +import java.util.Optional; + +import static org.junit.jupiter.api.Assumptions.assumeTrue; + +public abstract class ByteBufAdaptorTest extends AbstractByteBufTest { + static ByteBufAllocatorAdaptor alloc; + + static void setUpAllocator(String name) { + Optional managers = MemoryManager.lookupImplementation(name); + assumeTrue(managers.isPresent(), () -> "Memory implementation '" + name + "' not found."); + BufferAllocator onheap = MemoryManager.using(managers.get(), BufferAllocator::onHeapPooled); + BufferAllocator offheap = MemoryManager.using(managers.get(), BufferAllocator::onHeapPooled); + alloc = new ByteBufAllocatorAdaptor(onheap, offheap); + } + + @AfterAll + public static void tearDownAllocator() throws Exception { + if (alloc != null) { + alloc.close(); + } + } + + @Override + protected ByteBuf newBuffer(int capacity, int maxCapacity) { + return alloc.buffer(capacity, maxCapacity); + } + + @Disabled("This test codifies that asking to reading 0 bytes from an empty but unclosed stream should return -1, " + + "which is just weird.") + @Override + public void testStreamTransfer1() throws Exception { + } + + @Disabled("Relies on capacity and max capacity being separate things.") + @Override + public void testCapacityIncrease() { + } + + 
@Disabled("Decreasing capacity not supported in new API.") + @Override + public void testCapacityDecrease() { + } + + @Disabled("Decreasing capacity not supported in new API.") + @Override + public void testCapacityNegative() { + throw new IllegalArgumentException(); // Can't ignore tests annotated with throws expectation? + } + + @Disabled("Decreasing capacity not supported in new API.") + @Override + public void testCapacityEnforceMaxCapacity() { + throw new IllegalArgumentException(); // Can't ignore tests annotated with throws expectation? + } + + @Disabled("Decreasing capacity not supported in new API.") + @Override + public void testMaxFastWritableBytes() { + } + + @Disabled("Impossible to expose entire memory as a ByteBuffer using new API.") + @Override + public void testNioBufferExposeOnlyRegion() { + } + + @Disabled("Impossible to expose entire memory as a ByteBuffer using new API.") + @Override + public void testToByteBuffer2() { + } + + @Disabled("No longer allowed to allocate 0 sized buffers, except for composite buffers with no components.") + @Override + public void testLittleEndianWithExpand() { + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/NioByteBufAdaptorTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/NioByteBufAdaptorTest.java new file mode 100644 index 00000000000..de0f4e14a77 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/NioByteBufAdaptorTest.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.api.tests.adaptor; + +import org.junit.jupiter.api.BeforeAll; + +public class NioByteBufAdaptorTest extends ByteBufAdaptorTest { + @BeforeAll + public static void setUpAllocator() { + setUpAllocator("ByteBuffer"); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/UnsafeByteBufAdaptorTest.java b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/UnsafeByteBufAdaptorTest.java new file mode 100644 index 00000000000..ae5601354b3 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/api/tests/adaptor/UnsafeByteBufAdaptorTest.java @@ -0,0 +1,25 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.api.tests.adaptor; + +import org.junit.jupiter.api.BeforeAll; + +public class UnsafeByteBufAdaptorTest extends ByteBufAdaptorTest { + @BeforeAll + public static void setUpAllocator() { + setUpAllocator("Unsafe"); + } +} diff --git a/buffer/src/test/java/io/netty/buffer/search/BitapSearchProcessorFactoryTest.java b/buffer/src/test/java/io/netty/buffer/search/BitapSearchProcessorFactoryTest.java new file mode 100644 index 00000000000..c087604e06c --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/search/BitapSearchProcessorFactoryTest.java @@ -0,0 +1,34 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.search; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertThrows; + +public class BitapSearchProcessorFactoryTest { + + @Test + public void testAcceptMaximumLengthNeedle() { + new BitapSearchProcessorFactory(new byte[64]); + } + + @Test + public void testRejectTooLongNeedle() { + assertThrows(IllegalArgumentException.class, () -> new BitapSearchProcessorFactory(new byte[65])); + } + +} diff --git a/buffer/src/test/java/io/netty/buffer/search/MultiSearchProcessorTest.java b/buffer/src/test/java/io/netty/buffer/search/MultiSearchProcessorTest.java new file mode 100644 index 00000000000..1a489822ba8 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/search/MultiSearchProcessorTest.java @@ -0,0 +1,107 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.buffer.search; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.util.CharsetUtil; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class MultiSearchProcessorTest { + + @Test + public void testSearchForMultiple() { + final ByteBuf haystack = Unpooled.copiedBuffer("one two three one", CharsetUtil.UTF_8); + final int length = haystack.readableBytes(); + + final MultiSearchProcessor processor = AbstractMultiSearchProcessorFactory.newAhoCorasicSearchProcessorFactory( + bytes("one"), + bytes("two"), + bytes("three") + ).newSearchProcessor(); + + assertEquals(-1, processor.getFoundNeedleId()); + + assertEquals(2, haystack.forEachByte(processor)); + assertEquals(0, processor.getFoundNeedleId()); // index of "one" in needles[] + + assertEquals(6, haystack.forEachByte(3, length - 3, processor)); + assertEquals(1, processor.getFoundNeedleId()); // index of "two" in needles[] + + assertEquals(12, haystack.forEachByte(7, length - 7, processor)); + assertEquals(2, processor.getFoundNeedleId()); // index of "three" in needles[] + + assertEquals(16, haystack.forEachByte(13, length - 13, processor)); + assertEquals(0, processor.getFoundNeedleId()); // index of "one" in needles[] + + assertEquals(-1, haystack.forEachByte(17, length - 17, processor)); + + haystack.release(); + } + + @Test + public void testSearchForMultipleOverlapping() { + final ByteBuf haystack = Unpooled.copiedBuffer("abcd", CharsetUtil.UTF_8); + final int length = haystack.readableBytes(); + + final MultiSearchProcessor processor = AbstractMultiSearchProcessorFactory.newAhoCorasicSearchProcessorFactory( + bytes("ab"), + bytes("bc"), + bytes("cd") + ).newSearchProcessor(); + + assertEquals(1, haystack.forEachByte(processor)); + assertEquals(0, processor.getFoundNeedleId()); // index of "ab" in needles[] + + assertEquals(2, haystack.forEachByte(2, length - 2, processor)); + assertEquals(1, 
processor.getFoundNeedleId()); // index of "bc" in needles[] + + assertEquals(3, haystack.forEachByte(3, length - 3, processor)); + assertEquals(2, processor.getFoundNeedleId()); // index of "cd" in needles[] + + haystack.release(); + } + + @Test + public void findLongerNeedleInCaseOfSuffixMatch() { + final ByteBuf haystack = Unpooled.copiedBuffer("xabcx", CharsetUtil.UTF_8); + + final MultiSearchProcessor processor1 = AbstractMultiSearchProcessorFactory.newAhoCorasicSearchProcessorFactory( + bytes("abc"), + bytes("bc") + ).newSearchProcessor(); + + assertEquals(3, haystack.forEachByte(processor1)); // end of "abc" in haystack + assertEquals(0, processor1.getFoundNeedleId()); // index of "abc" in needles[] + + final MultiSearchProcessor processor2 = AbstractMultiSearchProcessorFactory.newAhoCorasicSearchProcessorFactory( + bytes("bc"), + bytes("abc") + ).newSearchProcessor(); + + assertEquals(3, haystack.forEachByte(processor2)); // end of "abc" in haystack + assertEquals(1, processor2.getFoundNeedleId()); // index of "abc" in needles[] + + haystack.release(); + } + + private static byte[] bytes(String s) { + return s.getBytes(CharsetUtil.UTF_8); + } + +} diff --git a/buffer/src/test/java/io/netty/buffer/search/SearchProcessorTest.java b/buffer/src/test/java/io/netty/buffer/search/SearchProcessorTest.java new file mode 100644 index 00000000000..b24caf0d8a4 --- /dev/null +++ b/buffer/src/test/java/io/netty/buffer/search/SearchProcessorTest.java @@ -0,0 +1,167 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.buffer.search; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.util.CharsetUtil; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.EnumSource; + +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; + +public class SearchProcessorTest { + + private enum Algorithm { + KNUTH_MORRIS_PRATT { + @Override + SearchProcessorFactory newFactory(byte[] needle) { + return AbstractSearchProcessorFactory.newKmpSearchProcessorFactory(needle); + } + }, + BITAP { + @Override + SearchProcessorFactory newFactory(byte[] needle) { + return AbstractSearchProcessorFactory.newBitapSearchProcessorFactory(needle); + } + }, + AHO_CORASIC { + @Override + SearchProcessorFactory newFactory(byte[] needle) { + return AbstractMultiSearchProcessorFactory.newAhoCorasicSearchProcessorFactory(needle); + } + }; + abstract SearchProcessorFactory newFactory(byte[] needle); + } + + @ParameterizedTest + @EnumSource(Algorithm.class) + public void testSearch(Algorithm algorithm) { + final ByteBuf haystack = Unpooled.copiedBuffer("abcâ˜ē", CharsetUtil.UTF_8); + + assertEquals(0, haystack.forEachByte(factory(algorithm, "a").newSearchProcessor())); + assertEquals(1, haystack.forEachByte(factory(algorithm, "ab").newSearchProcessor())); + assertEquals(2, haystack.forEachByte(factory(algorithm, "abc").newSearchProcessor())); + assertEquals(5, haystack.forEachByte(factory(algorithm, "abcâ˜ē").newSearchProcessor())); + assertEquals(-1, 
haystack.forEachByte(factory(algorithm, "abcâ˜ēâ˜ē").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "abcâ˜ēx").newSearchProcessor())); + + assertEquals(1, haystack.forEachByte(factory(algorithm, "b").newSearchProcessor())); + assertEquals(2, haystack.forEachByte(factory(algorithm, "bc").newSearchProcessor())); + assertEquals(5, haystack.forEachByte(factory(algorithm, "bcâ˜ē").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "bcâ˜ēâ˜ē").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "bcâ˜ēx").newSearchProcessor())); + + assertEquals(2, haystack.forEachByte(factory(algorithm, "c").newSearchProcessor())); + assertEquals(5, haystack.forEachByte(factory(algorithm, "câ˜ē").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "câ˜ēâ˜ē").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "câ˜ēx").newSearchProcessor())); + + assertEquals(5, haystack.forEachByte(factory(algorithm, "â˜ē").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "â˜ēâ˜ē").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "â˜ēx").newSearchProcessor())); + + assertEquals(-1, haystack.forEachByte(factory(algorithm, "z").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "aa").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "ba").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "abcd").newSearchProcessor())); + assertEquals(-1, haystack.forEachByte(factory(algorithm, "abcde").newSearchProcessor())); + + haystack.release(); + } + + @ParameterizedTest + @EnumSource(Algorithm.class) + public void testRepeating(Algorithm algorithm) { + final ByteBuf haystack = Unpooled.copiedBuffer("abcababc", CharsetUtil.UTF_8); + final int length = haystack.readableBytes(); + SearchProcessor 
processor = factory(algorithm, "ab").newSearchProcessor(); + + assertEquals(1, haystack.forEachByte(processor)); + assertEquals(4, haystack.forEachByte(2, length - 2, processor)); + assertEquals(6, haystack.forEachByte(5, length - 5, processor)); + assertEquals(-1, haystack.forEachByte(7, length - 7, processor)); + + haystack.release(); + } + + @ParameterizedTest + @EnumSource(Algorithm.class) + public void testOverlapping(Algorithm algorithm) { + final ByteBuf haystack = Unpooled.copiedBuffer("ababab", CharsetUtil.UTF_8); + final int length = haystack.readableBytes(); + SearchProcessor processor = factory(algorithm, "bab").newSearchProcessor(); + + assertEquals(3, haystack.forEachByte(processor)); + assertEquals(5, haystack.forEachByte(4, length - 4, processor)); + assertEquals(-1, haystack.forEachByte(6, length - 6, processor)); + + haystack.release(); + } + + @ParameterizedTest + @EnumSource(Algorithm.class) + public void testLongInputs(Algorithm algorithm) { + final int haystackLen = 1024; + final int needleLen = 64; + + final byte[] haystackBytes = new byte[haystackLen]; + haystackBytes[haystackLen - 1] = 1; + final ByteBuf haystack = Unpooled.copiedBuffer(haystackBytes); // 00000...00001 + + final byte[] needleBytes = new byte[needleLen]; // 000...000 + assertEquals(needleLen - 1, haystack.forEachByte(factory(algorithm, needleBytes).newSearchProcessor())); + + needleBytes[needleLen - 1] = 1; // 000...001 + assertEquals(haystackLen - 1, haystack.forEachByte(factory(algorithm, needleBytes).newSearchProcessor())); + + needleBytes[needleLen - 1] = 2; // 000...002 + assertEquals(-1, haystack.forEachByte(factory(algorithm, needleBytes).newSearchProcessor())); + + needleBytes[needleLen - 1] = 0; + needleBytes[0] = 1; // 100...000 + assertEquals(-1, haystack.forEachByte(factory(algorithm, needleBytes).newSearchProcessor())); + } + + @ParameterizedTest + @EnumSource(Algorithm.class) + public void testUniqueLen64Substrings(Algorithm algorithm) { + final byte[] 
haystackBytes = new byte[32 * 65]; // 1, 2, 2, 3, 3, 3, 4, 4, 4, 4, ... + int pos = 0; + for (int i = 1; i <= 64; i++) { + for (int j = 0; j < i; j++) { + haystackBytes[pos++] = (byte) i; + } + } + final ByteBuf haystack = Unpooled.copiedBuffer(haystackBytes); + + for (int start = 0; start < haystackBytes.length - 64; start++) { + final byte[] needle = Arrays.copyOfRange(haystackBytes, start, start + 64); + assertEquals(start + 63, haystack.forEachByte(factory(algorithm, needle).newSearchProcessor())); + } + } + + private SearchProcessorFactory factory(Algorithm algorithm, byte[] needle) { + return algorithm.newFactory(needle); + } + + private SearchProcessorFactory factory(Algorithm algorithm, String needle) { + return factory(algorithm, needle.getBytes(CharsetUtil.UTF_8)); + } + +} diff --git a/codec-dns/pom.xml b/codec-dns/pom.xml index 1902a69f42d..3c643f96897 100644 --- a/codec-dns/pom.xml +++ b/codec-dns/pom.xml @@ -6,7 +6,7 @@ ~ version 2.0 (the "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at: ~ - ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ https://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,13 +14,13 @@ ~ License for the specific language governing permissions and limitations ~ under the License. 
--> - + 4.0.0 io.netty netty-parent - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT netty-codec-dns @@ -33,6 +33,21 @@ + + ${project.groupId} + netty-common + ${project.version} + + + ${project.groupId} + netty-buffer + ${project.version} + + + ${project.groupId} + netty-transport + ${project.version} + ${project.groupId} netty-codec diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsMessage.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsMessage.java index 5d0a3933e0a..52d98d56c39 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsMessage.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsMessage.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -27,7 +27,7 @@ import java.util.ArrayList; import java.util.List; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * A skeletal implementation of {@link DnsMessage}. @@ -87,7 +87,7 @@ public DnsOpCode opCode() { @Override public DnsMessage setOpCode(DnsOpCode opCode) { - this.opCode = checkNotNull(opCode, "opCode"); + this.opCode = requireNonNull(opCode, "opCode"); return this; } @@ -427,6 +427,8 @@ private Object sectionAt(int section) { return authorities; case 3: return additionals; + default: + break; } throw new Error(); // Should never reach here. @@ -446,17 +448,19 @@ private void setSection(int section, Object value) { case 3: additionals = value; return; + default: + break; } throw new Error(); // Should never reach here. 
} private static int sectionOrdinal(DnsSection section) { - return checkNotNull(section, "section").ordinal(); + return requireNonNull(section, "section").ordinal(); } private static DnsRecord checkQuestion(int section, DnsRecord record) { - if (section == SECTION_QUESTION && !(checkNotNull(record, "record") instanceof DnsQuestion)) { + if (section == SECTION_QUESTION && !(requireNonNull(record, "record") instanceof DnsQuestion)) { throw new IllegalArgumentException( "record: " + record + " (expected: " + StringUtil.simpleClassName(DnsQuestion.class) + ')'); } @@ -469,6 +473,6 @@ private static T castRecord(Object record) { } private static ArrayList newRecordList() { - return new ArrayList(2); + return new ArrayList<>(2); } } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsOptPseudoRrRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsOptPseudoRrRecord.java index 944684ad48d..13439c16869 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsOptPseudoRrRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsOptPseudoRrRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -38,7 +38,7 @@ protected AbstractDnsOptPseudoRrRecord(int maxPayloadSize) { // See https://tools.ietf.org/html/rfc6891#section-6.1.3 private static long packIntoLong(int val, int val2) { // We are currently not support DO and Z fields, just use 0. 
- return ((val & 0xff) << 24 | (val2 & 0xff) << 16 | (0 & 0xff) << 8 | 0 & 0xff) & 0xFFFFFFFFL; + return ((val & 0xffL) << 24 | (val2 & 0xff) << 16) & 0xFFFFFFFFL; } @Override diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsRecord.java index 28b92c27f92..290ee1fa02d 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/AbstractDnsRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,12 +15,14 @@ */ package io.netty.handler.codec.dns; +import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; import java.net.IDN; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; /** * A skeletal implementation of {@link DnsRecord}. @@ -62,19 +64,28 @@ protected AbstractDnsRecord(String name, DnsRecordType type, long timeToLive) { * @param timeToLive the TTL value of the record */ protected AbstractDnsRecord(String name, DnsRecordType type, int dnsClass, long timeToLive) { - if (timeToLive < 0) { - throw new IllegalArgumentException("timeToLive: " + timeToLive + " (expected: >= 0)"); - } + checkPositiveOrZero(timeToLive, "timeToLive"); // Convert to ASCII which will also check that the length is not too big. 
// See: // - https://github.com/netty/netty/issues/4937 // - https://github.com/netty/netty/issues/4935 - this.name = appendTrailingDot(IDN.toASCII(checkNotNull(name, "name"))); - this.type = checkNotNull(type, "type"); + this.name = appendTrailingDot(IDNtoASCII(name)); + this.type = requireNonNull(type, "type"); this.dnsClass = (short) dnsClass; this.timeToLive = timeToLive; } + private static String IDNtoASCII(String name) { + requireNonNull(name, "name"); + if (PlatformDependent.isAndroid() && DefaultDnsRecordDecoder.ROOT.equals(name)) { + // Prior Android 10 there was a bug that did not correctly parse ".". + // + // See https://github.com/netty/netty/issues/10034 + return name; + } + return IDN.toASCII(name); + } + private static String appendTrailingDot(String name) { if (name.length() > 0 && name.charAt(name.length() - 1) != '.') { return name + '.'; diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQuery.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQuery.java index d61474a4ffd..17208884e8b 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQuery.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQuery.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryDecoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryDecoder.java index 660368317bd..0c9a02ae5f8 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryDecoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,17 +15,14 @@ */ package io.netty.handler.codec.dns; -import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.socket.DatagramPacket; -import io.netty.handler.codec.CorruptedFrameException; import io.netty.handler.codec.MessageToMessageDecoder; import io.netty.util.internal.UnstableApi; -import java.util.List; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * Decodes a {@link DatagramPacket} into a {@link DatagramDnsQuery}. @@ -47,69 +44,18 @@ public DatagramDnsQueryDecoder() { * Creates a new decoder with the specified {@code recordDecoder}. 
*/ public DatagramDnsQueryDecoder(DnsRecordDecoder recordDecoder) { - this.recordDecoder = checkNotNull(recordDecoder, "recordDecoder"); + this.recordDecoder = requireNonNull(recordDecoder, "recordDecoder"); } @Override - protected void decode(ChannelHandlerContext ctx, DatagramPacket packet, List out) throws Exception { - final ByteBuf buf = packet.content(); - - final DnsQuery query = newQuery(packet, buf); - boolean success = false; - try { - final int questionCount = buf.readUnsignedShort(); - final int answerCount = buf.readUnsignedShort(); - final int authorityRecordCount = buf.readUnsignedShort(); - final int additionalRecordCount = buf.readUnsignedShort(); - - decodeQuestions(query, buf, questionCount); - decodeRecords(query, DnsSection.ANSWER, buf, answerCount); - decodeRecords(query, DnsSection.AUTHORITY, buf, authorityRecordCount); - decodeRecords(query, DnsSection.ADDITIONAL, buf, additionalRecordCount); - - out.add(query); - success = true; - } finally { - if (!success) { - query.release(); - } - } - } - - private static DnsQuery newQuery(DatagramPacket packet, ByteBuf buf) { - final int id = buf.readUnsignedShort(); - - final int flags = buf.readUnsignedShort(); - if (flags >> 15 == 1) { - throw new CorruptedFrameException("not a query"); - } - final DnsQuery query = - new DatagramDnsQuery( - packet.sender(), - packet.recipient(), - id, - DnsOpCode.valueOf((byte) (flags >> 11 & 0xf))); - query.setRecursionDesired((flags >> 8 & 1) == 1); - query.setZ(flags >> 4 & 0x7); - return query; - } - - private void decodeQuestions(DnsQuery query, ByteBuf buf, int questionCount) throws Exception { - for (int i = questionCount; i > 0; i--) { - query.addRecord(DnsSection.QUESTION, recordDecoder.decodeQuestion(buf)); - } - } - - private void decodeRecords( - DnsQuery query, DnsSection section, ByteBuf buf, int count) throws Exception { - for (int i = count; i > 0; i--) { - final DnsRecord r = recordDecoder.decodeRecord(buf); - if (r == null) { - // Truncated 
response - break; + protected void decode(ChannelHandlerContext ctx, DatagramPacket packet) throws Exception { + DnsQuery query = DnsMessageUtil.decodeDnsQuery(recordDecoder, packet.content(), + new DnsMessageUtil.DnsQueryFactory() { + @Override + public DnsQuery newQuery(int id, DnsOpCode dnsOpCode) { + return new DatagramDnsQuery(packet.sender(), packet.recipient(), id, dnsOpCode); } - - query.addRecord(section, r); - } + }); + ctx.fireChannelRead(query); } } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryEncoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryEncoder.java index dbb562fd2b5..528bf8de855 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryEncoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsQueryEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -26,8 +26,6 @@ import java.net.InetSocketAddress; import java.util.List; -import static io.netty.util.internal.ObjectUtil.checkNotNull; - /** * Encodes a {@link DatagramDnsQuery} (or an {@link AddressedEnvelope} of {@link DnsQuery}} into a * {@link DatagramPacket}. @@ -36,7 +34,7 @@ @ChannelHandler.Sharable public class DatagramDnsQueryEncoder extends MessageToMessageEncoder> { - private final DnsRecordEncoder recordEncoder; + private final DnsQueryEncoder encoder; /** * Creates a new encoder with {@linkplain DnsRecordEncoder#DEFAULT the default record encoder}. @@ -49,7 +47,7 @@ public DatagramDnsQueryEncoder() { * Creates a new encoder with the specified {@code recordEncoder}. 
*/ public DatagramDnsQueryEncoder(DnsRecordEncoder recordEncoder) { - this.recordEncoder = checkNotNull(recordEncoder, "recordEncoder"); + this.encoder = new DnsQueryEncoder(recordEncoder); } @Override @@ -63,9 +61,7 @@ protected void encode( boolean success = false; try { - encodeHeader(query, buf); - encodeQuestions(query, buf); - encodeRecords(query, DnsSection.ADDITIONAL, buf); + encoder.encode(query, buf); success = true; } finally { if (!success) { @@ -85,38 +81,4 @@ protected ByteBuf allocateBuffer( @SuppressWarnings("unused") AddressedEnvelope msg) throws Exception { return ctx.alloc().ioBuffer(1024); } - - /** - * Encodes the header that is always 12 bytes long. - * - * @param query the query header being encoded - * @param buf the buffer the encoded data should be written to - */ - private static void encodeHeader(DnsQuery query, ByteBuf buf) { - buf.writeShort(query.id()); - int flags = 0; - flags |= (query.opCode().byteValue() & 0xFF) << 14; - if (query.isRecursionDesired()) { - flags |= 1 << 8; - } - buf.writeShort(flags); - buf.writeShort(query.count(DnsSection.QUESTION)); - buf.writeShort(0); // answerCount - buf.writeShort(0); // authorityResourceCount - buf.writeShort(query.count(DnsSection.ADDITIONAL)); - } - - private void encodeQuestions(DnsQuery query, ByteBuf buf) throws Exception { - final int count = query.count(DnsSection.QUESTION); - for (int i = 0; i < count; i++) { - recordEncoder.encodeQuestion((DnsQuestion) query.recordAt(DnsSection.QUESTION, i), buf); - } - } - - private void encodeRecords(DnsQuery query, DnsSection section, ByteBuf buf) throws Exception { - final int count = query.count(section); - for (int i = 0; i < count; i++) { - recordEncoder.encodeRecord(query.recordAt(section, i), buf); - } - } } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponse.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponse.java index bee775cc64a..c25b4dfb15d 100644 --- 
a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponse.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponse.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseDecoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseDecoder.java index 547d5aefac4..e3163e53b30 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseDecoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,6 @@ */ package io.netty.handler.codec.dns; -import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.socket.DatagramPacket; @@ -23,9 +22,7 @@ import io.netty.handler.codec.MessageToMessageDecoder; import io.netty.util.internal.UnstableApi; -import java.util.List; - -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import java.net.InetSocketAddress; /** * Decodes a {@link DatagramPacket} into a {@link DatagramDnsResponse}. 
@@ -34,7 +31,7 @@ @ChannelHandler.Sharable public class DatagramDnsResponseDecoder extends MessageToMessageDecoder { - private final DnsRecordDecoder recordDecoder; + private final DnsResponseDecoder responseDecoder; /** * Creates a new decoder with {@linkplain DnsRecordDecoder#DEFAULT the default record decoder}. @@ -47,73 +44,27 @@ public DatagramDnsResponseDecoder() { * Creates a new decoder with the specified {@code recordDecoder}. */ public DatagramDnsResponseDecoder(DnsRecordDecoder recordDecoder) { - this.recordDecoder = checkNotNull(recordDecoder, "recordDecoder"); + this.responseDecoder = new DnsResponseDecoder(recordDecoder) { + @Override + protected DnsResponse newResponse(InetSocketAddress sender, InetSocketAddress recipient, + int id, DnsOpCode opCode, DnsResponseCode responseCode) { + return new DatagramDnsResponse(sender, recipient, id, opCode, responseCode); + } + }; } @Override - protected void decode(ChannelHandlerContext ctx, DatagramPacket packet, List out) throws Exception { - final ByteBuf buf = packet.content(); - - final DnsResponse response = newResponse(packet, buf); - boolean success = false; + protected void decode(ChannelHandlerContext ctx, DatagramPacket packet) throws Exception { + final DnsResponse response; try { - final int questionCount = buf.readUnsignedShort(); - final int answerCount = buf.readUnsignedShort(); - final int authorityRecordCount = buf.readUnsignedShort(); - final int additionalRecordCount = buf.readUnsignedShort(); - - decodeQuestions(response, buf, questionCount); - decodeRecords(response, DnsSection.ANSWER, buf, answerCount); - decodeRecords(response, DnsSection.AUTHORITY, buf, authorityRecordCount); - decodeRecords(response, DnsSection.ADDITIONAL, buf, additionalRecordCount); - - out.add(response); - success = true; - } finally { - if (!success) { - response.release(); - } - } - } - - private static DnsResponse newResponse(DatagramPacket packet, ByteBuf buf) { - final int id = buf.readUnsignedShort(); - - final 
int flags = buf.readUnsignedShort(); - if (flags >> 15 == 0) { - throw new CorruptedFrameException("not a response"); + response = decodeResponse(ctx, packet); + } catch (IndexOutOfBoundsException e) { + throw new CorruptedFrameException("Unable to decode response", e); } - - final DnsResponse response = new DatagramDnsResponse( - packet.sender(), - packet.recipient(), - id, - DnsOpCode.valueOf((byte) (flags >> 11 & 0xf)), DnsResponseCode.valueOf((byte) (flags & 0xf))); - - response.setRecursionDesired((flags >> 8 & 1) == 1); - response.setAuthoritativeAnswer((flags >> 10 & 1) == 1); - response.setTruncated((flags >> 9 & 1) == 1); - response.setRecursionAvailable((flags >> 7 & 1) == 1); - response.setZ(flags >> 4 & 0x7); - return response; + ctx.fireChannelRead(response); } - private void decodeQuestions(DnsResponse response, ByteBuf buf, int questionCount) throws Exception { - for (int i = questionCount; i > 0; i --) { - response.addRecord(DnsSection.QUESTION, recordDecoder.decodeQuestion(buf)); - } - } - - private void decodeRecords( - DnsResponse response, DnsSection section, ByteBuf buf, int count) throws Exception { - for (int i = count; i > 0; i --) { - final DnsRecord r = recordDecoder.decodeRecord(buf); - if (r == null) { - // Truncated response - break; - } - - response.addRecord(section, r); - } + protected DnsResponse decodeResponse(ChannelHandlerContext ctx, DatagramPacket packet) throws Exception { + return responseDecoder.decode(packet.sender(), packet.recipient(), packet.content()); } } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseEncoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseEncoder.java index 904c78392ca..aa27f94a555 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseEncoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DatagramDnsResponseEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file 
except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -26,7 +26,7 @@ import java.net.InetSocketAddress; import java.util.List; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * Encodes a {@link DatagramDnsResponse} (or an {@link AddressedEnvelope} of {@link DnsResponse}} into a @@ -50,7 +50,7 @@ public DatagramDnsResponseEncoder() { * Creates a new encoder with the specified {@code recordEncoder}. */ public DatagramDnsResponseEncoder(DnsRecordEncoder recordEncoder) { - this.recordEncoder = checkNotNull(recordEncoder, "recordEncoder"); + this.recordEncoder = requireNonNull(recordEncoder, "recordEncoder"); } @Override @@ -61,19 +61,7 @@ protected void encode(ChannelHandlerContext ctx, final DnsResponse response = in.content(); final ByteBuf buf = allocateBuffer(ctx, in); - boolean success = false; - try { - encodeHeader(response, buf); - encodeQuestions(response, buf); - encodeRecords(response, DnsSection.ANSWER, buf); - encodeRecords(response, DnsSection.AUTHORITY, buf); - encodeRecords(response, DnsSection.ADDITIONAL, buf); - success = true; - } finally { - if (!success) { - buf.release(); - } - } + DnsMessageUtil.encodeDnsResponse(recordEncoder, response, buf); out.add(new DatagramPacket(buf, recipient, null)); } @@ -87,49 +75,4 @@ protected ByteBuf allocateBuffer( @SuppressWarnings("unused") AddressedEnvelope msg) throws Exception { return ctx.alloc().ioBuffer(1024); } - - /** - * Encodes the header that is always 12 bytes long. 
- * - * @param response the response header being encoded - * @param buf the buffer the encoded data should be written to - */ - private static void encodeHeader(DnsResponse response, ByteBuf buf) { - buf.writeShort(response.id()); - int flags = 32768; - flags |= (response.opCode().byteValue() & 0xFF) << 11; - if (response.isAuthoritativeAnswer()) { - flags |= 1 << 10; - } - if (response.isTruncated()) { - flags |= 1 << 9; - } - if (response.isRecursionDesired()) { - flags |= 1 << 8; - } - if (response.isRecursionAvailable()) { - flags |= 1 << 7; - } - flags |= response.z() << 4; - flags |= response.code().intValue(); - buf.writeShort(flags); - buf.writeShort(response.count(DnsSection.QUESTION)); - buf.writeShort(response.count(DnsSection.ANSWER)); - buf.writeShort(response.count(DnsSection.AUTHORITY)); - buf.writeShort(response.count(DnsSection.ADDITIONAL)); - } - - private void encodeQuestions(DnsResponse response, ByteBuf buf) throws Exception { - final int count = response.count(DnsSection.QUESTION); - for (int i = 0; i < count; i++) { - recordEncoder.encodeQuestion((DnsQuestion) response.recordAt(DnsSection.QUESTION, i), buf); - } - } - - private void encodeRecords(DnsResponse response, DnsSection section, ByteBuf buf) throws Exception { - final int count = response.count(section); - for (int i = 0; i < count; i++) { - recordEncoder.encodeRecord(response.recordAt(section, i), buf); - } - } } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsOptEcsRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsOptEcsRecord.java index d29b3c34b6f..e10e3966189 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsOptEcsRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsOptEcsRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsPtrRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsPtrRecord.java index eeca18ec80f..99feea0e97c 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsPtrRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsPtrRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.handler.codec.dns; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; @@ -44,7 +44,7 @@ public class DefaultDnsPtrRecord extends AbstractDnsRecord implements DnsPtrReco public DefaultDnsPtrRecord( String name, int dnsClass, long timeToLive, String hostname) { super(name, DnsRecordType.PTR, dnsClass, timeToLive); - this.hostname = checkNotNull(hostname, "hostname"); + this.hostname = requireNonNull(hostname, "hostname"); } @Override diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuery.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuery.java index b7b3732c6e6..6fcebcd1894 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuery.java +++ 
b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuery.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuestion.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuestion.java index af5bbf69cf5..f32320e055a 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuestion.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsQuestion.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRawRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRawRecord.java index f70eeda7c1f..a56b319f1da 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRawRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRawRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,7 +19,7 @@ import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@code DnsRawRecord} implementation. @@ -59,7 +59,7 @@ public DefaultDnsRawRecord(String name, DnsRecordType type, long timeToLive, Byt public DefaultDnsRawRecord( String name, DnsRecordType type, int dnsClass, long timeToLive, ByteBuf content) { super(name, type, dnsClass, timeToLive); - this.content = checkNotNull(content, "content"); + this.content = requireNonNull(content, "content"); } @Override diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoder.java index b4e50ff402e..33a1e02fee3 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,8 +16,6 @@ package io.netty.handler.codec.dns; import io.netty.buffer.ByteBuf; -import io.netty.handler.codec.CorruptedFrameException; -import io.netty.util.CharsetUtil; import io.netty.util.internal.UnstableApi; /** @@ -49,7 +47,7 @@ public final T decodeRecord(ByteBuf in) throws Exception { final String name = decodeName(in); final int endOffset = in.writerIndex(); - if (endOffset - startOffset < 10) { + if (endOffset - in.readerIndex() < 10) { // Not enough data in.readerIndex(startOffset); return null; @@ -98,6 +96,11 @@ protected DnsRecord decodeRecord( return new DefaultDnsPtrRecord( name, dnsClass, timeToLive, decodeName0(in.duplicate().setIndex(offset, offset + length))); } + if (type == DnsRecordType.CNAME || type == DnsRecordType.NS) { + return new DefaultDnsRawRecord(name, type, dnsClass, timeToLive, + DnsCodecUtil.decompressDomainName( + in.duplicate().setIndex(offset, offset + length))); + } return new DefaultDnsRawRecord( name, type, dnsClass, timeToLive, in.retainedDuplicate().setIndex(offset, offset + length)); } @@ -123,69 +126,6 @@ protected String decodeName0(ByteBuf in) { * @return the domain name for an entry */ public static String decodeName(ByteBuf in) { - int position = -1; - int checked = 0; - final int end = in.writerIndex(); - final int readable = in.readableBytes(); - - // Looking at the spec we should always have at least enough readable bytes to read a byte here but it seems - // some servers do not respect this for empty names. So just workaround this and return an empty name in this - // case. 
- // - // See: - // - https://github.com/netty/netty/issues/5014 - // - https://www.ietf.org/rfc/rfc1035.txt , Section 3.1 - if (readable == 0) { - return ROOT; - } - - final StringBuilder name = new StringBuilder(readable << 1); - while (in.isReadable()) { - final int len = in.readUnsignedByte(); - final boolean pointer = (len & 0xc0) == 0xc0; - if (pointer) { - if (position == -1) { - position = in.readerIndex() + 1; - } - - if (!in.isReadable()) { - throw new CorruptedFrameException("truncated pointer in a name"); - } - - final int next = (len & 0x3f) << 8 | in.readUnsignedByte(); - if (next >= end) { - throw new CorruptedFrameException("name has an out-of-range pointer"); - } - in.readerIndex(next); - - // check for loops - checked += 2; - if (checked >= end) { - throw new CorruptedFrameException("name contains a loop."); - } - } else if (len != 0) { - if (!in.isReadable(len)) { - throw new CorruptedFrameException("truncated label in a name"); - } - name.append(in.toString(in.readerIndex(), len, CharsetUtil.UTF_8)).append('.'); - in.skipBytes(len); - } else { // len == 0 - break; - } - } - - if (position != -1) { - in.readerIndex(position); - } - - if (name.length() == 0) { - return ROOT; - } - - if (name.charAt(name.length() - 1) != '.') { - name.append('.'); - } - - return name.toString(); + return DnsCodecUtil.decodeDomainName(in); } } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoder.java index 48f60bcc951..a5134ec2c78 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,14 +16,11 @@ package io.netty.handler.codec.dns; import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufUtil; import io.netty.channel.socket.InternetProtocolFamily; import io.netty.handler.codec.UnsupportedMessageTypeException; import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; -import static io.netty.handler.codec.dns.DefaultDnsRecordDecoder.ROOT; - /** * The default {@link DnsRecordEncoder} implementation. * @@ -93,7 +90,7 @@ private void encodeOptEcsRecord(DnsOptEcsRecord record, ByteBuf out) throws Exce sourcePrefixLength + " (expected: 0 >= " + addressBits + ')'); } - // See http://www.iana.org/assignments/address-family-numbers/address-family-numbers.xhtml + // See https://www.iana.org/assignments/address-family-numbers/address-family-numbers.xhtml final short addressNumber = (short) (bytes.length == 4 ? InternetProtocolFamily.IPv4.addressNumber() : InternetProtocolFamily.IPv6.addressNumber()); int payloadLength = calculateEcsAddressLength(sourcePrefixLength, lowOrderBitsToPreserve); @@ -141,25 +138,7 @@ private void encodeRawRecord(DnsRawRecord record, ByteBuf out) throws Exception } protected void encodeName(String name, ByteBuf buf) throws Exception { - if (ROOT.equals(name)) { - // Root domain - buf.writeByte(0); - return; - } - - final String[] labels = name.split("\\."); - for (String label : labels) { - final int labelLen = label.length(); - if (labelLen == 0) { - // zero-length label means the end of the name. 
- break; - } - - buf.writeByte(labelLen); - ByteBufUtil.writeAscii(buf, label); - } - - buf.writeByte(0); // marks end of name field + DnsCodecUtil.encodeDomainName(name, buf); } private static byte padWithZeros(byte b, int lowOrderBitsToPreserve) { diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsResponse.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsResponse.java index 987febeba97..98512b2e493 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsResponse.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DefaultDnsResponse.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@link DnsResponse} implementation. @@ -102,7 +102,7 @@ public DnsResponseCode code() { @Override public DnsResponse setCode(DnsResponseCode code) { - this.code = checkNotNull(code, "code"); + this.code = requireNonNull(code, "code"); return this; } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsCodecUtil.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsCodecUtil.java new file mode 100644 index 00000000000..a702771df86 --- /dev/null +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsCodecUtil.java @@ -0,0 +1,131 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.handler.codec.dns; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.handler.codec.CorruptedFrameException; +import io.netty.util.CharsetUtil; + +import static io.netty.handler.codec.dns.DefaultDnsRecordDecoder.*; + +final class DnsCodecUtil { + private DnsCodecUtil() { + // Util class + } + + static void encodeDomainName(String name, ByteBuf buf) { + if (ROOT.equals(name)) { + // Root domain + buf.writeByte(0); + return; + } + + final String[] labels = name.split("\\."); + for (String label : labels) { + final int labelLen = label.length(); + if (labelLen == 0) { + // zero-length label means the end of the name. + break; + } + + buf.writeByte(labelLen); + ByteBufUtil.writeAscii(buf, label); + } + + buf.writeByte(0); // marks end of name field + } + + static String decodeDomainName(ByteBuf in) { + int position = -1; + int checked = 0; + final int end = in.writerIndex(); + final int readable = in.readableBytes(); + + // Looking at the spec we should always have at least enough readable bytes to read a byte here but it seems + // some servers do not respect this for empty names. So just workaround this and return an empty name in this + // case. 
+ // + // See: + // - https://github.com/netty/netty/issues/5014 + // - https://www.ietf.org/rfc/rfc1035.txt , Section 3.1 + if (readable == 0) { + return ROOT; + } + + final StringBuilder name = new StringBuilder(readable << 1); + while (in.isReadable()) { + final int len = in.readUnsignedByte(); + final boolean pointer = (len & 0xc0) == 0xc0; + if (pointer) { + if (position == -1) { + position = in.readerIndex() + 1; + } + + if (!in.isReadable()) { + throw new CorruptedFrameException("truncated pointer in a name"); + } + + final int next = (len & 0x3f) << 8 | in.readUnsignedByte(); + if (next >= end) { + throw new CorruptedFrameException("name has an out-of-range pointer"); + } + in.readerIndex(next); + + // check for loops + checked += 2; + if (checked >= end) { + throw new CorruptedFrameException("name contains a loop."); + } + } else if (len != 0) { + if (!in.isReadable(len)) { + throw new CorruptedFrameException("truncated label in a name"); + } + name.append(in.toString(in.readerIndex(), len, CharsetUtil.UTF_8)).append('.'); + in.skipBytes(len); + } else { // len == 0 + break; + } + } + + if (position != -1) { + in.readerIndex(position); + } + + if (name.length() == 0) { + return ROOT; + } + + if (name.charAt(name.length() - 1) != '.') { + name.append('.'); + } + + return name.toString(); + } + + /** + * Decompress pointer data. 
+ * @param compression compressed data + * @return decompressed data + */ + static ByteBuf decompressDomainName(ByteBuf compression) { + String domainName = decodeDomainName(compression); + ByteBuf result = compression.alloc().buffer(domainName.length() << 1); + encodeDomainName(domainName, result); + return result; + } +} diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessage.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessage.java index f6d1abbd400..46be48ae80a 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessage.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessage.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessageUtil.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessageUtil.java index 81d3d8099ca..1e9475c92ca 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessageUtil.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsMessageUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,9 @@ */ package io.netty.handler.codec.dns; +import io.netty.buffer.ByteBuf; import io.netty.channel.AddressedEnvelope; +import io.netty.handler.codec.CorruptedFrameException; import io.netty.util.internal.StringUtil; import java.net.SocketAddress; @@ -177,5 +179,124 @@ private static void appendRecords(StringBuilder buf, DnsMessage message, DnsSect } } - private DnsMessageUtil() { } + static DnsQuery decodeDnsQuery(DnsRecordDecoder decoder, ByteBuf buf, DnsQueryFactory supplier) throws Exception { + DnsQuery query = newQuery(buf, supplier); + boolean success = false; + try { + int questionCount = buf.readUnsignedShort(); + int answerCount = buf.readUnsignedShort(); + int authorityRecordCount = buf.readUnsignedShort(); + int additionalRecordCount = buf.readUnsignedShort(); + decodeQuestions(decoder, query, buf, questionCount); + decodeRecords(decoder, query, DnsSection.ANSWER, buf, answerCount); + decodeRecords(decoder, query, DnsSection.AUTHORITY, buf, authorityRecordCount); + decodeRecords(decoder, query, DnsSection.ADDITIONAL, buf, additionalRecordCount); + success = true; + return query; + } finally { + if (!success) { + query.release(); + } + } + } + + private static DnsQuery newQuery(ByteBuf buf, DnsQueryFactory supplier) { + int id = buf.readUnsignedShort(); + int flags = buf.readUnsignedShort(); + if (flags >> 15 == 1) { + throw new CorruptedFrameException("not a query"); + } + + DnsQuery query = supplier.newQuery(id, DnsOpCode.valueOf((byte) (flags >> 11 & 0xf))); + query.setRecursionDesired((flags >> 8 & 1) == 1); + query.setZ(flags >> 4 & 0x7); + return query; + } + + private static void decodeQuestions(DnsRecordDecoder decoder, + DnsQuery query, ByteBuf buf, 
int questionCount) throws Exception { + for (int i = questionCount; i > 0; --i) { + query.addRecord(DnsSection.QUESTION, decoder.decodeQuestion(buf)); + } + } + + private static void decodeRecords(DnsRecordDecoder decoder, + DnsQuery query, DnsSection section, ByteBuf buf, int count) throws Exception { + for (int i = count; i > 0; --i) { + DnsRecord r = decoder.decodeRecord(buf); + if (r == null) { + break; + } + query.addRecord(section, r); + } + } + + static void encodeDnsResponse(DnsRecordEncoder encoder, DnsResponse response, ByteBuf buf) throws Exception { + boolean success = false; + try { + encodeHeader(response, buf); + encodeQuestions(encoder, response, buf); + encodeRecords(encoder, response, DnsSection.ANSWER, buf); + encodeRecords(encoder, response, DnsSection.AUTHORITY, buf); + encodeRecords(encoder, response, DnsSection.ADDITIONAL, buf); + success = true; + } finally { + if (!success) { + buf.release(); + } + } + } + + /** + * Encodes the header that is always 12 bytes long. 
+ * + * @param response the response header being encoded + * @param buf the buffer the encoded data should be written to + */ + private static void encodeHeader(DnsResponse response, ByteBuf buf) { + buf.writeShort(response.id()); + int flags = 32768; + flags |= (response.opCode().byteValue() & 0xFF) << 11; + if (response.isAuthoritativeAnswer()) { + flags |= 1 << 10; + } + if (response.isTruncated()) { + flags |= 1 << 9; + } + if (response.isRecursionDesired()) { + flags |= 1 << 8; + } + if (response.isRecursionAvailable()) { + flags |= 1 << 7; + } + flags |= response.z() << 4; + flags |= response.code().intValue(); + buf.writeShort(flags); + buf.writeShort(response.count(DnsSection.QUESTION)); + buf.writeShort(response.count(DnsSection.ANSWER)); + buf.writeShort(response.count(DnsSection.AUTHORITY)); + buf.writeShort(response.count(DnsSection.ADDITIONAL)); + } + + private static void encodeQuestions(DnsRecordEncoder encoder, DnsResponse response, ByteBuf buf) throws Exception { + int count = response.count(DnsSection.QUESTION); + for (int i = 0; i < count; ++i) { + encoder.encodeQuestion(response.recordAt(DnsSection.QUESTION, i), buf); + } + } + + private static void encodeRecords(DnsRecordEncoder encoder, + DnsResponse response, DnsSection section, ByteBuf buf) throws Exception { + int count = response.count(section); + for (int i = 0; i < count; ++i) { + encoder.encodeRecord(response.recordAt(section, i), buf); + } + } + + interface DnsQueryFactory { + DnsQuery newQuery(int id, DnsOpCode dnsOpCode); + } + + private DnsMessageUtil() { + } } diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOpCode.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOpCode.java index 6b615053259..b58896426d6 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOpCode.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOpCode.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * 
with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The DNS {@code OpCode} as defined in RFC2929. @@ -65,6 +65,8 @@ public static DnsOpCode valueOf(int b) { return NOTIFY; case 0x05: return UPDATE; + default: + break; } return new DnsOpCode(b); @@ -80,7 +82,7 @@ private DnsOpCode(int byteValue) { public DnsOpCode(int byteValue, String name) { this.byteValue = (byte) byteValue; - this.name = checkNotNull(name, "name"); + this.name = requireNonNull(name, "name"); } public byte byteValue() { diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptEcsRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptEcsRecord.java index 12b45bca57a..d602dd626fe 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptEcsRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptEcsRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptPseudoRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptPseudoRecord.java index 4f3ca798cd4..4269d806cb6 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptPseudoRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsOptPseudoRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsPtrRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsPtrRecord.java index b99518af68b..3ba5622d12b 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsPtrRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsPtrRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuery.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuery.java index ca21fd77a8b..18b100dcea7 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuery.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuery.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQueryEncoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQueryEncoder.java new file mode 100644 index 00000000000..a2f2dab62b0 --- /dev/null +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQueryEncoder.java @@ -0,0 +1,75 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.dns; + +import io.netty.buffer.ByteBuf; + +import java.util.Objects; + +final class DnsQueryEncoder { + + private final DnsRecordEncoder recordEncoder; + + /** + * Creates a new encoder with the specified {@code recordEncoder}. + */ + DnsQueryEncoder(DnsRecordEncoder recordEncoder) { + this.recordEncoder = Objects.requireNonNull(recordEncoder, "recordEncoder"); + } + + /** + * Encodes the given {@link DnsQuery} into a {@link ByteBuf}. + */ + void encode(DnsQuery query, ByteBuf out) throws Exception { + encodeHeader(query, out); + encodeQuestions(query, out); + encodeRecords(query, DnsSection.ADDITIONAL, out); + } + + /** + * Encodes the header that is always 12 bytes long. + * + * @param query the query header being encoded + * @param buf the buffer the encoded data should be written to + */ + private static void encodeHeader(DnsQuery query, ByteBuf buf) { + buf.writeShort(query.id()); + int flags = 0; + flags |= (query.opCode().byteValue() & 0xFF) << 14; + if (query.isRecursionDesired()) { + flags |= 1 << 8; + } + buf.writeShort(flags); + buf.writeShort(query.count(DnsSection.QUESTION)); + buf.writeShort(0); // answerCount + buf.writeShort(0); // authorityResourceCount + buf.writeShort(query.count(DnsSection.ADDITIONAL)); + } + + private void encodeQuestions(DnsQuery query, ByteBuf buf) throws Exception { + final int count = query.count(DnsSection.QUESTION); + for (int i = 0; i < count; i++) { + recordEncoder.encodeQuestion((DnsQuestion) query.recordAt(DnsSection.QUESTION, i), buf); + } + } + + private void encodeRecords(DnsQuery query, DnsSection section, ByteBuf buf) throws Exception { + final int count = query.count(section); + for (int i = 0; i < count; i++) { + recordEncoder.encodeRecord(query.recordAt(section, i), buf); + } + } +} diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuestion.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuestion.java index fa614cd1037..54697184fbe 100644 --- 
a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuestion.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsQuestion.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRawRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRawRecord.java index 8176c911092..07f39a75b26 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRawRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRawRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecord.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecord.java index 7ccadd93988..85630df8b03 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecord.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecord.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordDecoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordDecoder.java index 0870b466868..13c510e5022 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordDecoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordEncoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordEncoder.java index 503a4009e70..a5ce4186394 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordEncoder.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordType.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordType.java index 1c5f79b8d78..f4455beaf3a 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordType.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsRecordType.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -303,7 +303,7 @@ public class DnsRecordType implements Comparable { */ public static final DnsRecordType DLV = new DnsRecordType(0x8001, "DLV"); - private static final Map BY_NAME = new HashMap(); + private static final Map BY_NAME = new HashMap<>(); private static final IntObjectHashMap BY_TYPE = new IntObjectHashMap(); private static final String EXPECTED; diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponse.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponse.java index 345f66d68f3..537e216ed49 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponse.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponse.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponseCode.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponseCode.java index abc9b0d4eab..7c370f90bfc 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponseCode.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponseCode.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The DNS {@code RCODE}, as defined in RFC2929. @@ -173,7 +173,7 @@ public DnsResponseCode(int code, String name) { } this.code = code; - this.name = checkNotNull(name, "name"); + this.name = requireNonNull(name, "name"); } /** diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponseDecoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponseDecoder.java new file mode 100644 index 00000000000..017f259a309 --- /dev/null +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsResponseDecoder.java @@ -0,0 +1,104 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.dns; + +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.CorruptedFrameException; + +import java.net.SocketAddress; +import java.util.Objects; + +abstract class DnsResponseDecoder { + + private final DnsRecordDecoder recordDecoder; + + /** + * Creates a new decoder with the specified {@code recordDecoder}. + */ + DnsResponseDecoder(DnsRecordDecoder recordDecoder) { + this.recordDecoder = Objects.requireNonNull(recordDecoder, "recordDecoder"); + } + + final DnsResponse decode(A sender, A recipient, ByteBuf buffer) throws Exception { + final int id = buffer.readUnsignedShort(); + + final int flags = buffer.readUnsignedShort(); + if (flags >> 15 == 0) { + throw new CorruptedFrameException("not a response"); + } + + final DnsResponse response = newResponse( + sender, + recipient, + id, + DnsOpCode.valueOf((byte) (flags >> 11 & 0xf)), DnsResponseCode.valueOf((byte) (flags & 0xf))); + + response.setRecursionDesired((flags >> 8 & 1) == 1); + response.setAuthoritativeAnswer((flags >> 10 & 1) == 1); + response.setTruncated((flags >> 9 & 1) == 1); + response.setRecursionAvailable((flags >> 7 & 1) == 1); + response.setZ(flags >> 4 & 0x7); + + boolean success = false; + try { + final int questionCount = buffer.readUnsignedShort(); + final int answerCount = buffer.readUnsignedShort(); + final int authorityRecordCount = buffer.readUnsignedShort(); + final int additionalRecordCount = buffer.readUnsignedShort(); + + decodeQuestions(response, buffer, questionCount); + if (!decodeRecords(response, DnsSection.ANSWER, 
buffer, answerCount)) { + success = true; + return response; + } + if (!decodeRecords(response, DnsSection.AUTHORITY, buffer, authorityRecordCount)) { + success = true; + return response; + } + + decodeRecords(response, DnsSection.ADDITIONAL, buffer, additionalRecordCount); + success = true; + return response; + } finally { + if (!success) { + response.release(); + } + } + } + + protected abstract DnsResponse newResponse(A sender, A recipient, int id, + DnsOpCode opCode, DnsResponseCode responseCode) throws Exception; + + private void decodeQuestions(DnsResponse response, ByteBuf buf, int questionCount) throws Exception { + for (int i = questionCount; i > 0; i --) { + response.addRecord(DnsSection.QUESTION, recordDecoder.decodeQuestion(buf)); + } + } + + private boolean decodeRecords( + DnsResponse response, DnsSection section, ByteBuf buf, int count) throws Exception { + for (int i = count; i > 0; i --) { + final DnsRecord r = recordDecoder.decodeRecord(buf); + if (r == null) { + // Truncated response + return false; + } + + response.addRecord(section, r); + } + return true; + } +} diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsSection.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsSection.java index dc597c2ef35..fcf9e766ca5 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsSection.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/DnsSection.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsQueryDecoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsQueryDecoder.java new file mode 100644 index 00000000000..7af77a8eb4d --- /dev/null +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsQueryDecoder.java @@ -0,0 +1,57 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.dns; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.netty.util.internal.ObjectUtil; +import io.netty.util.internal.UnstableApi; + +@UnstableApi +public final class TcpDnsQueryDecoder extends LengthFieldBasedFrameDecoder { + private final DnsRecordDecoder decoder; + + /** + * Creates a new decoder with {@linkplain DnsRecordDecoder#DEFAULT the default record decoder}. + */ + public TcpDnsQueryDecoder() { + this(DnsRecordDecoder.DEFAULT, 65535); + } + + /** + * Creates a new decoder with the specified {@code decoder}. 
+ */ + public TcpDnsQueryDecoder(DnsRecordDecoder decoder, int maxFrameLength) { + super(maxFrameLength, 0, 2, 0, 2); + this.decoder = ObjectUtil.checkNotNull(decoder, "decoder"); + } + + @Override + protected Object decode0(ChannelHandlerContext ctx, ByteBuf in) throws Exception { + ByteBuf frame = (ByteBuf) super.decode0(ctx, in); + if (frame == null) { + return null; + } + + return DnsMessageUtil.decodeDnsQuery(decoder, frame.slice(), new DnsMessageUtil.DnsQueryFactory() { + @Override + public DnsQuery newQuery(int id, DnsOpCode dnsOpCode) { + return new DefaultDnsQuery(id, dnsOpCode); + } + }); + } +} diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsQueryEncoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsQueryEncoder.java new file mode 100644 index 00000000000..401d3b46b8b --- /dev/null +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsQueryEncoder.java @@ -0,0 +1,64 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.dns; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToByteEncoder; +import io.netty.util.internal.UnstableApi; + +@ChannelHandler.Sharable +@UnstableApi +public final class TcpDnsQueryEncoder extends MessageToByteEncoder { + + private final DnsQueryEncoder encoder; + + /** + * Creates a new encoder with {@linkplain DnsRecordEncoder#DEFAULT the default record encoder}. + */ + public TcpDnsQueryEncoder() { + this(DnsRecordEncoder.DEFAULT); + } + + /** + * Creates a new encoder with the specified {@code recordEncoder}. + */ + public TcpDnsQueryEncoder(DnsRecordEncoder recordEncoder) { + this.encoder = new DnsQueryEncoder(recordEncoder); + } + + @Override + protected void encode(ChannelHandlerContext ctx, DnsQuery msg, ByteBuf out) throws Exception { + // Length is two octets as defined by RFC-7766 + // See https://tools.ietf.org/html/rfc7766#section-8 + out.writerIndex(out.writerIndex() + 2); + encoder.encode(msg, out); + + // Now fill in the correct length based on the amount of data that we wrote the ByteBuf. 
+ out.setShort(0, out.readableBytes() - 2); + } + + @Override + protected ByteBuf allocateBuffer(ChannelHandlerContext ctx, @SuppressWarnings("unused") DnsQuery msg, + boolean preferDirect) { + if (preferDirect) { + return ctx.alloc().ioBuffer(1024); + } else { + return ctx.alloc().heapBuffer(1024); + } + } +} diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsResponseDecoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsResponseDecoder.java new file mode 100644 index 00000000000..986d4827c16 --- /dev/null +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsResponseDecoder.java @@ -0,0 +1,72 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.dns; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.LengthFieldBasedFrameDecoder; +import io.netty.util.internal.UnstableApi; + +import java.net.SocketAddress; + +@UnstableApi +public final class TcpDnsResponseDecoder extends LengthFieldBasedFrameDecoder { + + private final DnsResponseDecoder responseDecoder; + + /** + * Creates a new decoder with {@linkplain DnsRecordDecoder#DEFAULT the default record decoder}. 
+ */ + public TcpDnsResponseDecoder() { + this(DnsRecordDecoder.DEFAULT, 64 * 1024); + } + + /** + * Creates a new decoder with the specified {@code recordDecoder} and {@code maxFrameLength} + */ + public TcpDnsResponseDecoder(DnsRecordDecoder recordDecoder, int maxFrameLength) { + // Length is two octets as defined by RFC-7766 + // See https://tools.ietf.org/html/rfc7766#section-8 + super(maxFrameLength, 0, 2, 0, 2); + + this.responseDecoder = new DnsResponseDecoder(recordDecoder) { + @Override + protected DnsResponse newResponse(SocketAddress sender, SocketAddress recipient, + int id, DnsOpCode opCode, DnsResponseCode responseCode) { + return new DefaultDnsResponse(id, opCode, responseCode); + } + }; + } + + @Override + protected Object decode0(ChannelHandlerContext ctx, ByteBuf in) throws Exception { + ByteBuf frame = (ByteBuf) super.decode0(ctx, in); + if (frame == null) { + return null; + } + + try { + return responseDecoder.decode(ctx.channel().remoteAddress(), ctx.channel().localAddress(), frame.slice()); + } finally { + frame.release(); + } + } + + @Override + protected ByteBuf extractFrame(ChannelHandlerContext ctx, ByteBuf buffer, int index, int length) { + return buffer.copy(index, length); + } +} diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsResponseEncoder.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsResponseEncoder.java new file mode 100644 index 00000000000..2a9d5d01c84 --- /dev/null +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/TcpDnsResponseEncoder.java @@ -0,0 +1,56 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.dns; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToMessageEncoder; +import io.netty.util.internal.ObjectUtil; +import io.netty.util.internal.UnstableApi; + +import java.util.List; + +@UnstableApi +@ChannelHandler.Sharable +public final class TcpDnsResponseEncoder extends MessageToMessageEncoder { + private final DnsRecordEncoder encoder; + + /** + * Creates a new encoder with {@linkplain DnsRecordEncoder#DEFAULT the default record encoder}. + */ + public TcpDnsResponseEncoder() { + this(DnsRecordEncoder.DEFAULT); + } + + /** + * Creates a new encoder with the specified {@code encoder}. 
+ */ + public TcpDnsResponseEncoder(DnsRecordEncoder encoder) { + this.encoder = ObjectUtil.checkNotNull(encoder, "encoder"); + } + + @Override + protected void encode(ChannelHandlerContext ctx, DnsResponse response, List out) throws Exception { + ByteBuf buf = ctx.alloc().ioBuffer(1024); + + buf.writerIndex(buf.writerIndex() + 2); + DnsMessageUtil.encodeDnsResponse(encoder, response, buf); + buf.setShort(0, buf.readableBytes() - 2); + + out.add(buf); + } +} diff --git a/codec-dns/src/main/java/io/netty/handler/codec/dns/package-info.java b/codec-dns/src/main/java/io/netty/handler/codec/dns/package-info.java index 0be6aa683aa..fd3d6bcd9ab 100644 --- a/codec-dns/src/main/java/io/netty/handler/codec/dns/package-info.java +++ b/codec-dns/src/main/java/io/netty/handler/codec/dns/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-dns/src/test/java/io/netty/handler/codec/dns/AbstractDnsRecordTest.java b/codec-dns/src/test/java/io/netty/handler/codec/dns/AbstractDnsRecordTest.java index d55a0b1354a..c5c6d8b5351 100644 --- a/codec-dns/src/test/java/io/netty/handler/codec/dns/AbstractDnsRecordTest.java +++ b/codec-dns/src/test/java/io/netty/handler/codec/dns/AbstractDnsRecordTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,8 +15,10 @@ */ package io.netty.handler.codec.dns; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; public class AbstractDnsRecordTest { @@ -24,39 +26,39 @@ public class AbstractDnsRecordTest { public void testValidDomainName() { String name = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; AbstractDnsRecord record = new AbstractDnsRecord(name, DnsRecordType.A, 0) { }; - Assert.assertEquals(name + '.', record.name()); + assertEquals(name + '.', record.name()); } @Test public void testValidDomainNameUmlaut() { String name = "ä"; AbstractDnsRecord record = new AbstractDnsRecord(name, DnsRecordType.A, 0) { }; - Assert.assertEquals("xn--4ca.", record.name()); + assertEquals("xn--4ca.", record.name()); } @Test public void testValidDomainNameTrailingDot() { String name = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa."; AbstractDnsRecord record = new AbstractDnsRecord(name, DnsRecordType.A, 0) { }; - Assert.assertEquals(name, record.name()); + assertEquals(name, record.name()); } @Test public void testValidDomainNameUmlautTrailingDot() { String name = "ä."; AbstractDnsRecord record = new AbstractDnsRecord(name, DnsRecordType.A, 0) { }; - Assert.assertEquals("xn--4ca.", record.name()); + assertEquals("xn--4ca.", record.name()); } - @Test(expected = IllegalArgumentException.class) + @Test public void testValidDomainNameLength() { String name = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; - new AbstractDnsRecord(name, DnsRecordType.A, 0) { }; + 
assertThrows(IllegalArgumentException.class, () -> new AbstractDnsRecord(name, DnsRecordType.A, 0) { }); } - @Test(expected = IllegalArgumentException.class) + @Test public void testValidDomainNameUmlautLength() { String name = "äaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"; - new AbstractDnsRecord(name, DnsRecordType.A, 0) { }; + assertThrows(IllegalArgumentException.class, () -> new AbstractDnsRecord(name, DnsRecordType.A, 0) { }); } } diff --git a/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoderTest.java b/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoderTest.java index 6de6ce5d724..a8379f6d8d7 100644 --- a/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoderTest.java +++ b/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,10 +16,12 @@ package io.netty.handler.codec.dns; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; public class DefaultDnsRecordDecoderTest { @@ -89,6 +91,81 @@ public void testDecodePtrRecord() throws Exception { } } + @Test + public void testdecompressCompressPointer() { + byte[] compressionPointer = { + 5, 'n', 'e', 't', 't', 'y', 2, 'i', 'o', 0, + (byte) 0xC0, 0 + }; + ByteBuf buffer = Unpooled.wrappedBuffer(compressionPointer); + ByteBuf 
uncompressed = null; + try { + uncompressed = DnsCodecUtil.decompressDomainName(buffer.duplicate().setIndex(10, 12)); + assertEquals(0, ByteBufUtil.compare(buffer.duplicate().setIndex(0, 10), uncompressed)); + } finally { + buffer.release(); + if (uncompressed != null) { + uncompressed.release(); + } + } + } + + @Test + public void testdecompressNestedCompressionPointer() { + byte[] nestedCompressionPointer = { + 6, 'g', 'i', 't', 'h', 'u', 'b', 2, 'i', 'o', 0, // github.io + 5, 'n', 'e', 't', 't', 'y', (byte) 0xC0, 0, // netty.github.io + (byte) 0xC0, 11, // netty.github.io + }; + ByteBuf buffer = Unpooled.wrappedBuffer(nestedCompressionPointer); + ByteBuf uncompressed = null; + try { + uncompressed = DnsCodecUtil.decompressDomainName(buffer.duplicate().setIndex(19, 21)); + assertEquals(0, ByteBufUtil.compare( + Unpooled.wrappedBuffer(new byte[] { + 5, 'n', 'e', 't', 't', 'y', 6, 'g', 'i', 't', 'h', 'u', 'b', 2, 'i', 'o', 0 + }), uncompressed)); + } finally { + buffer.release(); + if (uncompressed != null) { + uncompressed.release(); + } + } + } + + @Test + public void testDecodeCompressionRDataPointer() throws Exception { + DefaultDnsRecordDecoder decoder = new DefaultDnsRecordDecoder(); + byte[] compressionPointer = { + 5, 'n', 'e', 't', 't', 'y', 2, 'i', 'o', 0, + (byte) 0xC0, 0 + }; + ByteBuf buffer = Unpooled.wrappedBuffer(compressionPointer); + DefaultDnsRawRecord cnameRecord = null; + DefaultDnsRawRecord nsRecord = null; + try { + cnameRecord = (DefaultDnsRawRecord) decoder.decodeRecord( + "netty.github.io", DnsRecordType.CNAME, DnsRecord.CLASS_IN, 60, buffer, 10, 2); + assertEquals(0, ByteBufUtil.compare(buffer.duplicate().setIndex(0, 10), cnameRecord.content()), + "The rdata of CNAME-type record should be decompressed in advance"); + assertEquals("netty.io.", DnsCodecUtil.decodeDomainName(cnameRecord.content())); + nsRecord = (DefaultDnsRawRecord) decoder.decodeRecord( + "netty.github.io", DnsRecordType.NS, DnsRecord.CLASS_IN, 60, buffer, 10, 2); + 
assertEquals(0, ByteBufUtil.compare(buffer.duplicate().setIndex(0, 10), nsRecord.content()), + "The rdata of NS-type record should be decompressed in advance"); + assertEquals("netty.io.", DnsCodecUtil.decodeDomainName(nsRecord.content())); + } finally { + buffer.release(); + if (cnameRecord != null) { + cnameRecord.release(); + } + + if (nsRecord != null) { + nsRecord.release(); + } + } + } + @Test public void testDecodeMessageCompression() throws Exception { // See https://www.ietf.org/rfc/rfc1035 [4.1.4. Message compression] @@ -156,4 +233,24 @@ public void testDecodeMessageCompression() throws Exception { buffer.release(); } } + + @Test + public void testTruncatedPacket() throws Exception { + ByteBuf buffer = Unpooled.buffer(); + buffer.writeByte(0); + buffer.writeShort(DnsRecordType.A.intValue()); + buffer.writeShort(1); + buffer.writeInt(32); + + // Write a truncated last value. + buffer.writeByte(0); + DefaultDnsRecordDecoder decoder = new DefaultDnsRecordDecoder(); + try { + int readerIndex = buffer.readerIndex(); + assertNull(decoder.decodeRecord(buffer)); + assertEquals(readerIndex, buffer.readerIndex()); + } finally { + buffer.release(); + } + } } diff --git a/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoderTest.java b/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoderTest.java index 88e984dc969..7d3d3337579 100644 --- a/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoderTest.java +++ b/codec-dns/src/test/java/io/netty/handler/codec/dns/DefaultDnsRecordEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,14 +18,14 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.socket.InternetProtocolFamily; -import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.SocketUtils; import io.netty.util.internal.StringUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.net.InetAddress; +import java.util.concurrent.ThreadLocalRandom; -import static org.junit.Assert.assertEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; public class DefaultDnsRecordEncoderTest { @@ -97,7 +97,7 @@ private static void testIp(InetAddress address, int prefix) throws Exception { // Pad the leftover of the last byte with zeros. int idx = addressPart.writerIndex() - 1; byte lastByte = addressPart.getByte(idx); - int paddingMask = ~((1 << (8 - lowOrderBitsToPreserve)) - 1); + int paddingMask = -1 << 8 - lowOrderBitsToPreserve; addressPart.setByte(idx, lastByte & paddingMask); } @@ -146,6 +146,6 @@ private static void testIp(InetAddress address, int prefix) throws Exception { } private static int nextInt(int max) { - return PlatformDependent.threadLocalRandom().nextInt(max); + return ThreadLocalRandom.current().nextInt(max); } } diff --git a/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsQueryTest.java b/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsQueryTest.java index d322f61a01e..272a62be3e7 100644 --- a/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsQueryTest.java +++ b/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsQueryTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,23 +19,28 @@ import io.netty.channel.socket.DatagramPacket; import io.netty.util.internal.SocketUtils; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.net.InetSocketAddress; import java.util.ArrayList; import java.util.List; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.is; -import static org.junit.Assert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class DnsQueryTest { @Test - public void writeQueryTest() throws Exception { + public void testEncodeAndDecodeQuery() { InetSocketAddress addr = SocketUtils.socketAddress("8.8.8.8", 53); - EmbeddedChannel embedder = new EmbeddedChannel(new DatagramDnsQueryEncoder()); - List queries = new ArrayList(5); + EmbeddedChannel writeChannel = new EmbeddedChannel(new DatagramDnsQueryEncoder()); + EmbeddedChannel readChannel = new EmbeddedChannel(new DatagramDnsQueryDecoder()); + + List queries = new ArrayList<>(5); queries.add(new DatagramDnsQuery(null, addr, 1).setRecord( DnsSection.QUESTION, new DefaultDnsQuestion("1.0.0.127.in-addr.arpa", DnsRecordType.PTR))); @@ -58,12 +63,17 @@ public void writeQueryTest() throws Exception { assertThat(query.count(DnsSection.AUTHORITY), is(0)); assertThat(query.count(DnsSection.ADDITIONAL), is(0)); - embedder.writeOutbound(query); + assertTrue(writeChannel.writeOutbound(query)); - DatagramPacket packet = embedder.readOutbound(); - Assert.assertTrue(packet.content().isReadable()); - packet.release(); 
- Assert.assertNull(embedder.readOutbound()); + DatagramPacket packet = writeChannel.readOutbound(); + assertTrue(packet.content().isReadable()); + assertTrue(readChannel.writeInbound(packet)); + assertEquals(query, readChannel.readInbound()); + assertNull(writeChannel.readOutbound()); + assertNull(readChannel.readInbound()); } + + assertFalse(writeChannel.finish()); + assertFalse(readChannel.finish()); } } diff --git a/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsRecordTypeTest.java b/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsRecordTypeTest.java index aeeab95b04e..2a74834ad77 100644 --- a/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsRecordTypeTest.java +++ b/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsRecordTypeTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.handler.codec.dns; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.lang.reflect.Field; import java.lang.reflect.Modifier; @@ -23,12 +23,15 @@ import java.util.HashSet; import java.util.List; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertSame; public class DnsRecordTypeTest { private static List allTypes() throws Exception { - List result = new ArrayList(); + List result = new ArrayList<>(); for (Field field : DnsRecordType.class.getFields()) { if ((field.getModifiers() & Modifier.STATIC) != 0 && field.getType() == 
DnsRecordType.class) { result.add((DnsRecordType) field.get(null)); @@ -40,8 +43,8 @@ private static List allTypes() throws Exception { @Test public void testSanity() throws Exception { - assertEquals("More than one type has the same int value", - allTypes().size(), new HashSet(allTypes()).size()); + assertEquals(allTypes().size(), new HashSet<>(allTypes()).size(), + "More than one type has the same int value"); } /** @@ -77,7 +80,7 @@ public void testFind() throws Exception { DnsRecordType found = DnsRecordType.valueOf(t.intValue()); assertSame(t, found); found = DnsRecordType.valueOf(t.name()); - assertSame(t.name(), t, found); + assertSame(t, found, t.name()); } } } diff --git a/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsResponseTest.java b/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsResponseTest.java index 9bf1e27aaa0..79f405f2cea 100644 --- a/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsResponseTest.java +++ b/codec-dns/src/test/java/io/netty/handler/codec/dns/DnsResponseTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,14 +21,14 @@ import io.netty.channel.embedded.EmbeddedChannel; import io.netty.channel.socket.DatagramPacket; import io.netty.handler.codec.CorruptedFrameException; -import org.junit.Rule; -import org.junit.Test; -import org.junit.rules.ExpectedException; +import org.junit.jupiter.api.Test; import java.net.InetSocketAddress; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; public class DnsResponseTest { @@ -90,16 +90,30 @@ public void readResponseTest() throws Exception { envelope.release(); } + assertFalse(embedder.finish()); } - @Rule - public ExpectedException exception = ExpectedException.none(); - @Test public void readMalformedResponseTest() throws Exception { EmbeddedChannel embedder = new EmbeddedChannel(new DatagramDnsResponseDecoder()); ByteBuf packet = embedder.alloc().buffer(512).writeBytes(malformedLoopPacket); - exception.expect(CorruptedFrameException.class); - embedder.writeInbound(new DatagramPacket(packet, null, new InetSocketAddress(0))); + try { + assertThrows(CorruptedFrameException.class, + () -> embedder.writeInbound(new DatagramPacket(packet, null, new InetSocketAddress(0)))); + } finally { + assertFalse(embedder.finish()); + } + } + + @Test + public void readIncompleteResponseTest() { + EmbeddedChannel embedder = new EmbeddedChannel(new DatagramDnsResponseDecoder()); + ByteBuf packet = embedder.alloc().buffer(512); + try { + assertThrows(CorruptedFrameException.class, + () -> embedder.writeInbound(new DatagramPacket(packet, null, new 
InetSocketAddress(0)))); + } finally { + assertFalse(embedder.finish()); + } } } diff --git a/codec-dns/src/test/java/io/netty/handler/codec/dns/TcpDnsTest.java b/codec-dns/src/test/java/io/netty/handler/codec/dns/TcpDnsTest.java new file mode 100644 index 00000000000..e4229ce1d1c --- /dev/null +++ b/codec-dns/src/test/java/io/netty/handler/codec/dns/TcpDnsTest.java @@ -0,0 +1,86 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.dns; + +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.util.ReferenceCountUtil; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.Test; + +import java.util.Random; + +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.is; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class TcpDnsTest { + private static final String QUERY_DOMAIN = "www.example.com"; + private static final long TTL = 600; + private static final byte[] QUERY_RESULT = new byte[]{(byte) 192, (byte) 168, 1, 1}; + + @Test + public void testQueryDecode() { + EmbeddedChannel channel = new EmbeddedChannel(new TcpDnsQueryDecoder()); + + int randomID = new Random().nextInt(60000 - 1000) + 1000; + DnsQuery query = new DefaultDnsQuery(randomID, DnsOpCode.QUERY) + .setRecord(DnsSection.QUESTION, new DefaultDnsQuestion(QUERY_DOMAIN, DnsRecordType.A)); + assertTrue(channel.writeInbound(query)); + + DnsQuery readQuery = channel.readInbound(); + assertThat(readQuery, is(query)); + assertThat(readQuery.recordAt(DnsSection.QUESTION).name(), is(query.recordAt(DnsSection.QUESTION).name())); + assertFalse(channel.finish()); + } + + @Test + public void testResponseEncode() { + EmbeddedChannel channel = new EmbeddedChannel(new TcpDnsResponseEncoder()); + + int randomID = new Random().nextInt(60000 - 1000) + 1000; + DnsQuery query = new DefaultDnsQuery(randomID, DnsOpCode.QUERY) + .setRecord(DnsSection.QUESTION, new DefaultDnsQuestion(QUERY_DOMAIN, DnsRecordType.A)); + + DnsQuestion question = query.recordAt(DnsSection.QUESTION); + channel.writeInbound(newResponse(query, question, QUERY_RESULT)); + + DnsResponse readResponse = channel.readInbound(); + assertThat(readResponse.recordAt(DnsSection.QUESTION), is((DnsRecord) question)); + 
DnsRawRecord record = new DefaultDnsRawRecord(question.name(), + DnsRecordType.A, TTL, Unpooled.wrappedBuffer(QUERY_RESULT)); + assertThat(readResponse.recordAt(DnsSection.ANSWER), is((DnsRecord) record)); + assertThat(readResponse.recordAt(DnsSection.ANSWER).content(), is(record.content())); + ReferenceCountUtil.release(readResponse); + ReferenceCountUtil.release(record); + query.release(); + assertFalse(channel.finish()); + } + + private static DefaultDnsResponse newResponse(DnsQuery query, DnsQuestion question, byte[]... addresses) { + DefaultDnsResponse response = new DefaultDnsResponse(query.id()); + response.addRecord(DnsSection.QUESTION, question); + + for (byte[] address : addresses) { + DefaultDnsRawRecord queryAnswer = new DefaultDnsRawRecord(question.name(), + DnsRecordType.A, TTL, Unpooled.wrappedBuffer(address)); + response.addRecord(DnsSection.ANSWER, queryAnswer); + } + return response; + } +} diff --git a/codec-haproxy/pom.xml b/codec-haproxy/pom.xml index e84370144da..d641927639f 100644 --- a/codec-haproxy/pom.xml +++ b/codec-haproxy/pom.xml @@ -6,7 +6,7 @@ ~ version 2.0 (the "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at: ~ - ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ https://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,13 +14,13 @@ ~ License for the specific language governing permissions and limitations ~ under the License. 
--> - + 4.0.0 io.netty netty-parent - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT netty-codec-haproxy @@ -33,6 +33,16 @@ + + ${project.groupId} + netty-buffer + ${project.version} + + + ${project.groupId} + netty-transport + ${project.version} + ${project.groupId} netty-codec diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyCommand.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyCommand.java index 4fb9d334d64..4fc7566e958 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyCommand.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyCommand.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java index c2a4e22c723..436adec57e9 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyConstants.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -56,5 +56,31 @@ final class HAProxyConstants { static final byte TPAF_UNIX_STREAM_BYTE = 0x31; static final byte TPAF_UNIX_DGRAM_BYTE = 0x32; + /** + * V2 protocol binary header prefix + */ + static final byte[] BINARY_PREFIX = { + (byte) 0x0D, + (byte) 0x0A, + (byte) 0x0D, + (byte) 0x0A, + (byte) 0x00, + (byte) 0x0D, + (byte) 0x0A, + (byte) 0x51, + (byte) 0x55, + (byte) 0x49, + (byte) 0x54, + (byte) 0x0A + }; + + static final byte[] TEXT_PREFIX = { + (byte) 'P', + (byte) 'R', + (byte) 'O', + (byte) 'X', + (byte) 'Y', + }; + private HAProxyConstants() { } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java index b40bf42df26..eefdbe607a7 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessage.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,11 +15,18 @@ */ package io.netty.handler.codec.haproxy; +import static java.util.Objects.requireNonNull; + import io.netty.buffer.ByteBuf; import io.netty.handler.codec.haproxy.HAProxyProxiedProtocol.AddressFamily; +import io.netty.util.AbstractReferenceCounted; import io.netty.util.ByteProcessor; import io.netty.util.CharsetUtil; import io.netty.util.NetUtil; +import io.netty.util.ResourceLeakDetector; +import io.netty.util.ResourceLeakDetectorFactory; +import io.netty.util.ResourceLeakTracker; +import io.netty.util.internal.StringUtil; import java.util.ArrayList; import java.util.Collections; @@ -28,29 +35,11 @@ /** * Message container for decoded HAProxy proxy protocol parameters */ -public final class HAProxyMessage { - - /** - * Version 1 proxy protocol message for 'UNKNOWN' proxied protocols. Per spec, when the proxied protocol is - * 'UNKNOWN' we must discard all other header values. - */ - private static final HAProxyMessage V1_UNKNOWN_MSG = new HAProxyMessage( - HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNKNOWN, null, null, 0, 0); - - /** - * Version 2 proxy protocol message for 'UNKNOWN' proxied protocols. Per spec, when the proxied protocol is - * 'UNKNOWN' we must discard all other header values. - */ - private static final HAProxyMessage V2_UNKNOWN_MSG = new HAProxyMessage( - HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNKNOWN, null, null, 0, 0); - - /** - * Version 2 proxy protocol message for local requests. Per spec, we should use an unspecified protocol and family - * for 'LOCAL' commands. Per spec, when the proxied protocol is 'UNKNOWN' we must discard all other header values. 
- */ - private static final HAProxyMessage V2_LOCAL_MSG = new HAProxyMessage( - HAProxyProtocolVersion.V2, HAProxyCommand.LOCAL, HAProxyProxiedProtocol.UNKNOWN, null, null, 0, 0); +public final class HAProxyMessage extends AbstractReferenceCounted { + private static final ResourceLeakDetector leakDetector = + ResourceLeakDetectorFactory.instance().newResourceLeakDetector(HAProxyMessage.class); + private final ResourceLeakTracker leak; private final HAProxyProtocolVersion protocolVersion; private final HAProxyCommand command; private final HAProxyProxiedProtocol proxiedProtocol; @@ -72,33 +61,47 @@ private HAProxyMessage( } /** - * Creates a new instance + * Creates a new instance of HAProxyMessage. + * @param protocolVersion the protocol version. + * @param command the command. + * @param proxiedProtocol the protocol containing the address family and transport protocol. + * @param sourceAddress the source address. + * @param destinationAddress the destination address. + * @param sourcePort the source port. This value must be 0 for unix, unspec addresses. + * @param destinationPort the destination port. This value must be 0 for unix, unspec addresses. */ - private HAProxyMessage( + public HAProxyMessage( HAProxyProtocolVersion protocolVersion, HAProxyCommand command, HAProxyProxiedProtocol proxiedProtocol, String sourceAddress, String destinationAddress, int sourcePort, int destinationPort) { this(protocolVersion, command, proxiedProtocol, - sourceAddress, destinationAddress, sourcePort, destinationPort, Collections.emptyList()); + sourceAddress, destinationAddress, sourcePort, destinationPort, Collections.emptyList()); } /** - * Creates a new instance + * Creates a new instance of HAProxyMessage. + * @param protocolVersion the protocol version. + * @param command the command. + * @param proxiedProtocol the protocol containing the address family and transport protocol. + * @param sourceAddress the source address. 
+ * @param destinationAddress the destination address. + * @param sourcePort the source port. This value must be 0 for unix, unspec addresses. + * @param destinationPort the destination port. This value must be 0 for unix, unspec addresses. + * @param tlvs the list of tlvs. */ - private HAProxyMessage( + public HAProxyMessage( HAProxyProtocolVersion protocolVersion, HAProxyCommand command, HAProxyProxiedProtocol proxiedProtocol, String sourceAddress, String destinationAddress, int sourcePort, int destinationPort, - List tlvs) { - - if (proxiedProtocol == null) { - throw new NullPointerException("proxiedProtocol"); - } + List tlvs) { + requireNonNull(protocolVersion, "protocolVersion"); + requireNonNull(proxiedProtocol, "proxiedProtocol"); + requireNonNull(tlvs, "tlvs"); AddressFamily addrFamily = proxiedProtocol.addressFamily(); checkAddress(sourceAddress, addrFamily); checkAddress(destinationAddress, addrFamily); - checkPort(sourcePort); - checkPort(destinationPort); + checkPort(sourcePort, addrFamily); + checkPort(destinationPort, addrFamily); this.protocolVersion = protocolVersion; this.command = command; @@ -108,6 +111,8 @@ private HAProxyMessage( this.sourcePort = sourcePort; this.destinationPort = destinationPort; this.tlvs = Collections.unmodifiableList(tlvs); + + leak = leakDetector.track(this); } /** @@ -118,9 +123,7 @@ private HAProxyMessage( * @throws HAProxyProtocolException if any portion of the header is invalid */ static HAProxyMessage decodeHeader(ByteBuf header) { - if (header == null) { - throw new NullPointerException("header"); - } + requireNonNull(header, "header"); if (header.readableBytes() < 16) { throw new HAProxyProtocolException( @@ -150,7 +153,7 @@ static HAProxyMessage decodeHeader(ByteBuf header) { } if (cmd == HAProxyCommand.LOCAL) { - return V2_LOCAL_MSG; + return unknownMsg(HAProxyProtocolVersion.V2, HAProxyCommand.LOCAL); } // Per spec, the 14th byte is the protocol and address family byte @@ -162,7 +165,7 @@ static HAProxyMessage 
decodeHeader(ByteBuf header) { } if (protAndFam == HAProxyProxiedProtocol.UNKNOWN) { - return V2_UNKNOWN_MSG; + return unknownMsg(HAProxyProtocolVersion.V2, HAProxyCommand.PROXY); } int addressInfoLen = header.readUnsignedShort(); @@ -243,7 +246,7 @@ private static List readTlvs(final ByteBuf header) { return Collections.emptyList(); } // In most cases there are less than 4 TLVs available - List haProxyTLVs = new ArrayList(4); + List haProxyTLVs = new ArrayList<>(4); do { haProxyTLVs.add(haProxyTLV); @@ -274,7 +277,7 @@ private static HAProxyTLV readNextTLV(final ByteBuf header) { if (byteBuf.readableBytes() >= 4) { - final List encapsulatedTlvs = new ArrayList(4); + final List encapsulatedTlvs = new ArrayList<>(4); do { final HAProxyTLV haProxyTLV = readNextTLV(byteBuf); if (haProxyTLV == null) { @@ -285,8 +288,8 @@ private static HAProxyTLV readNextTLV(final ByteBuf header) { return new HAProxySSLTLV(verify, client, encapsulatedTlvs, rawContent); } - return new HAProxySSLTLV(verify, client, Collections.emptyList(), rawContent); - // If we're not dealing with a SSL Type, we can use the same mechanism + return new HAProxySSLTLV(verify, client, Collections.emptyList(), rawContent); + // If we're not dealing with an SSL Type, we can use the same mechanism case PP2_TYPE_ALPN: case PP2_TYPE_AUTHORITY: case PP2_TYPE_SSL_VERSION: @@ -337,16 +340,28 @@ static HAProxyMessage decodeHeader(String header) { } if (protAndFam == HAProxyProxiedProtocol.UNKNOWN) { - return V1_UNKNOWN_MSG; + return unknownMsg(HAProxyProtocolVersion.V1, HAProxyCommand.PROXY); } if (numParts != 6) { throw new HAProxyProtocolException("invalid TCP4/6 header: " + header + " (expected: 6 parts)"); } - return new HAProxyMessage( - HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, - protAndFam, parts[2], parts[3], parts[4], parts[5]); + try { + return new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, + protAndFam, parts[2], parts[3], parts[4], parts[5]); + } catch (RuntimeException e) 
{ + throw new HAProxyProtocolException("invalid HAProxy message", e); + } + } + + /** + * Proxy protocol message for 'UNKNOWN' proxied protocols. Per spec, when the proxied protocol is + * 'UNKNOWN' we must discard all other header values. + */ + private static HAProxyMessage unknownMsg(HAProxyProtocolVersion version, HAProxyCommand command) { + return new HAProxyMessage(version, command, HAProxyProxiedProtocol.UNKNOWN, null, null, 0, 0); } /** @@ -358,31 +373,20 @@ static HAProxyMessage decodeHeader(String header) { */ private static String ipBytesToString(ByteBuf header, int addressLen) { StringBuilder sb = new StringBuilder(); - if (addressLen == 4) { - sb.append(header.readByte() & 0xff); - sb.append('.'); - sb.append(header.readByte() & 0xff); - sb.append('.'); - sb.append(header.readByte() & 0xff); - sb.append('.'); - sb.append(header.readByte() & 0xff); + final int ipv4Len = 4; + final int ipv6Len = 8; + if (addressLen == ipv4Len) { + for (int i = 0; i < ipv4Len; i++) { + sb.append(header.readByte() & 0xff); + sb.append('.'); + } } else { - sb.append(Integer.toHexString(header.readUnsignedShort())); - sb.append(':'); - sb.append(Integer.toHexString(header.readUnsignedShort())); - sb.append(':'); - sb.append(Integer.toHexString(header.readUnsignedShort())); - sb.append(':'); - sb.append(Integer.toHexString(header.readUnsignedShort())); - sb.append(':'); - sb.append(Integer.toHexString(header.readUnsignedShort())); - sb.append(':'); - sb.append(Integer.toHexString(header.readUnsignedShort())); - sb.append(':'); - sb.append(Integer.toHexString(header.readUnsignedShort())); - sb.append(':'); - sb.append(Integer.toHexString(header.readUnsignedShort())); + for (int i = 0; i < ipv6Len; i++) { + sb.append(Integer.toHexString(header.readUnsignedShort())); + sb.append(':'); + } } + sb.setLength(sb.length() - 1); return sb.toString(); } @@ -391,18 +395,18 @@ private static String ipBytesToString(ByteBuf header, int addressLen) { * * @param value the port * @return port 
as an integer - * @throws HAProxyProtocolException if port is not a valid integer + * @throws IllegalArgumentException if port is not a valid integer */ private static int portStringToInt(String value) { int port; try { port = Integer.parseInt(value); } catch (NumberFormatException e) { - throw new HAProxyProtocolException("invalid port: " + value, e); + throw new IllegalArgumentException("invalid port: " + value, e); } if (port <= 0 || port > 65535) { - throw new HAProxyProtocolException("invalid port: " + value + " (expected: 1 ~ 65535)"); + throw new IllegalArgumentException("invalid port: " + value + " (expected: 1 ~ 65535)"); } return port; @@ -413,52 +417,65 @@ private static int portStringToInt(String value) { * * @param address human-readable address * @param addrFamily the {@link AddressFamily} to check the address against - * @throws HAProxyProtocolException if the address is invalid + * @throws IllegalArgumentException if the address is invalid */ private static void checkAddress(String address, AddressFamily addrFamily) { - if (addrFamily == null) { - throw new NullPointerException("addrFamily"); - } + requireNonNull(addrFamily, "addrFamily"); switch (addrFamily) { case AF_UNSPEC: if (address != null) { - throw new HAProxyProtocolException("unable to validate an AF_UNSPEC address: " + address); + throw new IllegalArgumentException("unable to validate an AF_UNSPEC address: " + address); } return; case AF_UNIX: + requireNonNull(address, "address"); + if (address.getBytes(CharsetUtil.US_ASCII).length > 108) { + throw new IllegalArgumentException("invalid AF_UNIX address: " + address); + } return; } - if (address == null) { - throw new NullPointerException("address"); - } + requireNonNull(address, "address"); switch (addrFamily) { case AF_IPv4: if (!NetUtil.isValidIpV4Address(address)) { - throw new HAProxyProtocolException("invalid IPv4 address: " + address); + throw new IllegalArgumentException("invalid IPv4 address: " + address); } break; case AF_IPv6: 
if (!NetUtil.isValidIpV6Address(address)) { - throw new HAProxyProtocolException("invalid IPv6 address: " + address); + throw new IllegalArgumentException("invalid IPv6 address: " + address); } break; default: - throw new Error(); + throw new IllegalArgumentException("unexpected addrFamily: " + addrFamily); } } /** - * Validate a UDP/TCP port + * Validate the port depending on the addrFamily. * * @param port the UDP/TCP port - * @throws HAProxyProtocolException if the port is out of range (0-65535 inclusive) + * @throws IllegalArgumentException if the port is out of range (0-65535 inclusive) */ - private static void checkPort(int port) { - if (port < 0 || port > 65535) { - throw new HAProxyProtocolException("invalid port: " + port + " (expected: 1 ~ 65535)"); + private static void checkPort(int port, AddressFamily addrFamily) { + switch (addrFamily) { + case AF_IPv6: + case AF_IPv4: + if (port < 0 || port > 65535) { + throw new IllegalArgumentException("invalid port: " + port + " (expected: 0 ~ 65535)"); + } + break; + case AF_UNIX: + case AF_UNSPEC: + if (port != 0) { + throw new IllegalArgumentException("port cannot be specified with addrFamily: " + addrFamily); + } + break; + default: + throw new IllegalArgumentException("unexpected addrFamily: " + addrFamily); } } @@ -484,7 +501,8 @@ public HAProxyProxiedProtocol proxiedProtocol() { } /** - * Returns the human-readable source address of this {@link HAProxyMessage}. + * Returns the human-readable source address of this {@link HAProxyMessage} or {@code null} + * if HAProxy performs health check with {@code send-proxy-v2}. 
*/ public String sourceAddress() { return sourceAddress; @@ -519,4 +537,93 @@ public int destinationPort() { public List tlvs() { return tlvs; } + + int tlvNumBytes() { + int tlvNumBytes = 0; + for (int i = 0; i < tlvs.size(); i++) { + tlvNumBytes += tlvs.get(i).totalNumBytes(); + } + return tlvNumBytes; + } + + @Override + public HAProxyMessage touch() { + tryRecord(); + return (HAProxyMessage) super.touch(); + } + + @Override + public HAProxyMessage touch(Object hint) { + if (leak != null) { + leak.record(hint); + } + return this; + } + + @Override + public HAProxyMessage retain() { + tryRecord(); + return (HAProxyMessage) super.retain(); + } + + @Override + public HAProxyMessage retain(int increment) { + tryRecord(); + return (HAProxyMessage) super.retain(increment); + } + + @Override + public boolean release() { + tryRecord(); + return super.release(); + } + + @Override + public boolean release(int decrement) { + tryRecord(); + return super.release(decrement); + } + + private void tryRecord() { + if (leak != null) { + leak.record(); + } + } + + @Override + protected void deallocate() { + try { + for (HAProxyTLV tlv : tlvs) { + tlv.release(); + } + } finally { + final ResourceLeakTracker leak = this.leak; + if (leak != null) { + boolean closed = leak.close(this); + assert closed; + } + } + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(256) + .append(StringUtil.simpleClassName(this)) + .append("(protocolVersion: ").append(protocolVersion) + .append(", command: ").append(command) + .append(", proxiedProtocol: ").append(proxiedProtocol) + .append(", sourceAddress: ").append(sourceAddress) + .append(", destinationAddress: ").append(destinationAddress) + .append(", sourcePort: ").append(sourcePort) + .append(", destinationPort: ").append(destinationPort) + .append(", tlvs: ["); + if (!tlvs.isEmpty()) { + for (HAProxyTLV tlv: tlvs) { + sb.append(tlv).append(", "); + } + sb.setLength(sb.length() - 2); + } + sb.append("])"); + 
return sb.toString(); + } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java index df2a663e89c..0b491432695 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,16 +18,16 @@ import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; -import io.netty.handler.codec.LineBasedFrameDecoder; import io.netty.handler.codec.ProtocolDetectionResult; import io.netty.util.CharsetUtil; -import java.util.List; + +import static io.netty.handler.codec.haproxy.HAProxyConstants.*; /** * Decodes an HAProxy proxy protocol header * - * @see Proxy Protocol Specification + * @see Proxy Protocol Specification */ public class HAProxyMessageDecoder extends ByteToMessageDecoder { /** @@ -50,37 +50,6 @@ public class HAProxyMessageDecoder extends ByteToMessageDecoder { */ private static final int V2_MAX_TLV = 65535 - 216; - /** - * Version 1 header delimiter is always '\r\n' per spec - */ - private static final int DELIMITER_LENGTH = 2; - - /** - * Binary header prefix - */ - private static final byte[] BINARY_PREFIX = { - (byte) 0x0D, - (byte) 0x0A, - (byte) 0x0D, - (byte) 0x0A, - (byte) 0x00, - (byte) 0x0D, - (byte) 0x0A, - (byte) 0x51, - (byte) 0x55, - (byte) 0x49, - (byte) 0x54, - (byte) 0x0A - }; - - private static final byte[] TEXT_PREFIX = { - 
(byte) 'P', - (byte) 'R', - (byte) 'O', - (byte) 'X', - (byte) 'Y', - }; - /** * Binary header prefix length */ @@ -98,6 +67,11 @@ public class HAProxyMessageDecoder extends ByteToMessageDecoder { private static final ProtocolDetectionResult DETECTION_RESULT_V2 = ProtocolDetectionResult.detected(HAProxyProtocolVersion.V2); + /** + * Used to extract a header frame out of the {@link ByteBuf} and return it. + */ + private HeaderExtractor headerExtractor; + /** * {@code true} if we're discarding input because we're already over maxLength */ @@ -108,6 +82,11 @@ public class HAProxyMessageDecoder extends ByteToMessageDecoder { */ private int discardedBytes; + /** + * Whether or not to throw an exception as soon as we exceed maxLength. + */ + private final boolean failFast; + /** * {@code true} if we're finished decoding the proxy protocol header */ @@ -125,14 +104,27 @@ public class HAProxyMessageDecoder extends ByteToMessageDecoder { private final int v2MaxHeaderSize; /** - * Creates a new decoder with no additional data (TLV) restrictions + * Creates a new decoder with no additional data (TLV) restrictions, and should throw an exception as soon as + * we exceed maxLength. */ public HAProxyMessageDecoder() { + this(true); + } + + /** + * Creates a new decoder with no additional data (TLV) restrictions, whether or not to throw an exception as soon + * as we exceed maxLength. + * + * @param failFast Whether or not to throw an exception as soon as we exceed maxLength + */ + public HAProxyMessageDecoder(boolean failFast) { v2MaxHeaderSize = V2_MAX_LENGTH; + this.failFast = failFast; } /** - * Creates a new decoder with restricted additional data (TLV) size + * Creates a new decoder with restricted additional data (TLV) size, and should throw an exception as soon as + * we exceed maxLength. *

    * Note: limiting TLV size only affects processing of v2, binary headers. Also, as allowed by the 1.5 spec * TLV data is currently ignored. For maximum performance it would be best to configure your upstream proxy host to @@ -142,18 +134,30 @@ public HAProxyMessageDecoder() { * @param maxTlvSize maximum number of bytes allowed for additional data (Type-Length-Value vectors) in a v2 header */ public HAProxyMessageDecoder(int maxTlvSize) { + this(maxTlvSize, true); + } + + /** + * Creates a new decoder with restricted additional data (TLV) size, whether or not to throw an exception as soon + * as we exceed maxLength. + * + * @param maxTlvSize maximum number of bytes allowed for additional data (Type-Length-Value vectors) in a v2 header + * @param failFast Whether or not to throw an exception as soon as we exceed maxLength + */ + public HAProxyMessageDecoder(int maxTlvSize, boolean failFast) { if (maxTlvSize < 1) { v2MaxHeaderSize = V2_MIN_LENGTH; } else if (maxTlvSize > V2_MAX_TLV) { v2MaxHeaderSize = V2_MAX_LENGTH; } else { int calcMax = maxTlvSize + V2_MIN_LENGTH; - if (calcMax > V2_MAX_LENGTH) { + if (calcMax > V2_MAX_LENGTH) { // lgtm[java/constant-comparison] v2MaxHeaderSize = V2_MAX_LENGTH; } else { v2MaxHeaderSize = calcMax; } } + this.failFast = failFast; } /** @@ -227,7 +231,15 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception } @Override - protected final void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + ctx.fireExceptionCaught(cause); + if (cause instanceof HAProxyProtocolException) { + ctx.close(); // drop connection immediately per spec + } + } + + @Override + protected final void decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { // determine the specification version if (version == -1) { if ((version = findVersion(in)) == -1) { @@ -247,9 +259,9 @@ protected final void 
decode(ChannelHandlerContext ctx, ByteBuf in, List finished = true; try { if (version == 1) { - out.add(HAProxyMessage.decodeHeader(decoded.toString(CharsetUtil.US_ASCII))); + ctx.fireChannelRead(HAProxyMessage.decodeHeader(decoded.toString(CharsetUtil.US_ASCII))); } else { - out.add(HAProxyMessage.decodeHeader(decoded)); + ctx.fireChannelRead(HAProxyMessage.decodeHeader(decoded)); } } catch (HAProxyProtocolException e) { fail(ctx, null, e); @@ -259,7 +271,6 @@ protected final void decode(ChannelHandlerContext ctx, ByteBuf in, List /** * Create a frame out of the {@link ByteBuf} and return it. - * Based on code from {@link LineBasedFrameDecoder#decode(ChannelHandlerContext, ByteBuf)}. * * @param ctx the {@link ChannelHandlerContext} which this {@link HAProxyMessageDecoder} belongs to * @param buffer the {@link ByteBuf} from which to read data @@ -267,42 +278,14 @@ protected final void decode(ChannelHandlerContext ctx, ByteBuf in, List * be created */ private ByteBuf decodeStruct(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception { - final int eoh = findEndOfHeader(buffer); - if (!discarding) { - if (eoh >= 0) { - final int length = eoh - buffer.readerIndex(); - if (length > v2MaxHeaderSize) { - buffer.readerIndex(eoh); - failOverLimit(ctx, length); - return null; - } - return buffer.readSlice(length); - } else { - final int length = buffer.readableBytes(); - if (length > v2MaxHeaderSize) { - discardedBytes = length; - buffer.skipBytes(length); - discarding = true; - failOverLimit(ctx, "over " + discardedBytes); - } - return null; - } - } else { - if (eoh >= 0) { - buffer.readerIndex(eoh); - discardedBytes = 0; - discarding = false; - } else { - discardedBytes = buffer.readableBytes(); - buffer.skipBytes(discardedBytes); - } - return null; + if (headerExtractor == null) { + headerExtractor = new StructHeaderExtractor(v2MaxHeaderSize); } + return headerExtractor.extract(ctx, buffer); } /** * Create a frame out of the {@link ByteBuf} and return it. 
- * Based on code from {@link LineBasedFrameDecoder#decode(ChannelHandlerContext, ByteBuf)}. * * @param ctx the {@link ChannelHandlerContext} which this {@link HAProxyMessageDecoder} belongs to * @param buffer the {@link ByteBuf} from which to read data @@ -310,40 +293,10 @@ private ByteBuf decodeStruct(ChannelHandlerContext ctx, ByteBuf buffer) throws E * be created */ private ByteBuf decodeLine(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception { - final int eol = findEndOfLine(buffer); - if (!discarding) { - if (eol >= 0) { - final int length = eol - buffer.readerIndex(); - if (length > V1_MAX_LENGTH) { - buffer.readerIndex(eol + DELIMITER_LENGTH); - failOverLimit(ctx, length); - return null; - } - ByteBuf frame = buffer.readSlice(length); - buffer.skipBytes(DELIMITER_LENGTH); - return frame; - } else { - final int length = buffer.readableBytes(); - if (length > V1_MAX_LENGTH) { - discardedBytes = length; - buffer.skipBytes(length); - discarding = true; - failOverLimit(ctx, "over " + discardedBytes); - } - return null; - } - } else { - if (eol >= 0) { - final int delimLength = buffer.getByte(eol) == '\r' ? 
2 : 1; - buffer.readerIndex(eol + delimLength); - discardedBytes = 0; - discarding = false; - } else { - discardedBytes = buffer.readableBytes(); - buffer.skipBytes(discardedBytes); - } - return null; + if (headerExtractor == null) { + headerExtractor = new LineHeaderExtractor(V1_MAX_LENGTH); } + return headerExtractor.extract(ctx, buffer); } private void failOverLimit(final ChannelHandlerContext ctx, int length) { @@ -357,7 +310,6 @@ private void failOverLimit(final ChannelHandlerContext ctx, String length) { private void fail(final ChannelHandlerContext ctx, String errMsg, Exception e) { finished = true; - ctx.close(); // drop connection immediately per spec HAProxyProtocolException ppex; if (errMsg != null && e != null) { ppex = new HAProxyProtocolException(errMsg, e); @@ -399,4 +351,119 @@ private static boolean match(byte[] prefix, ByteBuf buffer, int idx) { } return true; } + + /** + * HeaderExtractor create a header frame out of the {@link ByteBuf}. + */ + private abstract class HeaderExtractor { + /** Header max size */ + private final int maxHeaderSize; + + protected HeaderExtractor(int maxHeaderSize) { + this.maxHeaderSize = maxHeaderSize; + } + + /** + * Create a frame out of the {@link ByteBuf} and return it. 
+ * + * @param ctx the {@link ChannelHandlerContext} which this {@link HAProxyMessageDecoder} belongs to + * @param buffer the {@link ByteBuf} from which to read data + * @return frame the {@link ByteBuf} which represent the frame or {@code null} if no frame could + * be created + * @throws Exception if exceed maxLength + */ + public ByteBuf extract(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception { + final int eoh = findEndOfHeader(buffer); + if (!discarding) { + if (eoh >= 0) { + final int length = eoh - buffer.readerIndex(); + if (length > maxHeaderSize) { + buffer.readerIndex(eoh + delimiterLength(buffer, eoh)); + failOverLimit(ctx, length); + return null; + } + ByteBuf frame = buffer.readSlice(length); + buffer.skipBytes(delimiterLength(buffer, eoh)); + return frame; + } else { + final int length = buffer.readableBytes(); + if (length > maxHeaderSize) { + discardedBytes = length; + buffer.skipBytes(length); + discarding = true; + if (failFast) { + failOverLimit(ctx, "over " + discardedBytes); + } + } + return null; + } + } else { + if (eoh >= 0) { + final int length = discardedBytes + eoh - buffer.readerIndex(); + buffer.readerIndex(eoh + delimiterLength(buffer, eoh)); + discardedBytes = 0; + discarding = false; + if (!failFast) { + failOverLimit(ctx, "over " + length); + } + } else { + discardedBytes += buffer.readableBytes(); + buffer.skipBytes(buffer.readableBytes()); + } + return null; + } + } + + /** + * Find the end of the header from the given {@link ByteBuf}īŧŒthe end may be a CRLF, or the length given by the + * header. + * + * @param buffer the buffer to be searched + * @return {@code -1} if can not find the end, otherwise return the buffer index of end + */ + protected abstract int findEndOfHeader(ByteBuf buffer); + + /** + * Get the length of the header delimiter. 
+ * + * @param buffer the buffer where delimiter is located + * @param eoh index of delimiter + * @return length of the delimiter + */ + protected abstract int delimiterLength(ByteBuf buffer, int eoh); + } + + private final class LineHeaderExtractor extends HeaderExtractor { + + LineHeaderExtractor(int maxHeaderSize) { + super(maxHeaderSize); + } + + @Override + protected int findEndOfHeader(ByteBuf buffer) { + return findEndOfLine(buffer); + } + + @Override + protected int delimiterLength(ByteBuf buffer, int eoh) { + return buffer.getByte(eoh) == '\r' ? 2 : 1; + } + } + + private final class StructHeaderExtractor extends HeaderExtractor { + + StructHeaderExtractor(int maxHeaderSize) { + super(maxHeaderSize); + } + + @Override + protected int findEndOfHeader(ByteBuf buffer) { + return HAProxyMessageDecoder.findEndOfHeader(buffer); + } + + @Override + protected int delimiterLength(ByteBuf buffer, int eoh) { + return 0; + } + } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java new file mode 100644 index 00000000000..a745bf6e966 --- /dev/null +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyMessageEncoder.java @@ -0,0 +1,134 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.haproxy; + +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandler.Sharable; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.MessageToByteEncoder; +import io.netty.util.CharsetUtil; +import io.netty.util.NetUtil; + +import java.util.List; + +import static io.netty.handler.codec.haproxy.HAProxyConstants.*; + +/** + * Encodes an HAProxy proxy protocol message + * + * @see Proxy Protocol Specification + */ +@Sharable +public final class HAProxyMessageEncoder extends MessageToByteEncoder { + + private static final int V2_VERSION_BITMASK = 0x02 << 4; + + // Length for source/destination addresses for the UNIX family must be 108 bytes each. + static final int UNIX_ADDRESS_BYTES_LENGTH = 108; + static final int TOTAL_UNIX_ADDRESS_BYTES_LENGTH = UNIX_ADDRESS_BYTES_LENGTH * 2; + + public static final HAProxyMessageEncoder INSTANCE = new HAProxyMessageEncoder(); + + private HAProxyMessageEncoder() { + } + + @Override + protected void encode(ChannelHandlerContext ctx, HAProxyMessage msg, ByteBuf out) throws Exception { + switch (msg.protocolVersion()) { + case V1: + encodeV1(msg, out); + break; + case V2: + encodeV2(msg, out); + break; + default: + throw new HAProxyProtocolException("Unsupported version: " + msg.protocolVersion()); + } + } + + private static void encodeV1(HAProxyMessage msg, ByteBuf out) { + out.writeBytes(TEXT_PREFIX); + out.writeByte((byte) ' '); + out.writeCharSequence(msg.proxiedProtocol().name(), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(msg.sourceAddress(), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(msg.destinationAddress(), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(String.valueOf(msg.sourcePort()), CharsetUtil.US_ASCII); + out.writeByte((byte) ' '); + out.writeCharSequence(String.valueOf(msg.destinationPort()), CharsetUtil.US_ASCII); + out.writeByte((byte) '\r'); + 
out.writeByte((byte) '\n'); + } + + private static void encodeV2(HAProxyMessage msg, ByteBuf out) { + out.writeBytes(BINARY_PREFIX); + out.writeByte(V2_VERSION_BITMASK | msg.command().byteValue()); + out.writeByte(msg.proxiedProtocol().byteValue()); + + switch (msg.proxiedProtocol().addressFamily()) { + case AF_IPv4: + case AF_IPv6: + byte[] srcAddrBytes = NetUtil.createByteArrayFromIpAddressString(msg.sourceAddress()); + byte[] dstAddrBytes = NetUtil.createByteArrayFromIpAddressString(msg.destinationAddress()); + // srcAddrLen + dstAddrLen + 4 (srcPort + dstPort) + numTlvBytes + out.writeShort(srcAddrBytes.length + dstAddrBytes.length + 4 + msg.tlvNumBytes()); + out.writeBytes(srcAddrBytes); + out.writeBytes(dstAddrBytes); + out.writeShort(msg.sourcePort()); + out.writeShort(msg.destinationPort()); + encodeTlvs(msg.tlvs(), out); + break; + case AF_UNIX: + out.writeShort(TOTAL_UNIX_ADDRESS_BYTES_LENGTH + msg.tlvNumBytes()); + int srcAddrBytesWritten = out.writeCharSequence(msg.sourceAddress(), CharsetUtil.US_ASCII); + out.writeZero(UNIX_ADDRESS_BYTES_LENGTH - srcAddrBytesWritten); + int dstAddrBytesWritten = out.writeCharSequence(msg.destinationAddress(), CharsetUtil.US_ASCII); + out.writeZero(UNIX_ADDRESS_BYTES_LENGTH - dstAddrBytesWritten); + encodeTlvs(msg.tlvs(), out); + break; + case AF_UNSPEC: + out.writeShort(0); + break; + default: + throw new HAProxyProtocolException("unexpected addrFamily"); + } + } + + private static void encodeTlv(HAProxyTLV haProxyTLV, ByteBuf out) { + if (haProxyTLV instanceof HAProxySSLTLV) { + HAProxySSLTLV ssltlv = (HAProxySSLTLV) haProxyTLV; + out.writeByte(haProxyTLV.typeByteValue()); + out.writeShort(ssltlv.contentNumBytes()); + out.writeByte(ssltlv.client()); + out.writeInt(ssltlv.verify()); + encodeTlvs(ssltlv.encapsulatedTLVs(), out); + } else { + out.writeByte(haProxyTLV.typeByteValue()); + ByteBuf value = haProxyTLV.content(); + int readableBytes = value.readableBytes(); + out.writeShort(readableBytes); + 
out.writeBytes(value.readSlice(readableBytes)); + } + } + + private static void encodeTlvs(List haProxyTLVs, ByteBuf out) { + for (int i = 0; i < haProxyTLVs.size(); i++) { + encodeTlv(haProxyTLVs.get(i), out); + } + } +} diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolException.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolException.java index 20748370b17..cedae736006 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolException.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolException.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolVersion.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolVersion.java index 11fcb60fe74..3fa23137f42 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolVersion.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProtocolVersion.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProxiedProtocol.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProxiedProtocol.java index 6e5483e40ea..41e5eb66788 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProxiedProtocol.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyProxiedProtocol.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java index 5d3dc103676..e880769d9d4 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxySSLTLV.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,6 +17,8 @@ package io.netty.handler.codec.haproxy; import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.util.internal.StringUtil; import java.util.Collections; import java.util.List; @@ -35,7 +37,19 @@ public final class HAProxySSLTLV extends HAProxyTLV { * Creates a new HAProxySSLTLV * * @param verify the verification result as defined in the specification for the pp2_tlv_ssl struct (see - * http://www.haproxy.org/download/1.5/doc/proxy-protocol.txt) + * https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) + * @param clientBitField the bitfield with client information + * @param tlvs the encapsulated {@link HAProxyTLV}s + */ + public HAProxySSLTLV(final int verify, final byte clientBitField, final List tlvs) { + this(verify, clientBitField, tlvs, Unpooled.EMPTY_BUFFER); + } + + /** + * Creates a new HAProxySSLTLV + * + * @param verify the verification result as defined in the specification for the pp2_tlv_ssl struct (see + * https://www.haproxy.org/download/1.8/doc/proxy-protocol.txt) * @param clientBitField the bitfield with client information * @param tlvs the encapsulated {@link HAProxyTLV}s * @param rawContent the raw TLV content @@ -69,6 +83,13 @@ public boolean isPP2ClientCertSess() { return (clientBitField & 0x4) != 0; } + /** + * Returns the client bit field + */ + public byte client() { + return clientBitField; + } + /** * Returns the verification result */ @@ -83,4 +104,22 @@ public List encapsulatedTLVs() { return tlvs; } + @Override + int contentNumBytes() { + int tlvNumBytes = 0; + for (int i = 0; i < tlvs.size(); i++) { + tlvNumBytes += tlvs.get(i).totalNumBytes(); + } + return 5 + tlvNumBytes; // clientBit(1) + verify(4) + 
tlvs + } + + @Override + public String toString() { + return StringUtil.simpleClassName(this) + + "(type: " + type() + + ", typeByteValue: " + typeByteValue() + + ", client: " + client() + + ", verify: " + verify() + + ", numEncapsulatedTlvs: " + tlvs.size() + ')'; + } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java index 380c6aa0586..31bf1fabca1 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/HAProxyTLV.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,8 +18,9 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.DefaultByteBufHolder; +import io.netty.util.internal.StringUtil; -import static io.netty.util.internal.ObjectUtil.*; +import static java.util.Objects.requireNonNull; /** * A Type-Length Value (TLV vector) that can be added to the PROXY protocol @@ -32,6 +33,18 @@ public class HAProxyTLV extends DefaultByteBufHolder { private final Type type; private final byte typeByteValue; + /** + * The size of this tlv in bytes. + * @return the number of bytes. 
+ */ + int totalNumBytes() { + return 3 + contentNumBytes(); // type(1) + length(2) + content + } + + int contentNumBytes() { + return content().readableBytes(); + } + /** * The registered types a TLV can have regarding the PROXY protocol 1.5 spec */ @@ -56,7 +69,7 @@ public enum Type { * * @return the {@link Type} of a TLV */ - public static Type typeForByteValue(final byte byteValue) { + public static Type typeForByteValue(byte byteValue) { switch (byteValue) { case 0x01: return PP2_TYPE_ALPN; @@ -74,6 +87,52 @@ public static Type typeForByteValue(final byte byteValue) { return OTHER; } } + + /** + * Returns the byte value for the {@link Type} as defined in the PROXY protocol 1.5 spec. + * + * @param type the {@link Type} + * + * @return the byte value of the {@link Type}. + */ + public static byte byteValueForType(Type type) { + switch (type) { + case PP2_TYPE_ALPN: + return 0x01; + case PP2_TYPE_AUTHORITY: + return 0x02; + case PP2_TYPE_SSL: + return 0x20; + case PP2_TYPE_SSL_VERSION: + return 0x21; + case PP2_TYPE_SSL_CN: + return 0x22; + case PP2_TYPE_NETNS: + return 0x30; + default: + throw new IllegalArgumentException("unknown type: " + type); + } + } + } + + /** + * Creates a new HAProxyTLV + * + * @param typeByteValue the byteValue of the TLV. 
This is especially important if non-standard TLVs are used + * @param content the raw content of the TLV + */ + public HAProxyTLV(byte typeByteValue, ByteBuf content) { + this(Type.typeForByteValue(typeByteValue), typeByteValue, content); + } + + /** + * Creates a new HAProxyTLV + * + * @param type the {@link Type} of the TLV + * @param content the raw content of the TLV + */ + public HAProxyTLV(Type type, ByteBuf content) { + this(type, Type.byteValueForType(type), content); } /** @@ -85,7 +144,7 @@ public static Type typeForByteValue(final byte byteValue) { */ HAProxyTLV(final Type type, final byte typeByteValue, final ByteBuf content) { super(content); - checkNotNull(type, "type"); + requireNonNull(type, "type"); this.type = type; this.typeByteValue = typeByteValue; @@ -148,4 +207,12 @@ public HAProxyTLV touch(Object hint) { super.touch(hint); return this; } + + @Override + public String toString() { + return StringUtil.simpleClassName(this) + + "(type: " + type() + + ", typeByteValue: " + typeByteValue() + + ", content: " + contentToString() + ')'; + } } diff --git a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/package-info.java b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/package-info.java index 6fdd68eed59..64f2c578e89 100644 --- a/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/package-info.java +++ b/codec-haproxy/src/main/java/io/netty/handler/codec/haproxy/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,6 +17,6 @@ /** * Decodes an HAProxy proxy protocol header * - * @see Proxy Protocol Specification + * @see Proxy Protocol Specification */ package io.netty.handler.codec.haproxy; diff --git a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyIntegrationTest.java b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyIntegrationTest.java new file mode 100644 index 00000000000..92437f27f39 --- /dev/null +++ b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyIntegrationTest.java @@ -0,0 +1,97 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.netty.handler.codec.haproxy; + +import io.netty.bootstrap.Bootstrap; +import io.netty.bootstrap.ServerBootstrap; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInitializer; +import io.netty.channel.EventLoopGroup; +import io.netty.channel.MultithreadEventLoopGroup; +import io.netty.channel.SimpleChannelInboundHandler; +import io.netty.channel.local.LocalAddress; +import io.netty.channel.local.LocalChannel; +import io.netty.channel.local.LocalHandler; +import io.netty.channel.local.LocalServerChannel; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.CountDownLatch; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicReference; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class HAProxyIntegrationTest { + + @Test + public void testBasicCase() throws Exception { + final CountDownLatch latch = new CountDownLatch(1); + final AtomicReference msgHolder = new AtomicReference<>(); + LocalAddress localAddress = new LocalAddress("HAProxyIntegrationTest"); + + EventLoopGroup group = new MultithreadEventLoopGroup(LocalHandler.newFactory()); + ServerBootstrap sb = new ServerBootstrap(); + sb.channel(LocalServerChannel.class) + .group(group) + .childHandler(new ChannelInitializer() { + @Override + protected void initChannel(Channel ch) { + ch.pipeline().addLast(new HAProxyMessageDecoder()); + ch.pipeline().addLast(new SimpleChannelInboundHandler() { + @Override + protected void messageReceived(ChannelHandlerContext ctx, HAProxyMessage msg) { + msgHolder.set(msg.retain()); + latch.countDown(); + } + }); + } + }); + Channel serverChannel = sb.bind(localAddress).get(); + + Bootstrap b = new Bootstrap(); + Channel clientChannel = b.channel(LocalChannel.class) + .handler(HAProxyMessageEncoder.INSTANCE) + .group(group) + .connect(localAddress).get(); + + try { + 
HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443); + clientChannel.writeAndFlush(message).sync(); + + assertTrue(latch.await(5, TimeUnit.SECONDS)); + HAProxyMessage readMessage = msgHolder.get(); + + assertEquals(message.protocolVersion(), readMessage.protocolVersion()); + assertEquals(message.command(), readMessage.command()); + assertEquals(message.proxiedProtocol(), readMessage.proxiedProtocol()); + assertEquals(message.sourceAddress(), readMessage.sourceAddress()); + assertEquals(message.destinationAddress(), readMessage.destinationAddress()); + assertEquals(message.sourcePort(), readMessage.sourcePort()); + assertEquals(message.destinationPort(), readMessage.destinationPort()); + + readMessage.release(); + } finally { + clientChannel.close().sync(); + serverChannel.close().sync(); + group.shutdownGracefully().sync(); + } + } +} diff --git a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoderTest.java b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoderTest.java index 2d4039de3d0..da7aa8c3aff 100644 --- a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoderTest.java +++ b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxyMessageDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,26 +16,32 @@ package io.netty.handler.codec.haproxy; import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelFuture; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.ProtocolDetectionResult; import io.netty.handler.codec.ProtocolDetectionState; import io.netty.handler.codec.haproxy.HAProxyProxiedProtocol.AddressFamily; import io.netty.handler.codec.haproxy.HAProxyProxiedProtocol.TransportProtocol; import io.netty.util.CharsetUtil; -import org.junit.Before; -import org.junit.Test; +import io.netty.util.concurrent.Future; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.util.List; -import static io.netty.buffer.Unpooled.*; -import static org.junit.Assert.*; +import static io.netty.buffer.Unpooled.buffer; +import static io.netty.buffer.Unpooled.copiedBuffer; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class HAProxyMessageDecoderTest { - private EmbeddedChannel ch; - @Before + @BeforeEach public void setUp() { ch = new EmbeddedChannel(new HAProxyMessageDecoder()); } @@ -58,6 +64,7 @@ public void testIPV4Decode() { assertEquals(443, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -78,6 +85,7 @@ public void testIPV6Decode() { assertEquals(443, 
msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -98,67 +106,113 @@ public void testUnknownProtocolDecode() { assertEquals(0, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testV1NoUDP() { String header = "PROXY UDP4 192.168.0.1 192.168.0.11 56324 443\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testInvalidPort() { String header = "PROXY TCP4 192.168.0.1 192.168.0.11 80000 443\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testInvalidIPV4Address() { String header = "PROXY TCP4 299.168.0.1 192.168.0.11 56324 443\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testInvalidIPV6Address() { String header = "PROXY TCP6 r001:0db8:85a3:0000:0000:8a2e:0370:7334 1050:0:0:0:5:600:300c:326b 56324 443\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testInvalidProtocol() { String header = "PROXY TCP7 192.168.0.1 192.168.0.11 56324 443\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + 
assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testMissingParams() { String header = "PROXY TCP4 192.168.0.1 192.168.0.11 56324\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testTooManyParams() { String header = "PROXY TCP4 192.168.0.1 192.168.0.11 56324 443 123\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testInvalidCommand() { String header = "PING TCP4 192.168.0.1 192.168.0.11 56324 443\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testInvalidEOL() { String header = "PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\nGET / HTTP/1.1\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testHeaderTooLong() { String header = "PROXY TCP4 192.168.0.1 192.168.0.11 56324 " + "00000000000000000000000000000000000000000000000000000000000000000443\r\n"; - ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); + assertThrows(HAProxyProtocolException.class, + () -> ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII))); + } + + @Test + public void testFailSlowHeaderTooLong() { + EmbeddedChannel 
slowFailCh = new EmbeddedChannel(new HAProxyMessageDecoder(false)); + try { + String headerPart1 = "PROXY TCP4 192.168.0.1 192.168.0.11 56324 " + + "000000000000000000000000000000000000000000000000000000000000000000000443"; + // Should not throw exception + assertFalse(slowFailCh.writeInbound(copiedBuffer(headerPart1, CharsetUtil.US_ASCII))); + String headerPart2 = "more header data"; + // Should not throw exception + assertFalse(slowFailCh.writeInbound(copiedBuffer(headerPart2, CharsetUtil.US_ASCII))); + String headerPart3 = "end of header\r\n"; + + int discarded = headerPart1.length() + headerPart2.length() + headerPart3.length() - 2; + assertThrows(HAProxyProtocolException.class, + () -> slowFailCh.writeInbound(copiedBuffer(headerPart3, CharsetUtil.US_ASCII)), "over " + discarded); + } finally { + assertFalse(slowFailCh.finishAndReleaseAll()); + } + } + + @Test + public void testFailFastHeaderTooLong() { + EmbeddedChannel fastFailCh = new EmbeddedChannel(new HAProxyMessageDecoder(true)); + try { + String headerPart1 = "PROXY TCP4 192.168.0.1 192.168.0.11 56324 " + + "000000000000000000000000000000000000000000000000000000000000000000000443"; + assertThrows(HAProxyProtocolException.class, + () -> fastFailCh.writeInbound(copiedBuffer(headerPart1, CharsetUtil.US_ASCII)), + "over " + headerPart1.length()); + } finally { + assertFalse(fastFailCh.finishAndReleaseAll()); + } } @Test @@ -171,7 +225,7 @@ public void testIncompleteHeader() { @Test public void testCloseOnInvalid() { - ChannelFuture closeFuture = ch.closeFuture(); + Future closeFuture = ch.closeFuture(); String header = "GET / HTTP/1.1\r\n"; try { ch.writeInbound(copiedBuffer(header, CharsetUtil.US_ASCII)); @@ -179,7 +233,7 @@ public void testCloseOnInvalid() { // swallow this exception since we're just testing to be sure the channel was closed } boolean isComplete = closeFuture.awaitUninterruptibly(5000); - if (!isComplete || !closeFuture.isDone() || !closeFuture.isSuccess()) { + if (!isComplete || 
!closeFuture.isDone() || closeFuture.isFailed()) { fail("Expected channel close"); } } @@ -264,6 +318,7 @@ public void testV2IPV4Decode() { assertEquals(443, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -319,6 +374,7 @@ public void testV2UDPDecode() { assertEquals(443, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -398,6 +454,7 @@ public void testv2IPV6Decode() { assertEquals(443, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -476,6 +533,7 @@ public void testv2UnixDecode() { assertEquals(0, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -531,6 +589,7 @@ public void testV2LocalProtocolDecode() { assertEquals(0, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -586,6 +645,7 @@ public void testV2UnknownProtocolDecode() { assertEquals(0, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } @Test @@ -642,9 +702,7 @@ public void testV2WithSslTLVs() throws Exception { assertTrue(0 < firstTlv.refCnt()); assertTrue(0 < secondTlv.refCnt()); assertTrue(0 < thirdTLV.refCnt()); - assertFalse(thirdTLV.release()); - assertFalse(secondTlv.release()); - assertTrue(firstTlv.release()); + assertTrue(msg.release()); assertEquals(0, firstTlv.refCnt()); assertEquals(0, secondTlv.refCnt()); assertEquals(0, thirdTLV.refCnt()); @@ -653,6 +711,51 @@ public void testV2WithSslTLVs() throws Exception { assertFalse(ch.finish()); } + @Test + public void testReleaseHAProxyMessage() { + ch = new EmbeddedChannel(new HAProxyMessageDecoder()); + + final byte[] bytes = { + 13, 10, 13, 10, 0, 13, 10, 81, 85, 73, 84, 10, 33, 17, 0, 35, 127, 0, 0, 1, 127, 0, 0, 1, + -55, -90, 7, 89, 32, 
0, 20, 5, 0, 0, 0, 0, 33, 0, 5, 84, 76, 83, 118, 49, 34, 0, 4, 76, 69, 65, 70 + }; + + int startChannels = ch.pipeline().names().size(); + assertTrue(ch.writeInbound(copiedBuffer(bytes))); + Object msgObj = ch.readInbound(); + assertEquals(startChannels - 1, ch.pipeline().names().size()); + HAProxyMessage msg = (HAProxyMessage) msgObj; + + final List tlvs = msg.tlvs(); + assertEquals(3, tlvs.size()); + + assertEquals(1, msg.refCnt()); + for (HAProxyTLV tlv : tlvs) { + assertEquals(3, tlv.refCnt()); + } + + // Retain the haproxy message + msg.retain(); + assertEquals(2, msg.refCnt()); + for (HAProxyTLV tlv : tlvs) { + assertEquals(3, tlv.refCnt()); + } + + // Decrease the haproxy message refCnt + msg.release(); + assertEquals(1, msg.refCnt()); + for (HAProxyTLV tlv : tlvs) { + assertEquals(3, tlv.refCnt()); + } + + // Release haproxy message, TLVs will be released with it + msg.release(); + assertEquals(0, msg.refCnt()); + for (HAProxyTLV tlv : tlvs) { + assertEquals(0, tlv.refCnt()); + } + } + @Test public void testV2WithTLV() { ch = new EmbeddedChannel(new HAProxyMessageDecoder(4)); @@ -738,9 +841,10 @@ public void testV2WithTLV() { assertEquals(0, msg.destinationPort()); assertNull(ch.readInbound()); assertFalse(ch.finish()); + assertTrue(msg.release()); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testV2InvalidProtocol() { byte[] header = new byte[28]; header[0] = 0x0D; // Binary Prefix @@ -778,10 +882,10 @@ public void testV2InvalidProtocol() { header[26] = 0x01; // Destination Port header[27] = (byte) 0xbb; // ----- - ch.writeInbound(copiedBuffer(header)); + assertThrows(HAProxyProtocolException.class, () -> ch.writeInbound(copiedBuffer(header))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testV2MissingParams() { byte[] header = new byte[26]; header[0] = 0x0D; // Binary Prefix @@ -816,10 +920,10 @@ public void testV2MissingParams() { header[24] = (byte) 0xdc; // Source Port header[25] = 0x04; // ----- - 
ch.writeInbound(copiedBuffer(header)); + assertThrows(HAProxyProtocolException.class, () -> ch.writeInbound(copiedBuffer(header))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testV2InvalidCommand() { byte[] header = new byte[28]; header[0] = 0x0D; // Binary Prefix @@ -857,10 +961,10 @@ public void testV2InvalidCommand() { header[26] = 0x01; // Destination Port header[27] = (byte) 0xbb; // ----- - ch.writeInbound(copiedBuffer(header)); + assertThrows(HAProxyProtocolException.class, () -> ch.writeInbound(copiedBuffer(header))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testV2InvalidVersion() { byte[] header = new byte[28]; header[0] = 0x0D; // Binary Prefix @@ -898,10 +1002,10 @@ public void testV2InvalidVersion() { header[26] = 0x01; // Destination Port header[27] = (byte) 0xbb; // ----- - ch.writeInbound(copiedBuffer(header)); + assertThrows(HAProxyProtocolException.class, () -> ch.writeInbound(copiedBuffer(header))); } - @Test(expected = HAProxyProtocolException.class) + @Test public void testV2HeaderTooLong() { ch = new EmbeddedChannel(new HAProxyMessageDecoder(0)); @@ -941,7 +1045,7 @@ public void testV2HeaderTooLong() { header[26] = 0x01; // Destination Port header[27] = (byte) 0xbb; // ----- - ch.writeInbound(copiedBuffer(header)); + assertThrows(HAProxyProtocolException.class, () -> ch.writeInbound(copiedBuffer(header))); } @Test diff --git a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxySSLTLVTest.java b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxySSLTLVTest.java index 51cc85d1a72..a2065b41d52 100644 --- a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxySSLTLVTest.java +++ b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HAProxySSLTLVTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,11 +17,12 @@ package io.netty.handler.codec.haproxy; import io.netty.buffer.Unpooled; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Collections; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HAProxySSLTLVTest { @@ -31,7 +32,7 @@ public void testClientBitmask() throws Exception { // 0b0000_0111 final byte allClientsEnabled = 0x7; final HAProxySSLTLV allClientsEnabledTLV = - new HAProxySSLTLV(0, allClientsEnabled, Collections.emptyList(), Unpooled.buffer()); + new HAProxySSLTLV(0, allClientsEnabled, Collections.emptyList(), Unpooled.buffer()); assertTrue(allClientsEnabledTLV.isPP2ClientCertConn()); assertTrue(allClientsEnabledTLV.isPP2ClientSSL()); @@ -43,7 +44,7 @@ public void testClientBitmask() throws Exception { final byte clientSSLandClientCertSessEnabled = 0x5; final HAProxySSLTLV clientSSLandClientCertSessTLV = - new HAProxySSLTLV(0, clientSSLandClientCertSessEnabled, Collections.emptyList(), + new HAProxySSLTLV(0, clientSSLandClientCertSessEnabled, Collections.emptyList(), Unpooled.buffer()); assertFalse(clientSSLandClientCertSessTLV.isPP2ClientCertConn()); @@ -55,7 +56,7 @@ public void testClientBitmask() throws Exception { final byte noClientEnabled = 0x0; final HAProxySSLTLV noClientTlv = - new HAProxySSLTLV(0, noClientEnabled, Collections.emptyList(), + new HAProxySSLTLV(0, noClientEnabled, Collections.emptyList(), Unpooled.buffer()); assertFalse(noClientTlv.isPP2ClientCertConn()); diff --git a/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java 
b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java new file mode 100644 index 00000000000..efc201e7dde --- /dev/null +++ b/codec-haproxy/src/test/java/io/netty/handler/codec/haproxy/HaProxyMessageEncoderTest.java @@ -0,0 +1,408 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ + +package io.netty.handler.codec.haproxy; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.haproxy.HAProxyTLV.Type; +import io.netty.util.ByteProcessor; +import io.netty.util.CharsetUtil; +import org.junit.jupiter.api.Test; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import static io.netty.handler.codec.haproxy.HAProxyConstants.*; +import static io.netty.handler.codec.haproxy.HAProxyMessageEncoder.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class HaProxyMessageEncoderTest { + + private static final int V2_HEADER_BYTES_LENGTH = 16; + private static final int IPv4_ADDRESS_BYTES_LENGTH = 12; + private static 
final int IPv6_ADDRESS_BYTES_LENGTH = 36; + + @Test + public void testIPV4EncodeProxyV1() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + assertEquals("PROXY TCP4 192.168.0.1 192.168.0.11 56324 443\r\n", + byteBuf.toString(CharsetUtil.US_ASCII)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testIPV6EncodeProxyV1() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6, + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "1050:0:0:0:5:600:300c:326b", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + assertEquals("PROXY TCP6 2001:0db8:85a3:0000:0000:8a2e:0370:7334 1050:0:0:0:5:600:300c:326b 56324 443\r\n", + byteBuf.toString(CharsetUtil.US_ASCII)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testIPv4EncodeProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.getByte(12); + assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x01, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.getByte(13); + assertEquals(0x01, (transportByte & 0xf0) >> 4); + assertEquals(0x01, transportByte & 0x0f); + 
+ // source address length + int sourceAddrLength = byteBuf.getUnsignedShort(14); + assertEquals(12, sourceAddrLength); + + // source address + byte[] sourceAddr = ByteBufUtil.getBytes(byteBuf, 16, 4); + assertArrayEquals(new byte[] { (byte) 0xc0, (byte) 0xa8, 0x00, 0x01 }, sourceAddr); + + // destination address + byte[] destAddr = ByteBufUtil.getBytes(byteBuf, 20, 4); + assertArrayEquals(new byte[] { (byte) 0xc0, (byte) 0xa8, 0x00, 0x0b }, destAddr); + + // source port + int sourcePort = byteBuf.getUnsignedShort(24); + assertEquals(56324, sourcePort); + + // destination port + int destPort = byteBuf.getUnsignedShort(26); + assertEquals(443, destPort); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testIPv6EncodeProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6, + "2001:0db8:85a3:0000:0000:8a2e:0370:7334", "1050:0:0:0:5:600:300c:326b", 56324, 443); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.getByte(12); + assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x01, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.getByte(13); + assertEquals(0x02, (transportByte & 0xf0) >> 4); + assertEquals(0x01, transportByte & 0x0f); + + // source address length + int sourceAddrLength = byteBuf.getUnsignedShort(14); + assertEquals(IPv6_ADDRESS_BYTES_LENGTH, sourceAddrLength); + + // source address + byte[] sourceAddr = ByteBufUtil.getBytes(byteBuf, 16, 16); + assertArrayEquals(new byte[] { + (byte) 0x20, (byte) 0x01, 0x0d, (byte) 0xb8, + (byte) 0x85, (byte) 0xa3, 0x00, 0x00, 0x00, 0x00, (byte) 0x8a, 0x2e, + 0x03, 0x70, 0x73, 0x34 + }, sourceAddr); + + 
// destination address + byte[] destAddr = ByteBufUtil.getBytes(byteBuf, 32, 16); + assertArrayEquals(new byte[] { + (byte) 0x10, (byte) 0x50, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x05, 0x06, 0x00, 0x30, 0x0c, 0x32, 0x6b + }, destAddr); + + // source port + int sourcePort = byteBuf.getUnsignedShort(48); + assertEquals(56324, sourcePort); + + // destination port + int destPort = byteBuf.getUnsignedShort(50); + assertEquals(443, destPort); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testUnixEncodeProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + "/var/run/src.sock", "/var/run/dst.sock", 0, 0); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = ByteBufUtil.getBytes(byteBuf, 0, 12); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.getByte(12); + assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x01, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.getByte(13); + assertEquals(0x03, (transportByte & 0xf0) >> 4); + assertEquals(0x01, transportByte & 0x0f); + + // address length + int addrLength = byteBuf.getUnsignedShort(14); + assertEquals(TOTAL_UNIX_ADDRESS_BYTES_LENGTH, addrLength); + + // source address + int srcAddrEnd = byteBuf.forEachByte(16, 108, ByteProcessor.FIND_NUL); + assertEquals("/var/run/src.sock", + byteBuf.slice(16, srcAddrEnd - 16).toString(CharsetUtil.US_ASCII)); + + // destination address + int dstAddrEnd = byteBuf.forEachByte(124, 108, ByteProcessor.FIND_NUL); + assertEquals("/var/run/dst.sock", + byteBuf.slice(124, dstAddrEnd - 124).toString(CharsetUtil.US_ASCII)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testTLVEncodeProxy() { + EmbeddedChannel ch = 
new EmbeddedChannel(INSTANCE); + + List tlvs = new ArrayList(); + + ByteBuf helloWorld = Unpooled.copiedBuffer("hello world", CharsetUtil.US_ASCII); + HAProxyTLV alpnTlv = new HAProxyTLV(Type.PP2_TYPE_ALPN, (byte) 0x01, helloWorld.copy()); + tlvs.add(alpnTlv); + + ByteBuf arbitrary = Unpooled.copiedBuffer("an arbitrary string", CharsetUtil.US_ASCII); + HAProxyTLV authorityTlv = new HAProxyTLV(Type.PP2_TYPE_AUTHORITY, (byte) 0x01, arbitrary.copy()); + tlvs.add(authorityTlv); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443, tlvs); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // length + assertEquals(byteBuf.getUnsignedShort(14), byteBuf.readableBytes() - V2_HEADER_BYTES_LENGTH); + + // skip to tlv section + ByteBuf tlv = byteBuf.skipBytes(V2_HEADER_BYTES_LENGTH + IPv4_ADDRESS_BYTES_LENGTH); + + // alpn tlv + assertEquals(alpnTlv.typeByteValue(), tlv.readByte()); + short bufLength = tlv.readShort(); + assertEquals(helloWorld.array().length, bufLength); + assertEquals(helloWorld, tlv.readSlice(bufLength)); + + // authority tlv + assertEquals(authorityTlv.typeByteValue(), tlv.readByte()); + bufLength = tlv.readShort(); + assertEquals(arbitrary.array().length, bufLength); + assertEquals(arbitrary, tlv.readSlice(bufLength)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testSslTLVEncodeProxy() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + List tlvs = new ArrayList(); + + ByteBuf helloWorld = Unpooled.copiedBuffer("hello world", CharsetUtil.US_ASCII); + HAProxyTLV alpnTlv = new HAProxyTLV(Type.PP2_TYPE_ALPN, (byte) 0x01, helloWorld.copy()); + tlvs.add(alpnTlv); + + ByteBuf arbitrary = Unpooled.copiedBuffer("an arbitrary string", CharsetUtil.US_ASCII); + HAProxyTLV authorityTlv = new HAProxyTLV(Type.PP2_TYPE_AUTHORITY, (byte) 0x01, arbitrary.copy()); + 
tlvs.add(authorityTlv); + + ByteBuf sslContent = Unpooled.copiedBuffer("some ssl content", CharsetUtil.US_ASCII); + HAProxySSLTLV haProxySSLTLV = new HAProxySSLTLV(1, (byte) 0x01, tlvs, sslContent.copy()); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + "192.168.0.1", "192.168.0.11", 56324, 443, + Collections.singletonList(haProxySSLTLV)); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + assertEquals(byteBuf.getUnsignedShort(14), byteBuf.readableBytes() - V2_HEADER_BYTES_LENGTH); + ByteBuf tlv = byteBuf.skipBytes(V2_HEADER_BYTES_LENGTH + IPv4_ADDRESS_BYTES_LENGTH); + + // ssl tlv type + assertEquals(haProxySSLTLV.typeByteValue(), tlv.readByte()); + + // length + int bufLength = tlv.readUnsignedShort(); + assertEquals(bufLength, tlv.readableBytes()); + + // client, verify + assertEquals(0x01, byteBuf.readByte()); + assertEquals(1, byteBuf.readInt()); + + // alpn tlv + assertEquals(alpnTlv.typeByteValue(), tlv.readByte()); + bufLength = tlv.readShort(); + assertEquals(helloWorld.array().length, bufLength); + assertEquals(helloWorld, tlv.readSlice(bufLength)); + + // authority tlv + assertEquals(authorityTlv.typeByteValue(), tlv.readByte()); + bufLength = tlv.readShort(); + assertEquals(arbitrary.array().length, bufLength); + assertEquals(arbitrary, tlv.readSlice(bufLength)); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testEncodeLocalProxyV2() { + EmbeddedChannel ch = new EmbeddedChannel(INSTANCE); + + HAProxyMessage message = new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.LOCAL, HAProxyProxiedProtocol.UNKNOWN, + null, null, 0, 0); + assertTrue(ch.writeOutbound(message)); + + ByteBuf byteBuf = ch.readOutbound(); + + // header + byte[] headerBytes = new byte[12]; + byteBuf.readBytes(headerBytes); + assertArrayEquals(BINARY_PREFIX, headerBytes); + + // command + byte commandByte = byteBuf.readByte(); 
+ assertEquals(0x02, (commandByte & 0xf0) >> 4); + assertEquals(0x00, commandByte & 0x0f); + + // transport protocol, address family + byte transportByte = byteBuf.readByte(); + assertEquals(0x00, transportByte); + + // source address length + int sourceAddrLength = byteBuf.readUnsignedShort(); + assertEquals(0, sourceAddrLength); + + assertFalse(byteBuf.isReadable()); + + byteBuf.release(); + assertFalse(ch.finish()); + } + + @Test + public void testInvalidIpV4Address() { + String invalidIpv4Address = "192.168.0.1234"; + assertThrows(IllegalArgumentException.class, () -> new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP4, + invalidIpv4Address, "192.168.0.11", 56324, 443)); + } + + @Test + public void testInvalidIpV6Address() { + String invalidIpv6Address = "2001:0db8:85a3:0000:0000:8a2e:0370:73345"; + assertThrows(IllegalArgumentException.class, () -> new HAProxyMessage( + HAProxyProtocolVersion.V1, HAProxyCommand.PROXY, HAProxyProxiedProtocol.TCP6, + invalidIpv6Address, "1050:0:0:0:5:600:300c:326b", 56324, 443)); + } + + @Test + public void testInvalidUnixAddress() { + String invalidUnixAddress = new String(new byte[UNIX_ADDRESS_BYTES_LENGTH + 1]); + assertThrows(IllegalArgumentException.class, () -> new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + invalidUnixAddress, "/var/run/dst.sock", 0, 0)); + } + + @Test + public void testNullUnixAddress() { + assertThrows(NullPointerException.class, () -> new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + null, null, 0, 0)); + } + + @Test + public void testLongUnixAddress() { + String longUnixAddress = new String(new char[109]).replace("\0", "a"); + assertThrows(IllegalArgumentException.class, () -> new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + "source", longUnixAddress, 0, 0)); + } + + @Test + 
public void testInvalidUnixPort() { + assertThrows(IllegalArgumentException.class, () -> new HAProxyMessage( + HAProxyProtocolVersion.V2, HAProxyCommand.PROXY, HAProxyProxiedProtocol.UNIX_STREAM, + "/var/run/src.sock", "/var/run/dst.sock", 80, 443)); + } +} diff --git a/codec-http/pom.xml b/codec-http/pom.xml index 6558e321c9c..f2f93ea21cc 100644 --- a/codec-http/pom.xml +++ b/codec-http/pom.xml @@ -6,7 +6,7 @@ ~ version 2.0 (the "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at: ~ - ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ https://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,13 +14,13 @@ ~ License for the specific language governing permissions and limitations ~ under the License. --> - + 4.0.0 io.netty netty-parent - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT netty-codec-http @@ -33,6 +33,21 @@ + + ${project.groupId} + netty-common + ${project.version} + + + ${project.groupId} + netty-buffer + ${project.version} + + + ${project.groupId} + netty-transport + ${project.version} + ${project.groupId} netty-codec @@ -42,7 +57,6 @@ ${project.groupId} netty-handler ${project.version} - true com.jcraft @@ -53,6 +67,35 @@ org.mockito mockito-core + + commons-io + commons-io + + + com.aayushatharva.brotli4j + brotli4j + true + + + com.aayushatharva.brotli4j + native-linux-x86_64 + test + + + com.aayushatharva.brotli4j + native-osx-x86_64 + test + + + com.aayushatharva.brotli4j + native-windows-x86_64 + test + + + com.github.luben + zstd-jni + true + diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/ClientCookieEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/ClientCookieEncoder.java deleted file mode 100644 index 51122585d46..00000000000 --- 
a/codec-http/src/main/java/io/netty/handler/codec/http/ClientCookieEncoder.java +++ /dev/null @@ -1,88 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http; - -import io.netty.handler.codec.http.cookie.ClientCookieDecoder; - -/** - * A RFC6265 compliant cookie encoder to be used client side, - * so only name=value pairs are sent. - * - * User-Agents are not supposed to interpret cookies, so, if present, {@link Cookie#rawValue()} will be used. - * Otherwise, {@link Cookie#value()} will be used unquoted. - * - * Note that multiple cookies are supposed to be sent at once in a single "Cookie" header. - * - *
    - * // Example
    - * {@link HttpRequest} req = ...;
    - * res.setHeader("Cookie", {@link ClientCookieEncoder}.encode("JSESSIONID", "1234"));
    - * 
    - * - * @see ClientCookieDecoder - */ -@Deprecated -public final class ClientCookieEncoder { - - /** - * Encodes the specified cookie into a Cookie header value. - * - * @param name the cookie name - * @param value the cookie value - * @return a Rfc6265 style Cookie header value - */ - @Deprecated - public static String encode(String name, String value) { - return io.netty.handler.codec.http.cookie.ClientCookieEncoder.LAX.encode(name, value); - } - - /** - * Encodes the specified cookie into a Cookie header value. - * - * @param cookie the specified cookie - * @return a Rfc6265 style Cookie header value - */ - @Deprecated - public static String encode(Cookie cookie) { - return io.netty.handler.codec.http.cookie.ClientCookieEncoder.LAX.encode(cookie); - } - - /** - * Encodes the specified cookies into a single Cookie header value. - * - * @param cookies some cookies - * @return a Rfc6265 style Cookie header value, null if no cookies are passed. - */ - @Deprecated - public static String encode(Cookie... cookies) { - return io.netty.handler.codec.http.cookie.ClientCookieEncoder.LAX.encode(cookies); - } - - /** - * Encodes the specified cookies into a single Cookie header value. - * - * @param cookies some cookies - * @return a Rfc6265 style Cookie header value, null if no cookies are passed. 
- */ - @Deprecated - public static String encode(Iterable cookies) { - return io.netty.handler.codec.http.cookie.ClientCookieEncoder.LAX.encode(cookies); - } - - private ClientCookieEncoder() { - // unused - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/CombinedHttpHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/http/CombinedHttpHeaders.java index 2d43b7ad04a..34bca41a29b 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/CombinedHttpHeaders.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/CombinedHttpHeaders.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -26,6 +26,7 @@ import java.util.List; import java.util.Map; +import static io.netty.handler.codec.http.HttpHeaderNames.SET_COOKIE; import static io.netty.util.AsciiString.CASE_INSENSITIVE_HASHER; import static io.netty.util.internal.StringUtil.COMMA; import static io.netty.util.internal.StringUtil.unescapeCsvFields; @@ -56,29 +57,19 @@ private static final class CombinedHttpHeadersImpl private CsvValueEscaper objectEscaper() { if (objectEscaper == null) { - objectEscaper = new CsvValueEscaper() { - @Override - public CharSequence escape(Object value) { - return StringUtil.escapeCsv(valueConverter().convertObject(value), true); - } - }; + objectEscaper = value -> StringUtil.escapeCsv(valueConverter().convertObject(value), true); } return objectEscaper; } private CsvValueEscaper charSequenceEscaper() { if (charSequenceEscaper == null) { - charSequenceEscaper = new CsvValueEscaper() { - @Override - public CharSequence escape(CharSequence value) { - return StringUtil.escapeCsv(value, true); - } 
- }; + charSequenceEscaper = value -> StringUtil.escapeCsv(value, true); } return charSequenceEscaper; } - public CombinedHttpHeadersImpl(HashingStrategy nameHashingStrategy, + CombinedHttpHeadersImpl(HashingStrategy nameHashingStrategy, ValueConverter valueConverter, io.netty.handler.codec.DefaultHeaders.NameValidator nameValidator) { super(nameHashingStrategy, valueConverter, nameValidator); @@ -87,7 +78,7 @@ public CombinedHttpHeadersImpl(HashingStrategy nameHashingStrategy @Override public Iterator valueIterator(CharSequence name) { Iterator itr = super.valueIterator(name); - if (!itr.hasNext()) { + if (!itr.hasNext() || cannotBeCombined(name)) { return itr; } Iterator unescapedItr = unescapeCsvFields(itr.next()).iterator(); @@ -100,7 +91,7 @@ public Iterator valueIterator(CharSequence name) { @Override public List getAll(CharSequence name) { List values = super.getAll(name); - if (values.isEmpty()) { + if (values.isEmpty() || cannotBeCombined(name)) { return values; } if (values.size() != 1) { @@ -213,9 +204,13 @@ public CombinedHttpHeadersImpl setObject(CharSequence name, Iterable values) return this; } + private static boolean cannotBeCombined(CharSequence name) { + return SET_COOKIE.contentEqualsIgnoreCase(name); + } + private CombinedHttpHeadersImpl addEscapedValue(CharSequence name, CharSequence escapedValue) { CharSequence currentValue = super.get(name); - if (currentValue == null) { + if (currentValue == null || cannotBeCombined(name)) { super.add(name, escapedValue); } else { super.set(name, commaSeparateEscapedValues(currentValue, escapedValue)); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/ComposedLastHttpContent.java b/codec-http/src/main/java/io/netty/handler/codec/http/ComposedLastHttpContent.java index b8b7dc967fa..8ceaf4b19d9 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/ComposedLastHttpContent.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/ComposedLastHttpContent.java @@ -5,7 +5,7 @@ * 
version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -28,6 +28,11 @@ final class ComposedLastHttpContent implements LastHttpContent { this.trailingHeaders = trailingHeaders; } + ComposedLastHttpContent(HttpHeaders trailingHeaders, DecoderResult result) { + this(trailingHeaders); + this.result = result; + } + @Override public HttpHeaders trailingHeaders() { return trailingHeaders; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/CompressionEncoderFactory.java b/codec-http/src/main/java/io/netty/handler/codec/http/CompressionEncoderFactory.java new file mode 100644 index 00000000000..f3d361d9e75 --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/CompressionEncoderFactory.java @@ -0,0 +1,27 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http; + +import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.MessageToByteEncoder; + +/** + * Compression Encoder Factory for create {@link MessageToByteEncoder} + * used to compress http content + */ +interface CompressionEncoderFactory { + MessageToByteEncoder createEncoder(); +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/Cookie.java b/codec-http/src/main/java/io/netty/handler/codec/http/Cookie.java deleted file mode 100644 index 4a362b81401..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/http/Cookie.java +++ /dev/null @@ -1,221 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http; - -import java.util.Set; - -/** - * An interface defining an - * HTTP cookie. - * @deprecated Use {@link io.netty.handler.codec.http.cookie.Cookie} instead. - */ -@Deprecated -public interface Cookie extends io.netty.handler.codec.http.cookie.Cookie { - - /** - * @deprecated Use {@link #name()} instead. - */ - @Deprecated - String getName(); - - /** - * @deprecated Use {@link #value()} instead. - */ - @Deprecated - String getValue(); - - /** - * @deprecated Use {@link #domain()} instead. - */ - @Deprecated - String getDomain(); - - /** - * @deprecated Use {@link #path()} instead. 
- */ - @Deprecated - String getPath(); - - /** - * @deprecated Use {@link #comment()} instead. - */ - @Deprecated - String getComment(); - - /** - * Returns the comment of this {@link Cookie}. - * - * @return The comment of this {@link Cookie} - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - String comment(); - - /** - * Sets the comment of this {@link Cookie}. - * - * @param comment The comment to use - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - void setComment(String comment); - - /** - * @deprecated Use {@link #maxAge()} instead. - */ - @Deprecated - long getMaxAge(); - - /** - * Returns the maximum age of this {@link Cookie} in seconds or {@link Long#MIN_VALUE} if unspecified - * - * @return The maximum age of this {@link Cookie} - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - @Override - long maxAge(); - - /** - * Sets the maximum age of this {@link Cookie} in seconds. - * If an age of {@code 0} is specified, this {@link Cookie} will be - * automatically removed by browser because it will expire immediately. - * If {@link Long#MIN_VALUE} is specified, this {@link Cookie} will be removed when the - * browser is closed. - * - * @param maxAge The maximum age of this {@link Cookie} in seconds - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - @Override - void setMaxAge(long maxAge); - - /** - * @deprecated Use {@link #version()} instead. - */ - @Deprecated - int getVersion(); - - /** - * Returns the version of this {@link Cookie}. - * - * @return The version of this {@link Cookie} - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - int version(); - - /** - * Sets the version of this {@link Cookie}. - * - * @param version The new version to use - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - void setVersion(int version); - - /** - * @deprecated Use {@link #commentUrl()} instead. - */ - @Deprecated - String getCommentUrl(); - - /** - * Returns the comment URL of this {@link Cookie}. 
- * - * @return The comment URL of this {@link Cookie} - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - String commentUrl(); - - /** - * Sets the comment URL of this {@link Cookie}. - * - * @param commentUrl The comment URL to use - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - void setCommentUrl(String commentUrl); - - /** - * Checks to see if this {@link Cookie} is to be discarded by the browser - * at the end of the current session. - * - * @return True if this {@link Cookie} is to be discarded, otherwise false - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - boolean isDiscard(); - - /** - * Sets the discard flag of this {@link Cookie}. - * If set to true, this {@link Cookie} will be discarded by the browser - * at the end of the current session - * - * @param discard True if the {@link Cookie} is to be discarded - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - void setDiscard(boolean discard); - - /** - * @deprecated Use {@link #ports()} instead. - */ - @Deprecated - Set getPorts(); - - /** - * Returns the ports that this {@link Cookie} can be accessed on. - * - * @return The {@link Set} of ports that this {@link Cookie} can use - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - Set ports(); - - /** - * Sets the ports that this {@link Cookie} can be accessed on. - * - * @param ports The ports that this {@link Cookie} can be accessed on - * - * @deprecated Not part of RFC6265 - */ - @Deprecated - void setPorts(int... ports); - - /** - * Sets the ports that this {@link Cookie} can be accessed on. - * - * @param ports The {@link Iterable} collection of ports that this - * {@link Cookie} can be accessed on. 
- * - * @deprecated Not part of RFC6265 - */ - @Deprecated - void setPorts(Iterable ports); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/CookieDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/CookieDecoder.java deleted file mode 100644 index 040e12d7109..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/http/CookieDecoder.java +++ /dev/null @@ -1,369 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http; - -import static io.netty.handler.codec.http.CookieUtil.firstInvalidCookieNameOctet; -import static io.netty.handler.codec.http.CookieUtil.firstInvalidCookieValueOctet; -import static io.netty.handler.codec.http.CookieUtil.unwrapValue; - -import io.netty.handler.codec.DateFormatter; -import io.netty.handler.codec.http.cookie.CookieHeaderNames; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; - -import java.util.ArrayList; -import java.util.Collections; -import java.util.Date; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; - -/** - * @deprecated Use {@link io.netty.handler.codec.http.cookie.ClientCookieDecoder} - * or {@link io.netty.handler.codec.http.cookie.ServerCookieDecoder} instead. - * - * Decodes an HTTP header value into {@link Cookie}s. 
This decoder can decode - * the HTTP cookie version 0, 1, and 2. - * - *
    - * {@link HttpRequest} req = ...;
    - * String value = req.getHeader("Cookie");
    - * Set<{@link Cookie}> cookies = {@link CookieDecoder}.decode(value);
    - * 
    - * - * @see io.netty.handler.codec.http.cookie.ClientCookieDecoder - * @see io.netty.handler.codec.http.cookie.ServerCookieDecoder - */ -@Deprecated -public final class CookieDecoder { - - private final InternalLogger logger = InternalLoggerFactory.getInstance(getClass()); - - private static final CookieDecoder STRICT = new CookieDecoder(true); - - private static final CookieDecoder LAX = new CookieDecoder(false); - - private static final String COMMENT = "Comment"; - - private static final String COMMENTURL = "CommentURL"; - - private static final String DISCARD = "Discard"; - - private static final String PORT = "Port"; - - private static final String VERSION = "Version"; - - private final boolean strict; - - public static Set decode(String header) { - return decode(header, true); - } - - public static Set decode(String header, boolean strict) { - return (strict ? STRICT : LAX).doDecode(header); - } - - /** - * Decodes the specified HTTP header value into {@link Cookie}s. - * - * @return the decoded {@link Cookie}s - */ - private Set doDecode(String header) { - List names = new ArrayList(8); - List values = new ArrayList(8); - extractKeyValuePairs(header, names, values); - - if (names.isEmpty()) { - return Collections.emptySet(); - } - - int i; - int version = 0; - - // $Version is the only attribute that can appear before the actual - // cookie name-value pair. - if (names.get(0).equalsIgnoreCase(VERSION)) { - try { - version = Integer.parseInt(values.get(0)); - } catch (NumberFormatException e) { - // Ignore. - } - i = 1; - } else { - i = 0; - } - - if (names.size() <= i) { - // There's a version attribute, but nothing more. 
- return Collections.emptySet(); - } - - Set cookies = new TreeSet(); - for (; i < names.size(); i ++) { - String name = names.get(i); - String value = values.get(i); - if (value == null) { - value = ""; - } - - Cookie c = initCookie(name, value); - - if (c == null) { - break; - } - - boolean discard = false; - boolean secure = false; - boolean httpOnly = false; - String comment = null; - String commentURL = null; - String domain = null; - String path = null; - long maxAge = Long.MIN_VALUE; - List ports = new ArrayList(2); - - for (int j = i + 1; j < names.size(); j++, i++) { - name = names.get(j); - value = values.get(j); - - if (DISCARD.equalsIgnoreCase(name)) { - discard = true; - } else if (CookieHeaderNames.SECURE.equalsIgnoreCase(name)) { - secure = true; - } else if (CookieHeaderNames.HTTPONLY.equalsIgnoreCase(name)) { - httpOnly = true; - } else if (COMMENT.equalsIgnoreCase(name)) { - comment = value; - } else if (COMMENTURL.equalsIgnoreCase(name)) { - commentURL = value; - } else if (CookieHeaderNames.DOMAIN.equalsIgnoreCase(name)) { - domain = value; - } else if (CookieHeaderNames.PATH.equalsIgnoreCase(name)) { - path = value; - } else if (CookieHeaderNames.EXPIRES.equalsIgnoreCase(name)) { - Date date = DateFormatter.parseHttpDate(value); - if (date != null) { - long maxAgeMillis = date.getTime() - System.currentTimeMillis(); - maxAge = maxAgeMillis / 1000 + (maxAgeMillis % 1000 != 0? 1 : 0); - } - } else if (CookieHeaderNames.MAX_AGE.equalsIgnoreCase(name)) { - maxAge = Integer.parseInt(value); - } else if (VERSION.equalsIgnoreCase(name)) { - version = Integer.parseInt(value); - } else if (PORT.equalsIgnoreCase(name)) { - String[] portList = value.split(","); - for (String s1: portList) { - try { - ports.add(Integer.valueOf(s1)); - } catch (NumberFormatException e) { - // Ignore. 
- } - } - } else { - break; - } - } - - c.setVersion(version); - c.setMaxAge(maxAge); - c.setPath(path); - c.setDomain(domain); - c.setSecure(secure); - c.setHttpOnly(httpOnly); - if (version > 0) { - c.setComment(comment); - } - if (version > 1) { - c.setCommentUrl(commentURL); - c.setPorts(ports); - c.setDiscard(discard); - } - - cookies.add(c); - } - - return cookies; - } - - private static void extractKeyValuePairs( - final String header, final List names, final List values) { - final int headerLen = header.length(); - loop: for (int i = 0;;) { - - // Skip spaces and separators. - for (;;) { - if (i == headerLen) { - break loop; - } - switch (header.charAt(i)) { - case '\t': case '\n': case 0x0b: case '\f': case '\r': - case ' ': case ',': case ';': - i ++; - continue; - } - break; - } - - // Skip '$'. - for (;;) { - if (i == headerLen) { - break loop; - } - if (header.charAt(i) == '$') { - i ++; - continue; - } - break; - } - - String name; - String value; - - if (i == headerLen) { - name = null; - value = null; - } else { - int newNameStart = i; - keyValLoop: for (;;) { - switch (header.charAt(i)) { - case ';': - // NAME; (no value till ';') - name = header.substring(newNameStart, i); - value = null; - break keyValLoop; - case '=': - // NAME=VALUE - name = header.substring(newNameStart, i); - i ++; - if (i == headerLen) { - // NAME= (empty value, i.e. nothing after '=') - value = ""; - break keyValLoop; - } - - int newValueStart = i; - char c = header.charAt(i); - if (c == '"' || c == '\'') { - // NAME="VALUE" or NAME='VALUE' - StringBuilder newValueBuf = new StringBuilder(header.length() - i); - final char q = c; - boolean hadBackslash = false; - i ++; - for (;;) { - if (i == headerLen) { - value = newValueBuf.toString(); - break keyValLoop; - } - if (hadBackslash) { - hadBackslash = false; - c = header.charAt(i ++); - switch (c) { - case '\\': case '"': case '\'': - // Escape last backslash. 
- newValueBuf.setCharAt(newValueBuf.length() - 1, c); - break; - default: - // Do not escape last backslash. - newValueBuf.append(c); - } - } else { - c = header.charAt(i ++); - if (c == q) { - value = newValueBuf.toString(); - break keyValLoop; - } - newValueBuf.append(c); - if (c == '\\') { - hadBackslash = true; - } - } - } - } else { - // NAME=VALUE; - int semiPos = header.indexOf(';', i); - if (semiPos > 0) { - value = header.substring(newValueStart, semiPos); - i = semiPos; - } else { - value = header.substring(newValueStart); - i = headerLen; - } - } - break keyValLoop; - default: - i ++; - } - - if (i == headerLen) { - // NAME (no value till the end of string) - name = header.substring(newNameStart); - value = null; - break; - } - } - } - - names.add(name); - values.add(value); - } - } - - private CookieDecoder(boolean strict) { - this.strict = strict; - } - - private DefaultCookie initCookie(String name, String value) { - if (name == null || name.length() == 0) { - logger.debug("Skipping cookie with null name"); - return null; - } - - if (value == null) { - logger.debug("Skipping cookie with null value"); - return null; - } - - CharSequence unwrappedValue = unwrapValue(value); - if (unwrappedValue == null) { - logger.debug("Skipping cookie because starting quotes are not properly balanced in '{}'", - unwrappedValue); - return null; - } - - int invalidOctetPos; - if (strict && (invalidOctetPos = firstInvalidCookieNameOctet(name)) >= 0) { - if (logger.isDebugEnabled()) { - logger.debug("Skipping cookie because name '{}' contains invalid char '{}'", - name, name.charAt(invalidOctetPos)); - } - return null; - } - - final boolean wrap = unwrappedValue.length() != value.length(); - - if (strict && (invalidOctetPos = firstInvalidCookieValueOctet(unwrappedValue)) >= 0) { - if (logger.isDebugEnabled()) { - logger.debug("Skipping cookie because value '{}' contains invalid char '{}'", - unwrappedValue, unwrappedValue.charAt(invalidOctetPos)); - } - return null; - } - 
- DefaultCookie cookie = new DefaultCookie(name, unwrappedValue.toString()); - cookie.setWrap(wrap); - return cookie; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/CookieUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/CookieUtil.java deleted file mode 100644 index fdaae035fd3..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/http/CookieUtil.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2015 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.http; - -import java.util.BitSet; - -/** - * @deprecated Duplicate of package private ${@link io.netty.handler.codec.http.cookie.CookieUtil} - */ -@Deprecated -final class CookieUtil { - - private static final BitSet VALID_COOKIE_VALUE_OCTETS = validCookieValueOctets(); - - private static final BitSet VALID_COOKIE_NAME_OCTETS = validCookieNameOctets(VALID_COOKIE_VALUE_OCTETS); - - // US-ASCII characters excluding CTLs, whitespace, DQUOTE, comma, semicolon, and backslash - private static BitSet validCookieValueOctets() { - BitSet bits = new BitSet(8); - for (int i = 35; i < 127; i++) { - // US-ASCII characters excluding CTLs (%x00-1F / %x7F) - bits.set(i); - } - bits.set('"', false); // exclude DQUOTE = %x22 - bits.set(',', false); // exclude comma = %x2C - bits.set(';', false); // exclude semicolon = %x3B - bits.set('\\', false); // exclude backslash = %x5C - return bits; - } - - // token = 1* - // separators = "(" | ")" | "<" | ">" | "@" - // | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?" 
| "=" - // | "{" | "}" | SP | HT - private static BitSet validCookieNameOctets(BitSet validCookieValueOctets) { - BitSet bits = new BitSet(8); - bits.or(validCookieValueOctets); - bits.set('(', false); - bits.set(')', false); - bits.set('<', false); - bits.set('>', false); - bits.set('@', false); - bits.set(':', false); - bits.set('/', false); - bits.set('[', false); - bits.set(']', false); - bits.set('?', false); - bits.set('=', false); - bits.set('{', false); - bits.set('}', false); - bits.set(' ', false); - bits.set('\t', false); - return bits; - } - - static int firstInvalidCookieNameOctet(CharSequence cs) { - return firstInvalidOctet(cs, VALID_COOKIE_NAME_OCTETS); - } - - static int firstInvalidCookieValueOctet(CharSequence cs) { - return firstInvalidOctet(cs, VALID_COOKIE_VALUE_OCTETS); - } - - static int firstInvalidOctet(CharSequence cs, BitSet bits) { - for (int i = 0; i < cs.length(); i++) { - char c = cs.charAt(i); - if (!bits.get(c)) { - return i; - } - } - return -1; - } - - static CharSequence unwrapValue(CharSequence cs) { - final int len = cs.length(); - if (len > 0 && cs.charAt(0) == '"') { - if (len >= 2 && cs.charAt(len - 1) == '"') { - // properly balanced - return len == 2 ? "" : cs.subSequence(1, len - 1); - } else { - return null; - } - } - return cs; - } - - private CookieUtil() { - // Unused - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultCookie.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultCookie.java deleted file mode 100644 index 902ba16a2d8..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultCookie.java +++ /dev/null @@ -1,195 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http; - -import java.util.Collections; -import java.util.Set; -import java.util.TreeSet; - -/** - * The default {@link Cookie} implementation. - * - * @deprecated Use {@link io.netty.handler.codec.http.cookie.DefaultCookie} instead. - */ -@Deprecated -public class DefaultCookie extends io.netty.handler.codec.http.cookie.DefaultCookie implements Cookie { - - private String comment; - private String commentUrl; - private boolean discard; - private Set ports = Collections.emptySet(); - private Set unmodifiablePorts = ports; - private int version; - - /** - * Creates a new cookie with the specified name and value. 
- */ - public DefaultCookie(String name, String value) { - super(name, value); - } - - @Override - @Deprecated - public String getName() { - return name(); - } - - @Override - @Deprecated - public String getValue() { - return value(); - } - - @Override - @Deprecated - public String getDomain() { - return domain(); - } - - @Override - @Deprecated - public String getPath() { - return path(); - } - - @Override - @Deprecated - public String getComment() { - return comment(); - } - - @Override - @Deprecated - public String comment() { - return comment; - } - - @Override - @Deprecated - public void setComment(String comment) { - this.comment = validateValue("comment", comment); - } - - @Override - @Deprecated - public String getCommentUrl() { - return commentUrl(); - } - - @Override - @Deprecated - public String commentUrl() { - return commentUrl; - } - - @Override - @Deprecated - public void setCommentUrl(String commentUrl) { - this.commentUrl = validateValue("commentUrl", commentUrl); - } - - @Override - @Deprecated - public boolean isDiscard() { - return discard; - } - - @Override - @Deprecated - public void setDiscard(boolean discard) { - this.discard = discard; - } - - @Override - @Deprecated - public Set getPorts() { - return ports(); - } - - @Override - @Deprecated - public Set ports() { - if (unmodifiablePorts == null) { - unmodifiablePorts = Collections.unmodifiableSet(ports); - } - return unmodifiablePorts; - } - - @Override - @Deprecated - public void setPorts(int... 
ports) { - if (ports == null) { - throw new NullPointerException("ports"); - } - - int[] portsCopy = ports.clone(); - if (portsCopy.length == 0) { - unmodifiablePorts = this.ports = Collections.emptySet(); - } else { - Set newPorts = new TreeSet(); - for (int p: portsCopy) { - if (p <= 0 || p > 65535) { - throw new IllegalArgumentException("port out of range: " + p); - } - newPorts.add(Integer.valueOf(p)); - } - this.ports = newPorts; - unmodifiablePorts = null; - } - } - - @Override - @Deprecated - public void setPorts(Iterable ports) { - Set newPorts = new TreeSet(); - for (int p: ports) { - if (p <= 0 || p > 65535) { - throw new IllegalArgumentException("port out of range: " + p); - } - newPorts.add(Integer.valueOf(p)); - } - if (newPorts.isEmpty()) { - unmodifiablePorts = this.ports = Collections.emptySet(); - } else { - this.ports = newPorts; - unmodifiablePorts = null; - } - } - - @Override - @Deprecated - public long getMaxAge() { - return maxAge(); - } - - @Override - @Deprecated - public int getVersion() { - return version(); - } - - @Override - @Deprecated - public int version() { - return version; - } - - @Override - @Deprecated - public void setVersion(int version) { - this.version = version; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java index 117e6dbf648..841f4efad65 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpRequest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,9 +16,10 @@ package io.netty.handler.codec.http; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.util.IllegalReferenceCountException; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * Default implementation of {@link FullHttpRequest}. @@ -47,15 +48,15 @@ public DefaultFullHttpRequest(HttpVersion httpVersion, HttpMethod method, String public DefaultFullHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, ByteBuf content, boolean validateHeaders) { super(httpVersion, method, uri, validateHeaders); - this.content = checkNotNull(content, "content"); + this.content = requireNonNull(content, "content"); trailingHeader = new DefaultHttpHeaders(validateHeaders); } public DefaultFullHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, ByteBuf content, HttpHeaders headers, HttpHeaders trailingHeader) { super(httpVersion, method, uri, headers); - this.content = checkNotNull(content, "content"); - this.trailingHeader = checkNotNull(trailingHeader, "trailingHeader"); + this.content = requireNonNull(content, "content"); + this.trailingHeader = requireNonNull(trailingHeader, "trailingHeader"); } @Override @@ -152,7 +153,7 @@ public FullHttpRequest replace(ByteBuf content) { public int hashCode() { int hash = this.hash; if (hash == 0) { - if (content().refCnt() != 0) { + if (ByteBufUtil.isAccessible(content())) { try { hash = 31 + content().hashCode(); } catch (IllegalReferenceCountException ignored) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpResponse.java 
b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpResponse.java index 012b996a240..180af3137ef 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpResponse.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultFullHttpResponse.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,10 +16,11 @@ package io.netty.handler.codec.http; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.util.IllegalReferenceCountException; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * Default implementation of a {@link FullHttpResponse}. @@ -59,7 +60,7 @@ public DefaultFullHttpResponse(HttpVersion version, HttpResponseStatus status, public DefaultFullHttpResponse(HttpVersion version, HttpResponseStatus status, ByteBuf content, boolean validateHeaders, boolean singleFieldHeaders) { super(version, status, validateHeaders, singleFieldHeaders); - this.content = checkNotNull(content, "content"); + this.content = requireNonNull(content, "content"); this.trailingHeaders = singleFieldHeaders ? 
new CombinedHttpHeaders(validateHeaders) : new DefaultHttpHeaders(validateHeaders); } @@ -67,8 +68,8 @@ public DefaultFullHttpResponse(HttpVersion version, HttpResponseStatus status, public DefaultFullHttpResponse(HttpVersion version, HttpResponseStatus status, ByteBuf content, HttpHeaders headers, HttpHeaders trailingHeaders) { super(version, status, headers); - this.content = checkNotNull(content, "content"); - this.trailingHeaders = checkNotNull(trailingHeaders, "trailingHeaders"); + this.content = requireNonNull(content, "content"); + this.trailingHeaders = requireNonNull(trailingHeaders, "trailingHeaders"); } @Override @@ -159,7 +160,7 @@ public FullHttpResponse replace(ByteBuf content) { public int hashCode() { int hash = this.hash; if (hash == 0) { - if (content().refCnt() != 0) { + if (ByteBufUtil.isAccessible(content())) { try { hash = 31 + content().hashCode(); } catch (IllegalReferenceCountException ignored) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpContent.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpContent.java index 48fad8eb1ab..5680b4fc836 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpContent.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpContent.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http; +import static java.util.Objects.requireNonNull; + import io.netty.buffer.ByteBuf; import io.netty.util.internal.StringUtil; @@ -29,9 +31,7 @@ public class DefaultHttpContent extends DefaultHttpObject implements HttpContent * Creates a new instance with the specified chunk content. */ public DefaultHttpContent(ByteBuf content) { - if (content == null) { - throw new NullPointerException("content"); - } + requireNonNull(content, "content"); this.content = content; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpHeaders.java index 88af27f738e..71bbefaf205 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpHeaders.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpHeaders.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -24,7 +24,6 @@ import io.netty.handler.codec.ValueConverter; import io.netty.util.AsciiString; import io.netty.util.ByteProcessor; -import io.netty.util.internal.PlatformDependent; import java.util.ArrayList; import java.util.Calendar; @@ -44,30 +43,20 @@ */ public class DefaultHttpHeaders extends HttpHeaders { private static final int HIGHEST_INVALID_VALUE_CHAR_MASK = ~15; - private static final ByteProcessor HEADER_NAME_VALIDATOR = new ByteProcessor() { - @Override - public boolean process(byte value) throws Exception { - validateHeaderNameElement(value); - return true; - } + private static final ByteProcessor HEADER_NAME_VALIDATOR = value -> { + validateHeaderNameElement(value); + return true; }; - static final NameValidator HttpNameValidator = new NameValidator() { - @Override - public void validateName(CharSequence name) { - if (name == null || name.length() == 0) { - throw new IllegalArgumentException("empty headers are not allowed [" + name + "]"); - } - if (name instanceof AsciiString) { - try { - ((AsciiString) name).forEachByte(HEADER_NAME_VALIDATOR); - } catch (Exception e) { - PlatformDependent.throwException(e); - } - } else { - // Go through each character in the name - for (int index = 0; index < name.length(); ++index) { - validateHeaderNameElement(name.charAt(index)); - } + static final NameValidator HttpNameValidator = name -> { + if (name == null || name.length() == 0) { + throw new IllegalArgumentException("empty headers are not allowed [" + name + ']'); + } + if (name instanceof AsciiString) { + ((AsciiString) name).forEachByte(HEADER_NAME_VALIDATOR); + } else { + // Go through each character in the name + for (int index = 0; index < name.length(); ++index) { + 
validateHeaderNameElement(name.charAt(index)); } } }; @@ -78,14 +67,26 @@ public DefaultHttpHeaders() { this(true); } + /** + * Warning! Setting {@code validate} to {@code false} will mean that Netty won't + * validate & protect against user-supplied header values that are malicious. + * This can leave your server implementation vulnerable to + * + * CWE-113: Improper Neutralization of CRLF Sequences in HTTP Headers ('HTTP Response Splitting') + * . + * When disabling this validation, it is the responsibility of the caller to ensure that the values supplied + * do not contain a non-url-escaped carriage return (CR) and/or line feed (LF) characters. + * + * @param validate Should Netty validate Header values to ensure they aren't malicious. + */ public DefaultHttpHeaders(boolean validate) { this(validate, nameValidator(validate)); } protected DefaultHttpHeaders(boolean validate, NameValidator nameValidator) { - this(new DefaultHeadersImpl(CASE_INSENSITIVE_HASHER, - valueConverter(validate), - nameValidator)); + this(new DefaultHeadersImpl<>(CASE_INSENSITIVE_HASHER, + valueConverter(validate), + nameValidator)); } protected DefaultHttpHeaders(DefaultHeaders headers) { @@ -257,7 +258,7 @@ public List> entries() { if (isEmpty()) { return Collections.emptyList(); } - List> entriesConverted = new ArrayList>( + List> entriesConverted = new ArrayList<>( headers.size()); for (Entry entry : this) { entriesConverted.add(entry); @@ -372,8 +373,7 @@ private static void validateHeaderNameElement(byte value) { default: // Check to see if the character is not an ASCII character, or invalid if (value < 0) { - throw new IllegalArgumentException("a header name cannot contain non-ASCII character: " + - value); + throw new IllegalArgumentException("a header name cannot contain non-ASCII character: " + value); } } } @@ -464,6 +464,8 @@ private static int validateValueChar(CharSequence seq, int state, char character throw new IllegalArgumentException("a header value contains a prohibited 
character '\\v': " + seq); case '\f': throw new IllegalArgumentException("a header value contains a prohibited character '\\f': " + seq); + default: + break; } } @@ -475,15 +477,15 @@ private static int validateValueChar(CharSequence seq, int state, char character return 1; case '\n': return 2; + default: + break; } break; case 1: - switch (character) { - case '\n': - return 2; - default: - throw new IllegalArgumentException("only '\\n' is allowed after '\\r': " + seq); + if (character == '\n') { + return 2; } + throw new IllegalArgumentException("only '\\n' is allowed after '\\r': " + seq); case 2: switch (character) { case '\t': @@ -492,6 +494,8 @@ private static int validateValueChar(CharSequence seq, int state, char character default: throw new IllegalArgumentException("only ' ' and '\\t' are allowed after '\\n': " + seq); } + default: + break; } return state; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpMessage.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpMessage.java index 5a6a45a376d..e2d083c5826 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpMessage.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpMessage.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.handler.codec.http; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@link HttpMessage} implementation. 
@@ -45,8 +45,8 @@ protected DefaultHttpMessage(final HttpVersion version, boolean validateHeaders, * Creates a new instance. */ protected DefaultHttpMessage(final HttpVersion version, HttpHeaders headers) { - this.version = checkNotNull(version, "version"); - this.headers = checkNotNull(headers, "headers"); + this.version = requireNonNull(version, "version"); + this.headers = requireNonNull(headers, "headers"); } @Override @@ -89,9 +89,7 @@ public boolean equals(Object o) { @Override public HttpMessage setProtocolVersion(HttpVersion version) { - if (version == null) { - throw new NullPointerException("version"); - } + requireNonNull(version, "version"); this.version = version; return this; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpObject.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpObject.java index c26ad39a7bb..9011d1ceaaf 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpObject.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpObject.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http; +import static java.util.Objects.requireNonNull; + import io.netty.handler.codec.DecoderResult; public class DefaultHttpObject implements HttpObject { @@ -39,9 +41,7 @@ public DecoderResult getDecoderResult() { @Override public void setDecoderResult(DecoderResult decoderResult) { - if (decoderResult == null) { - throw new NullPointerException("decoderResult"); - } + requireNonNull(decoderResult, "decoderResult"); this.decoderResult = decoderResult; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java index 84be3bb72c9..88aff4f7f4e 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpRequest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.handler.codec.http; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@link HttpRequest} implementation. 
@@ -46,8 +46,8 @@ public DefaultHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri */ public DefaultHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, boolean validateHeaders) { super(httpVersion, validateHeaders, false); - this.method = checkNotNull(method, "method"); - this.uri = checkNotNull(uri, "uri"); + this.method = requireNonNull(method, "method"); + this.uri = requireNonNull(uri, "uri"); } /** @@ -60,8 +60,8 @@ public DefaultHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri */ public DefaultHttpRequest(HttpVersion httpVersion, HttpMethod method, String uri, HttpHeaders headers) { super(httpVersion, headers); - this.method = checkNotNull(method, "method"); - this.uri = checkNotNull(uri, "uri"); + this.method = requireNonNull(method, "method"); + this.uri = requireNonNull(uri, "uri"); } @Override @@ -88,18 +88,14 @@ public String uri() { @Override public HttpRequest setMethod(HttpMethod method) { - if (method == null) { - throw new NullPointerException("method"); - } + requireNonNull(method, "method"); this.method = method; return this; } @Override public HttpRequest setUri(String uri) { - if (uri == null) { - throw new NullPointerException("uri"); - } + requireNonNull(uri, "uri"); this.uri = uri; return this; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpResponse.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpResponse.java index 86858108a27..a77ecb0f181 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpResponse.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultHttpResponse.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.handler.codec.http; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@link HttpResponse} implementation. @@ -60,7 +60,7 @@ public DefaultHttpResponse(HttpVersion version, HttpResponseStatus status, boole public DefaultHttpResponse(HttpVersion version, HttpResponseStatus status, boolean validateHeaders, boolean singleFieldHeaders) { super(version, validateHeaders, singleFieldHeaders); - this.status = checkNotNull(status, "status"); + this.status = requireNonNull(status, "status"); } /** @@ -72,7 +72,7 @@ public DefaultHttpResponse(HttpVersion version, HttpResponseStatus status, boole */ public DefaultHttpResponse(HttpVersion version, HttpResponseStatus status, HttpHeaders headers) { super(version, headers); - this.status = checkNotNull(status, "status"); + this.status = requireNonNull(status, "status"); } @Override @@ -88,9 +88,7 @@ public HttpResponseStatus status() { @Override public HttpResponse setStatus(HttpResponseStatus status) { - if (status == null) { - throw new NullPointerException("status"); - } + requireNonNull(status, "status"); this.status = status; return this; } @@ -105,4 +103,23 @@ public HttpResponse setProtocolVersion(HttpVersion version) { public String toString() { return HttpMessageUtil.appendResponse(new StringBuilder(256), this).toString(); } + + @Override + public int hashCode() { + int result = 1; + result = 31 * result + status.hashCode(); + result = 31 * result + super.hashCode(); + return result; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof DefaultHttpResponse)) { + return false; + } + + DefaultHttpResponse 
other = (DefaultHttpResponse) o; + + return status.equals(other.status()) && super.equals(o); + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultLastHttpContent.java b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultLastHttpContent.java index 67c619a521a..1cc1c33e26e 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/DefaultLastHttpContent.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DefaultLastHttpContent.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -115,15 +115,12 @@ private void appendHeaders(StringBuilder buf) { } private static final class TrailingHttpHeaders extends DefaultHttpHeaders { - private static final NameValidator TrailerNameValidator = new NameValidator() { - @Override - public void validateName(CharSequence name) { - DefaultHttpHeaders.HttpNameValidator.validateName(name); - if (HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(name) - || HttpHeaderNames.TRANSFER_ENCODING.contentEqualsIgnoreCase(name) - || HttpHeaderNames.TRAILER.contentEqualsIgnoreCase(name)) { - throw new IllegalArgumentException("prohibited trailing header: " + name); - } + private static final NameValidator TrailerNameValidator = name -> { + DefaultHttpHeaders.HttpNameValidator.validateName(name); + if (HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(name) + || HttpHeaderNames.TRANSFER_ENCODING.contentEqualsIgnoreCase(name) + || HttpHeaderNames.TRAILER.contentEqualsIgnoreCase(name)) { + throw new IllegalArgumentException("prohibited trailing header: " + name); } }; diff --git 
a/codec-http/src/main/java/io/netty/handler/codec/http/DelegatingChannelHandlerContext.java b/codec-http/src/main/java/io/netty/handler/codec/http/DelegatingChannelHandlerContext.java new file mode 100644 index 00000000000..2236766155d --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/DelegatingChannelHandlerContext.java @@ -0,0 +1,211 @@ +/* + * Copyright 2018 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http; + +import io.netty.buffer.ByteBufAllocator; +import io.netty.channel.Channel; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelPipeline; +import io.netty.util.Attribute; +import io.netty.util.AttributeKey; +import io.netty.util.concurrent.EventExecutor; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; + +import java.net.SocketAddress; +import java.util.Objects; + +abstract class DelegatingChannelHandlerContext implements ChannelHandlerContext { + + private final ChannelHandlerContext ctx; + + DelegatingChannelHandlerContext(ChannelHandlerContext ctx) { + this.ctx = Objects.requireNonNull(ctx, "ctx"); + } + + @Override + public Channel channel() { + return ctx.channel(); + } + + @Override + public EventExecutor executor() { + return ctx.executor(); + } + + @Override + public String name() { + return ctx.name(); + } + + @Override + public ChannelHandler handler() { + return ctx.handler(); + } + + @Override + public boolean isRemoved() { + return ctx.isRemoved(); + } + + @Override + public ChannelHandlerContext fireChannelRegistered() { + ctx.fireChannelRegistered(); + return this; + } + + @Override + public ChannelHandlerContext fireChannelUnregistered() { + ctx.fireChannelUnregistered(); + return this; + } + + @Override + public ChannelHandlerContext fireChannelActive() { + ctx.fireChannelActive(); + return this; + } + + @Override + public ChannelHandlerContext fireChannelInactive() { + ctx.fireChannelInactive(); + return this; + } + + @Override + public ChannelHandlerContext fireExceptionCaught(Throwable cause) { + ctx.fireExceptionCaught(cause); + return this; + } + + @Override + public ChannelHandlerContext fireUserEventTriggered(Object evt) { + ctx.fireUserEventTriggered(evt); + return this; + } + + @Override + public ChannelHandlerContext fireChannelRead(Object msg) { + + ctx.fireChannelRead(msg); + return this; + 
} + + @Override + public ChannelHandlerContext fireChannelReadComplete() { + ctx.fireChannelReadComplete(); + return this; + } + + @Override + public ChannelHandlerContext fireChannelWritabilityChanged() { + ctx.fireChannelWritabilityChanged(); + return this; + } + + @Override + public ChannelHandlerContext read() { + ctx.read(); + return this; + } + + @Override + public ChannelHandlerContext flush() { + ctx.flush(); + return this; + } + + @Override + public ChannelPipeline pipeline() { + return ctx.pipeline(); + } + + @Override + public ByteBufAllocator alloc() { + return ctx.alloc(); + } + + @Deprecated + public Attribute attr(AttributeKey key) { + return ctx.attr(key); + } + + @Deprecated + public boolean hasAttr(AttributeKey key) { + return ctx.hasAttr(key); + } + + @Override + public Future bind(SocketAddress localAddress) { + return ctx.bind(localAddress); + } + + @Override + public Future connect(SocketAddress remoteAddress) { + return ctx.connect(remoteAddress); + } + + @Override + public Future connect(SocketAddress remoteAddress, SocketAddress localAddress) { + return ctx.connect(remoteAddress, localAddress); + } + + @Override + public Future disconnect() { + return ctx.disconnect(); + } + + @Override + public Future close() { + return ctx.close(); + } + + @Override + public Future deregister() { + return ctx.deregister(); + } + + @Override + public Future register() { + return ctx.register(); + } + + @Override + public Future write(Object msg) { + return ctx.write(msg); + } + + @Override + public Future writeAndFlush(Object msg) { + return ctx.writeAndFlush(msg); + } + + @Override + public Promise newPromise() { + return ctx.newPromise(); + } + + @Override + public Future newSucceededFuture() { + return ctx.newSucceededFuture(); + } + + @Override + public Future newFailedFuture(Throwable cause) { + return ctx.newFailedFuture(cause); + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/EmptyHttpHeaders.java 
b/codec-http/src/main/java/io/netty/handler/codec/http/EmptyHttpHeaders.java index 7e281725391..f223909de79 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/EmptyHttpHeaders.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/EmptyHttpHeaders.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -23,22 +23,10 @@ import java.util.Set; public class EmptyHttpHeaders extends HttpHeaders { - static final Iterator> EMPTY_CHARS_ITERATOR = + private static final Iterator> EMPTY_CHARS_ITERATOR = Collections.>emptyList().iterator(); - public static final EmptyHttpHeaders INSTANCE = instance(); - - /** - * @see InstanceInitializer#EMPTY_HEADERS - * @deprecated Use {@link EmptyHttpHeaders#INSTANCE} - *

    - * This is needed to break a cyclic static initialization loop between {@link HttpHeaders} and {@link - * EmptyHttpHeaders}. - */ - @Deprecated - static EmptyHttpHeaders instance() { - return InstanceInitializer.EMPTY_HEADERS; - } + public static final EmptyHttpHeaders INSTANCE = new EmptyHttpHeaders(); protected EmptyHttpHeaders() { } @@ -168,21 +156,4 @@ public Iterator> iteratorCharSequence() { return EMPTY_CHARS_ITERATOR; } - /** - * This class is needed to break a cyclic static initialization loop between {@link HttpHeaders} and - * {@link EmptyHttpHeaders}. - */ - @Deprecated - private static final class InstanceInitializer { - /** - * The instance is instantiated here to break the cyclic static initialization between {@link EmptyHttpHeaders} - * and {@link HttpHeaders}. The issue is that if someone accesses {@link EmptyHttpHeaders#INSTANCE} before - * {@link HttpHeaders#EMPTY_HEADERS} then {@link HttpHeaders#EMPTY_HEADERS} will be {@code null}. - */ - @Deprecated - private static final EmptyHttpHeaders EMPTY_HEADERS = new EmptyHttpHeaders(); - - private InstanceInitializer() { - } - } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpMessage.java b/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpMessage.java index 40fc28f8a5d..735f4b63f7d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpMessage.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpMessage.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpRequest.java b/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpRequest.java index 3cbae69a849..1db1cec9c7c 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpRequest.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpRequest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpResponse.java b/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpResponse.java index 0b9474938d8..9f9be630457 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpResponse.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/FullHttpResponse.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpChunkedInput.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpChunkedInput.java index 7ac61119dce..7bf7b21111a 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpChunkedInput.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpChunkedInput.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -36,7 +36,7 @@ * * HttpContentChunkedInput httpChunkWriter = new HttpChunkedInput( * new ChunkedFile("/tmp/myfile.txt")); - * ChannelFuture sendFileFuture = ctx.write(httpChunkWriter); + * Future<Void> sendFileFuture = ctx.write(httpChunkWriter); * } * */ diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientCodec.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientCodec.java index dd1da34742b..0db29f825b4 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientCodec.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientCodec.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -28,6 +28,11 @@ import java.util.Queue; import java.util.concurrent.atomic.AtomicLong; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_HEADER_SIZE; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_INITIAL_LINE_LENGTH; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_VALIDATE_HEADERS; + /** * A combination of {@link HttpRequestEncoder} and {@link HttpResponseDecoder} * which enables easier client side HTTP implementation. {@link HttpClientCodec} @@ -44,9 +49,11 @@ */ public final class HttpClientCodec extends CombinedChannelDuplexHandler implements HttpClientUpgradeHandler.SourceCodec { + public static final boolean DEFAULT_FAIL_ON_MISSING_RESPONSE = false; + public static final boolean DEFAULT_PARSE_HTTP_AFTER_CONNECT_REQUEST = false; /** A queue that is used for correlating a request and a response. */ - private final Queue queue = new ArrayDeque(); + private final Queue queue = new ArrayDeque<>(); private final boolean parseHttpAfterConnectRequest; /** If true, decoding stops (i.e. 
pass-through) */ @@ -61,40 +68,41 @@ public final class HttpClientCodec extends CombinedChannelDuplexHandler out) throws Exception { + protected void handlerAdded0(ChannelHandlerContext ctx) { + if (failOnMissingResponse) { + context = new DelegatingChannelHandlerContext(ctx) { + @Override + public ChannelHandlerContext fireChannelRead(Object msg) { + decrement(msg); + + super.fireChannelRead(msg); + return this; + } + }; + } else { + context = ctx; + } + } + + @Override + protected void decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception { if (done) { int readable = actualReadableBytes(); if (readable == 0) { @@ -196,16 +236,9 @@ protected void decode( // https://github.com/netty/netty/issues/1159 return; } - out.add(buffer.readBytes(readable)); + ctx.fireChannelRead(buffer.readBytes(readable)); } else { - int oldSize = out.size(); - super.decode(ctx, buffer, out); - if (failOnMissingResponse) { - int size = out.size(); - for (int i = oldSize; i < size; i++) { - decrement(out.get(i)); - } - } + super.decode(context, buffer); } } @@ -222,57 +255,65 @@ private void decrement(Object msg) { @Override protected boolean isContentAlwaysEmpty(HttpMessage msg) { + // Get the method of the HTTP request that corresponds to the + // current response. + // + // Even if we do not use the method to compare we still need to poll it to ensure we keep + // request / response pairs in sync. + HttpMethod method = queue.poll(); + final int statusCode = ((HttpResponse) msg).status().code(); - if (statusCode == 100 || statusCode == 101) { - // 100-continue and 101 switching protocols response should be excluded from paired comparison. + if (statusCode >= 100 && statusCode < 200) { + // An informational response should be excluded from paired comparison. // Just delegate to super method which has all the needed handling. return super.isContentAlwaysEmpty(msg); } - // Get the getMethod of the HTTP request that corresponds to the - // current response. 
- HttpMethod method = queue.poll(); - - char firstChar = method.name().charAt(0); - switch (firstChar) { - case 'H': - // According to 4.3, RFC2616: - // All responses to the HEAD request method MUST NOT include a - // message-body, even though the presence of entity-header fields - // might lead one to believe they do. - if (HttpMethod.HEAD.equals(method)) { - return true; - - // The following code was inserted to work around the servers - // that behave incorrectly. It has been commented out - // because it does not work with well behaving servers. - // Please note, even if the 'Transfer-Encoding: chunked' - // header exists in the HEAD response, the response should - // have absolutely no content. - // - //// Interesting edge case: - //// Some poorly implemented servers will send a zero-byte - //// chunk if Transfer-Encoding of the response is 'chunked'. - //// - //// return !msg.isChunked(); - } - break; - case 'C': - // Successful CONNECT request results in a response with empty body. - if (statusCode == 200) { - if (HttpMethod.CONNECT.equals(method)) { - // Proxy connection established - Parse HTTP only if configured by parseHttpAfterConnectRequest, - // else pass through. - if (!parseHttpAfterConnectRequest) { - done = true; - queue.clear(); + // If the remote peer did for example send multiple responses for one request (which is not allowed per + // spec but may still be possible) method will be null so guard against it. + if (method != null) { + char firstChar = method.name().charAt(0); + switch (firstChar) { + case 'H': + // According to 4.3, RFC2616: + // All responses to the HEAD request method MUST NOT include a + // message-body, even though the presence of entity-header fields + // might lead one to believe they do. + if (HttpMethod.HEAD.equals(method)) { + return true; + + // The following code was inserted to work around the servers + // that behave incorrectly. It has been commented out + // because it does not work with well behaving servers. 
+ // Please note, even if the 'Transfer-Encoding: chunked' + // header exists in the HEAD response, the response should + // have absolutely no content. + // + //// Interesting edge case: + //// Some poorly implemented servers will send a zero-byte + //// chunk if Transfer-Encoding of the response is 'chunked'. + //// + //// return !msg.isChunked(); + } + break; + case 'C': + // Successful CONNECT request results in a response with empty body. + if (statusCode == 200) { + if (HttpMethod.CONNECT.equals(method)) { + // Proxy connection established - Parse HTTP only if configured by + // parseHttpAfterConnectRequest, else pass through. + if (!parseHttpAfterConnectRequest) { + done = true; + queue.clear(); + } + return true; + } } - return true; - } + break; + default: + break; } - break; } - return super.isContentAlwaysEmpty(msg); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientUpgradeHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientUpgradeHandler.java index 235dbb1b5d8..9f0f1fa6036 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientUpgradeHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpClientUpgradeHandler.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -15,18 +15,16 @@ package io.netty.handler.codec.http; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOutboundHandler; -import io.netty.channel.ChannelPromise; import io.netty.util.AsciiString; +import io.netty.util.concurrent.Future; -import java.net.SocketAddress; import java.util.Collection; import java.util.LinkedHashSet; -import java.util.List; import java.util.Set; import static io.netty.handler.codec.http.HttpResponseStatus.SWITCHING_PROTOCOLS; import static io.netty.util.ReferenceCountUtil.release; +import static java.util.Objects.requireNonNull; /** * Client-side handler for handling an HTTP upgrade handshake to another protocol. When the first @@ -35,7 +33,7 @@ * simply removes itself from the pipeline. If the upgrade is successful, upgrades the pipeline to * the new protocol. */ -public class HttpClientUpgradeHandler extends HttpObjectAggregator implements ChannelOutboundHandler { +public class HttpClientUpgradeHandler extends HttpObjectAggregator { /** * User events that are fired to notify about upgrade status. 
@@ -115,79 +113,37 @@ public interface UpgradeCodec { public HttpClientUpgradeHandler(SourceCodec sourceCodec, UpgradeCodec upgradeCodec, int maxContentLength) { super(maxContentLength); - if (sourceCodec == null) { - throw new NullPointerException("sourceCodec"); - } - if (upgradeCodec == null) { - throw new NullPointerException("upgradeCodec"); - } + requireNonNull(sourceCodec, "sourceCodec"); + requireNonNull(upgradeCodec, "upgradeCodec"); this.sourceCodec = sourceCodec; this.upgradeCodec = upgradeCodec; } @Override - public void bind(ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) throws Exception { - ctx.bind(localAddress, promise); - } - - @Override - public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress, - ChannelPromise promise) throws Exception { - ctx.connect(remoteAddress, localAddress, promise); - } - - @Override - public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - ctx.disconnect(promise); - } - - @Override - public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - ctx.close(promise); - } - - @Override - public void deregister(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - ctx.deregister(promise); - } - - @Override - public void read(ChannelHandlerContext ctx) throws Exception { - ctx.read(); - } - - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) - throws Exception { + public Future write(ChannelHandlerContext ctx, Object msg) { if (!(msg instanceof HttpRequest)) { - ctx.write(msg, promise); - return; + return ctx.write(msg); } if (upgradeRequested) { - promise.setFailure(new IllegalStateException( + return ctx.newFailedFuture(new IllegalStateException( "Attempting to write HTTP request with upgrade in progress")); - return; } upgradeRequested = true; setUpgradeRequestHeaders(ctx, (HttpRequest) msg); // Continue writing the 
request. - ctx.write(msg, promise); + Future f = ctx.write(msg); // Notify that the upgrade request was issued. ctx.fireUserEventTriggered(UpgradeEvent.UPGRADE_ISSUED); // Now we wait for the next HTTP response to see if we switch protocols. + return f; } @Override - public void flush(ChannelHandlerContext ctx) throws Exception { - ctx.flush(); - } - - @Override - protected void decode(ChannelHandlerContext ctx, HttpObject msg, List out) + protected void decode(final ChannelHandlerContext ctx, HttpObject msg) throws Exception { FullHttpResponse response = null; try { @@ -203,29 +159,38 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List ou // NOTE: not releasing the response since we're letting it propagate to the // next handler. ctx.fireUserEventTriggered(UpgradeEvent.UPGRADE_REJECTED); - removeThisHandler(ctx); ctx.fireChannelRead(msg); + removeThisHandler(ctx); return; } } if (msg instanceof FullHttpResponse) { response = (FullHttpResponse) msg; + // Need to retain since the base class will release after returning from this method. - response.retain(); - out.add(response); + tryUpgrade(ctx, response.retain()); } else { // Call the base class to handle the aggregation of the full request. - super.decode(ctx, msg, out); - if (out.isEmpty()) { - // The full request hasn't been created yet, still awaiting more data. 
- return; - } - - assert out.size() == 1; - response = (FullHttpResponse) out.get(0); + super.decode(new DelegatingChannelHandlerContext(ctx) { + @Override + public ChannelHandlerContext fireChannelRead(Object msg) { + FullHttpResponse response = (FullHttpResponse) msg; + tryUpgrade(ctx, response); + return this; + } + }, msg); } + } catch (Throwable t) { + release(response); + ctx.fireExceptionCaught(t); + removeThisHandler(ctx); + } + } + + private void tryUpgrade(ChannelHandlerContext ctx, FullHttpResponse response) { + try { CharSequence upgradeHeader = response.headers().get(HttpHeaderNames.UPGRADE); if (upgradeHeader != null && !AsciiString.contentEqualsIgnoreCase(upgradeCodec.protocol(), upgradeHeader)) { throw new IllegalStateException( @@ -246,7 +211,6 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List ou // We switched protocols, so we're done with the upgrade response. // Release it and clear it from the output. response.release(); - out.clear(); removeThisHandler(ctx); } catch (Throwable t) { release(response); @@ -267,7 +231,7 @@ private void setUpgradeRequestHeaders(ChannelHandlerContext ctx, HttpRequest req request.headers().set(HttpHeaderNames.UPGRADE, upgradeCodec.protocol()); // Add all protocol-specific headers to the request. - Set connectionParts = new LinkedHashSet(2); + Set connectionParts = new LinkedHashSet<>(2); connectionParts.addAll(upgradeCodec.setUpgradeHeaders(ctx, request)); // Set the CONNECTION header from the set of all protocol-specific headers that were added. 
@@ -277,6 +241,6 @@ private void setUpgradeRequestHeaders(ChannelHandlerContext ctx, HttpRequest req builder.append(','); } builder.append(HttpHeaderValues.UPGRADE); - request.headers().set(HttpHeaderNames.CONNECTION, builder.toString()); + request.headers().add(HttpHeaderNames.CONNECTION, builder.toString()); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpConstants.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpConstants.java index 7fde88974cd..9bb1f70554f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpConstants.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpConstants.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContent.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContent.java index 0dae011e248..d6b9e2e3354 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContent.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContent.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentCompressor.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentCompressor.java index e80469f26e4..9fb30f768c1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentCompressor.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentCompressor.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,10 +15,27 @@ */ package io.netty.handler.codec.http; +import java.util.HashMap; +import java.util.Map; + +import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.MessageToByteEncoder; +import io.netty.handler.codec.compression.Brotli; +import io.netty.handler.codec.compression.BrotliEncoder; +import io.netty.handler.codec.compression.BrotliOptions; +import io.netty.handler.codec.compression.CompressionOptions; +import io.netty.handler.codec.compression.DeflateOptions; +import io.netty.handler.codec.compression.GzipOptions; +import io.netty.handler.codec.compression.StandardCompressionOptions; import io.netty.handler.codec.compression.ZlibCodecFactory; +import io.netty.handler.codec.compression.ZlibEncoder; import io.netty.handler.codec.compression.ZlibWrapper; +import 
io.netty.handler.codec.compression.Zstd; +import io.netty.handler.codec.compression.ZstdEncoder; +import io.netty.handler.codec.compression.ZstdOptions; +import io.netty.util.internal.ObjectUtil; /** * Compresses an {@link HttpMessage} and an {@link HttpContent} in {@code gzip} or @@ -29,11 +46,18 @@ */ public class HttpContentCompressor extends HttpContentEncoder { + private final boolean supportsCompressionOptions; + private final BrotliOptions brotliOptions; + private final GzipOptions gzipOptions; + private final DeflateOptions deflateOptions; + private final ZstdOptions zstdOptions; + private final int compressionLevel; private final int windowBits; private final int memLevel; private final int contentSizeThreshold; private ChannelHandlerContext ctx; + private final Map factories; /** * Creates a new handler with the default compression level (6), @@ -52,6 +76,7 @@ public HttpContentCompressor() { * best compression. {@code 0} means no compression. The default * compression level is {@code 6}. */ + @Deprecated public HttpContentCompressor(int compressionLevel) { this(compressionLevel, 15, 8, 0); } @@ -75,6 +100,7 @@ public HttpContentCompressor(int compressionLevel) { * memory. Larger values result in better and faster compression * at the expense of memory usage. The default value is {@code 8} */ + @Deprecated public HttpContentCompressor(int compressionLevel, int windowBits, int memLevel) { this(compressionLevel, windowBits, memLevel, 0); } @@ -102,28 +128,96 @@ public HttpContentCompressor(int compressionLevel, int windowBits, int memLevel) * body exceeds the threshold. The value should be a non negative * number. {@code 0} will enable compression for all responses. 
*/ + @Deprecated public HttpContentCompressor(int compressionLevel, int windowBits, int memLevel, int contentSizeThreshold) { - if (compressionLevel < 0 || compressionLevel > 9) { - throw new IllegalArgumentException( - "compressionLevel: " + compressionLevel + - " (expected: 0-9)"); - } - if (windowBits < 9 || windowBits > 15) { - throw new IllegalArgumentException( - "windowBits: " + windowBits + " (expected: 9-15)"); - } - if (memLevel < 1 || memLevel > 9) { - throw new IllegalArgumentException( - "memLevel: " + memLevel + " (expected: 1-9)"); - } - if (contentSizeThreshold < 0) { - throw new IllegalArgumentException( - "contentSizeThreshold: " + contentSizeThreshold + " (expected: non negative number)"); - } - this.compressionLevel = compressionLevel; - this.windowBits = windowBits; - this.memLevel = memLevel; - this.contentSizeThreshold = contentSizeThreshold; + this.compressionLevel = ObjectUtil.checkInRange(compressionLevel, 0, 9, "compressionLevel"); + this.windowBits = ObjectUtil.checkInRange(windowBits, 9, 15, "windowBits"); + this.memLevel = ObjectUtil.checkInRange(memLevel, 1, 9, "memLevel"); + this.contentSizeThreshold = ObjectUtil.checkPositiveOrZero(contentSizeThreshold, "contentSizeThreshold"); + this.brotliOptions = null; + this.gzipOptions = null; + this.deflateOptions = null; + this.zstdOptions = null; + this.factories = null; + this.supportsCompressionOptions = false; + } + + /** + * Create a new {@link HttpContentCompressor} Instance with specified + * {@link CompressionOptions}s and contentSizeThreshold set to {@code 0} + * + * @param compressionOptions {@link CompressionOptions} or {@code null} if the default + * should be used. + */ + public HttpContentCompressor(CompressionOptions... 
compressionOptions) { + this(0, compressionOptions); + } + + /** + * Create a new {@link HttpContentCompressor} instance with specified + * {@link CompressionOptions}s + * + * @param contentSizeThreshold + * The response body is compressed when the size of the response + * body exceeds the threshold. The value should be a non negative + * number. {@code 0} will enable compression for all responses. + * @param compressionOptions {@link CompressionOptions} or {@code null} + * if the default should be used. + */ + public HttpContentCompressor(int contentSizeThreshold, CompressionOptions... compressionOptions) { + this.contentSizeThreshold = ObjectUtil.checkPositiveOrZero(contentSizeThreshold, "contentSizeThreshold"); + BrotliOptions brotliOptions = null; + GzipOptions gzipOptions = null; + DeflateOptions deflateOptions = null; + ZstdOptions zstdOptions = null; + if (compressionOptions == null || compressionOptions.length == 0) { + brotliOptions = Brotli.isAvailable() ? StandardCompressionOptions.brotli() : null; + gzipOptions = StandardCompressionOptions.gzip(); + deflateOptions = StandardCompressionOptions.deflate(); + zstdOptions = Zstd.isAvailable() ? 
StandardCompressionOptions.zstd() : null; + } else { + ObjectUtil.deepCheckNotNull("compressionOptions", compressionOptions); + for (CompressionOptions compressionOption : compressionOptions) { + if (compressionOption instanceof BrotliOptions) { + // if we have BrotliOptions, it means Brotli is available + brotliOptions = (BrotliOptions) compressionOption; + } else if (compressionOption instanceof GzipOptions) { + gzipOptions = (GzipOptions) compressionOption; + } else if (compressionOption instanceof DeflateOptions) { + deflateOptions = (DeflateOptions) compressionOption; + } else if (compressionOption instanceof ZstdOptions) { + zstdOptions = (ZstdOptions) compressionOption; + } else { + throw new IllegalArgumentException("Unsupported " + CompressionOptions.class.getSimpleName() + + ": " + compressionOption); + } + } + } + + this.gzipOptions = gzipOptions; + this.deflateOptions = deflateOptions; + this.brotliOptions = brotliOptions; + this.zstdOptions = zstdOptions; + + this.factories = new HashMap(); + + if (this.gzipOptions != null) { + this.factories.put("gzip", new GzipEncoderFactory()); + } + if (this.deflateOptions != null) { + this.factories.put("deflate", new DeflateEncoderFactory()); + } + if (this.brotliOptions != null) { + this.factories.put("br", new BrEncoderFactory()); + } + if (this.zstdOptions != null) { + this.factories.put("zstd", new ZstdEncoderFactory()); + } + + this.compressionLevel = -1; + this.windowBits = -1; + this.memLevel = -1; + supportsCompressionOptions = true; } @Override @@ -132,45 +226,121 @@ public void handlerAdded(ChannelHandlerContext ctx) throws Exception { } @Override - protected Result beginEncode(HttpResponse headers, String acceptEncoding) throws Exception { + protected Result beginEncode(HttpResponse httpResponse, String acceptEncoding) throws Exception { if (this.contentSizeThreshold > 0) { - if (headers instanceof HttpContent && - ((HttpContent) headers).content().readableBytes() < contentSizeThreshold) { + if 
(httpResponse instanceof HttpContent && + ((HttpContent) httpResponse).content().readableBytes() < contentSizeThreshold) { return null; } } - String contentEncoding = headers.headers().get(HttpHeaderNames.CONTENT_ENCODING); + String contentEncoding = httpResponse.headers().get(HttpHeaderNames.CONTENT_ENCODING); if (contentEncoding != null) { // Content-Encoding was set, either as something specific or as the IDENTITY encoding // Therefore, we should NOT encode here return null; } - ZlibWrapper wrapper = determineWrapper(acceptEncoding); - if (wrapper == null) { - return null; - } + if (supportsCompressionOptions) { + String targetContentEncoding = determineEncoding(acceptEncoding); + if (targetContentEncoding == null) { + return null; + } + + CompressionEncoderFactory encoderFactory = factories.get(targetContentEncoding); + + if (encoderFactory == null) { + throw new Error(); + } + + return new Result(targetContentEncoding, + new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), encoderFactory.createEncoder())); + } else { + ZlibWrapper wrapper = determineWrapper(acceptEncoding); + if (wrapper == null) { + return null; + } + + String targetContentEncoding; + switch (wrapper) { + case GZIP: + targetContentEncoding = "gzip"; + break; + case ZLIB: + targetContentEncoding = "deflate"; + break; + default: + throw new Error(); + } - String targetContentEncoding; - switch (wrapper) { - case GZIP: - targetContentEncoding = "gzip"; - break; - case ZLIB: - targetContentEncoding = "deflate"; - break; - default: - throw new Error(); + return new Result( + targetContentEncoding, + new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibEncoder( + wrapper, compressionLevel, windowBits, memLevel))); } + } - return new Result( - targetContentEncoding, - new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), - 
ctx.channel().config(), ZlibCodecFactory.newZlibEncoder( - wrapper, compressionLevel, windowBits, memLevel))); + @SuppressWarnings("FloatingPointEquality") + protected String determineEncoding(String acceptEncoding) { + float starQ = -1.0f; + float brQ = -1.0f; + float zstdQ = -1.0f; + float gzipQ = -1.0f; + float deflateQ = -1.0f; + for (String encoding : acceptEncoding.split(",")) { + float q = 1.0f; + int equalsPos = encoding.indexOf('='); + if (equalsPos != -1) { + try { + q = Float.parseFloat(encoding.substring(equalsPos + 1)); + } catch (NumberFormatException e) { + // Ignore encoding + q = 0.0f; + } + } + if (encoding.contains("*")) { + starQ = q; + } else if (encoding.contains("br") && q > brQ) { + brQ = q; + } else if (encoding.contains("zstd") && q > zstdQ) { + zstdQ = q; + } else if (encoding.contains("gzip") && q > gzipQ) { + gzipQ = q; + } else if (encoding.contains("deflate") && q > deflateQ) { + deflateQ = q; + } + } + if (brQ > 0.0f || zstdQ > 0.0f || gzipQ > 0.0f || deflateQ > 0.0f) { + if (brQ != -1.0f && brQ >= zstdQ && this.brotliOptions != null) { + return "br"; + } else if (zstdQ != -1.0f && zstdQ >= gzipQ && this.zstdOptions != null) { + return "zstd"; + } else if (gzipQ != -1.0f && gzipQ >= deflateQ && this.gzipOptions != null) { + return "gzip"; + } else if (deflateQ != -1.0f && this.deflateOptions != null) { + return "deflate"; + } + } + if (starQ > 0.0f) { + if (brQ == -1.0f && this.brotliOptions != null) { + return "br"; + } + if (zstdQ == -1.0f && this.zstdOptions != null) { + return "zstd"; + } + if (gzipQ == -1.0f && this.gzipOptions != null) { + return "gzip"; + } + if (deflateQ == -1.0f && this.deflateOptions != null) { + return "deflate"; + } + } + return null; } + @Deprecated @SuppressWarnings("FloatingPointEquality") protected ZlibWrapper determineWrapper(String acceptEncoding) { float starQ = -1.0f; @@ -212,4 +382,57 @@ protected ZlibWrapper determineWrapper(String acceptEncoding) { } return null; } + + /** + * Compression 
Encoder Factory that creates {@link ZlibEncoder}s + * used to compress http content for gzip content encoding + */ + private final class GzipEncoderFactory implements CompressionEncoderFactory { + + @Override + public MessageToByteEncoder createEncoder() { + return ZlibCodecFactory.newZlibEncoder( + ZlibWrapper.GZIP, gzipOptions.compressionLevel(), + gzipOptions.windowBits(), gzipOptions.memLevel()); + } + } + + /** + * Compression Encoder Factory that creates {@link ZlibEncoder}s + * used to compress http content for deflate content encoding + */ + private final class DeflateEncoderFactory implements CompressionEncoderFactory { + + @Override + public MessageToByteEncoder createEncoder() { + return ZlibCodecFactory.newZlibEncoder( + ZlibWrapper.ZLIB, deflateOptions.compressionLevel(), + deflateOptions.windowBits(), deflateOptions.memLevel()); + } + } + + /** + * Compression Encoder Factory that creates {@link BrotliEncoder}s + * used to compress http content for br content encoding + */ + private final class BrEncoderFactory implements CompressionEncoderFactory { + + @Override + public MessageToByteEncoder createEncoder() { + return new BrotliEncoder(brotliOptions.parameters()); + } + } + + /** + * Compression Encoder Factory for create {@link ZstdEncoder} + * used to compress http content for zstd content encoding + */ + private final class ZstdEncoderFactory implements CompressionEncoderFactory { + + @Override + public MessageToByteEncoder createEncoder() { + return new ZstdEncoder(zstdOptions.compressionLevel(), + zstdOptions.blockSize(), zstdOptions.maxEncodeSize()); + } + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java index e85adaaa373..41869e5d467 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java @@ -5,7 +5,7 @@ * 
version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,11 +19,10 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.CodecException; +import io.netty.handler.codec.DecoderResult; import io.netty.handler.codec.MessageToMessageDecoder; import io.netty.util.ReferenceCountUtil; -import java.util.List; - /** * Decodes the content of the received {@link HttpRequest} and {@link HttpContent}. * The original content is replaced with the new content decoded by the @@ -50,16 +49,17 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder out) throws Exception { + protected void decode(ChannelHandlerContext ctx, HttpObject msg) throws Exception { if (msg instanceof HttpResponse && ((HttpResponse) msg).status().code() == 100) { if (!(msg instanceof LastHttpContent)) { - continueResponse = true; + continueResponse = true; } // 100-continue response must be passed through. - out.add(ReferenceCountUtil.retain(msg)); + fireChannelRead(ctx, ReferenceCountUtil.retain(msg)); return; } @@ -68,7 +68,7 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List ou continueResponse = false; } // 100-continue response must be passed through. 
- out.add(ReferenceCountUtil.retain(msg)); + fireChannelRead(ctx, ReferenceCountUtil.retain(msg)); return; } @@ -82,7 +82,17 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List ou if (contentEncoding != null) { contentEncoding = contentEncoding.trim(); } else { - contentEncoding = IDENTITY; + String transferEncoding = headers.get(HttpHeaderNames.TRANSFER_ENCODING); + if (transferEncoding != null) { + int idx = transferEncoding.indexOf(","); + if (idx != -1) { + contentEncoding = transferEncoding.substring(0, idx).trim(); + } else { + contentEncoding = transferEncoding.trim(); + } + } else { + contentEncoding = IDENTITY; + } } decoder = newContentDecoder(contentEncoding); @@ -90,7 +100,7 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List ou if (message instanceof HttpContent) { ((HttpContent) message).retain(); } - out.add(message); + fireChannelRead(ctx, message); return; } @@ -109,7 +119,7 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List ou CharSequence targetContentEncoding = getTargetContentEncoding(contentEncoding); if (HttpHeaderValues.IDENTITY.contentEquals(targetContentEncoding)) { // Do NOT set the 'Content-Encoding' header if the target encoding is 'identity' - // as per: http://tools.ietf.org/html/rfc2616#section-14.11 + // as per: https://tools.ietf.org/html/rfc2616#section-14.11 headers.remove(HttpHeaderNames.CONTENT_ENCODING); } else { headers.set(HttpHeaderNames.CONTENT_ENCODING, targetContentEncoding); @@ -129,42 +139,56 @@ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List ou copy = new DefaultHttpResponse(r.protocolVersion(), r.status()); } else { throw new CodecException("Object of class " + message.getClass().getName() + - " is not a HttpRequest or HttpResponse"); + " is not an HttpRequest or HttpResponse"); } copy.headers().set(message.headers()); copy.setDecoderResult(message.decoderResult()); - out.add(copy); + fireChannelRead(ctx, copy); } else { - 
out.add(message); + fireChannelRead(ctx, message); } } if (msg instanceof HttpContent) { final HttpContent c = (HttpContent) msg; if (decoder == null) { - out.add(c.retain()); + fireChannelRead(ctx, c.retain()); } else { - decodeContent(c, out); + decodeContent(ctx, c); } } } - private void decodeContent(HttpContent c, List out) { + private void decodeContent(ChannelHandlerContext ctx, HttpContent c) { ByteBuf content = c.content(); - decode(content, out); + decode(ctx, content); if (c instanceof LastHttpContent) { - finishDecode(out); + finishDecode(ctx); LastHttpContent last = (LastHttpContent) c; // Generate an additional chunk if the decoder produced // the last product on closure, HttpHeaders headers = last.trailingHeaders(); if (headers.isEmpty()) { - out.add(LastHttpContent.EMPTY_LAST_CONTENT); + fireChannelRead(ctx, LastHttpContent.EMPTY_LAST_CONTENT); } else { - out.add(new ComposedLastHttpContent(headers)); + fireChannelRead(ctx, new ComposedLastHttpContent(headers, DecoderResult.SUCCESS)); + } + } + } + + @Override + public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { + boolean needRead = this.needRead; + this.needRead = true; + + try { + ctx.fireChannelReadComplete(); + } finally { + if (needRead && !ctx.channel().config().isAutoRead()) { + ctx.read(); } } } @@ -229,20 +253,20 @@ private void cleanupSafely(ChannelHandlerContext ctx) { } } - private void decode(ByteBuf in, List out) { + private void decode(ChannelHandlerContext ctx, ByteBuf in) { // call retain here as it will call release after its written to the channel decoder.writeInbound(in.retain()); - fetchDecoderOutput(out); + fetchDecoderOutput(ctx); } - private void finishDecode(List out) { + private void finishDecode(ChannelHandlerContext ctx) { if (decoder.finish()) { - fetchDecoderOutput(out); + fetchDecoderOutput(ctx); } decoder = null; } - private void fetchDecoderOutput(List out) { + private void fetchDecoderOutput(ChannelHandlerContext ctx) { for (;;) { ByteBuf 
buf = decoder.readInbound(); if (buf == null) { @@ -252,7 +276,12 @@ private void fetchDecoderOutput(List out) { buf.release(); continue; } - out.add(new DefaultHttpContent(buf)); + ctx.fireChannelRead(new DefaultHttpContent(buf)); } } + + private void fireChannelRead(ChannelHandlerContext ctx, Object msg) { + needRead = false; + ctx.fireChannelRead(msg); + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecompressor.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecompressor.java index 0ec0d4168d7..d66151d463c 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecompressor.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecompressor.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,12 +15,15 @@ */ package io.netty.handler.codec.http; +import static io.netty.handler.codec.http.HttpHeaderValues.BR; import static io.netty.handler.codec.http.HttpHeaderValues.DEFLATE; import static io.netty.handler.codec.http.HttpHeaderValues.GZIP; import static io.netty.handler.codec.http.HttpHeaderValues.X_DEFLATE; import static io.netty.handler.codec.http.HttpHeaderValues.X_GZIP; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.compression.Brotli; +import io.netty.handler.codec.compression.BrotliDecoder; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibWrapper; @@ -64,6 +67,10 @@ protected EmbeddedChannel newContentDecoder(String contentEncoding) throws Excep return new EmbeddedChannel(ctx.channel().id(), 
ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); } + if (Brotli.isAvailable() && BR.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new BrotliDecoder()); + } // 'identity' or unsupported return null; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java index 0078edc4213..61bcd845d07 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,17 +15,23 @@ */ package io.netty.handler.codec.http; +import static java.util.Objects.requireNonNull; + import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufHolder; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.DecoderResult; import io.netty.handler.codec.MessageToMessageCodec; import io.netty.util.ReferenceCountUtil; +import io.netty.util.internal.StringUtil; import java.util.ArrayDeque; import java.util.List; import java.util.Queue; +import static io.netty.handler.codec.http.HttpHeaderNames.*; + /** * Encodes the content of the outbound {@link HttpResponse} and {@link HttpContent}. 
* The original content is replaced with the new content encoded by the @@ -60,7 +66,7 @@ private enum State { private static final CharSequence ZERO_LENGTH_CONNECT = "CONNECT"; private static final int CONTINUE_CODE = HttpResponseStatus.CONTINUE.code(); - private final Queue acceptEncodingQueue = new ArrayDeque(); + private final Queue acceptEncodingQueue = new ArrayDeque<>(); private EmbeddedChannel encoder; private State state = State.AWAIT_HEADERS; @@ -70,22 +76,31 @@ public boolean acceptOutboundMessage(Object msg) throws Exception { } @Override - protected void decode(ChannelHandlerContext ctx, HttpRequest msg, List out) - throws Exception { - CharSequence acceptedEncoding = msg.headers().get(HttpHeaderNames.ACCEPT_ENCODING); - if (acceptedEncoding == null) { - acceptedEncoding = HttpContentDecoder.IDENTITY; + protected void decode(ChannelHandlerContext ctx, HttpRequest msg) throws Exception { + CharSequence acceptEncoding; + List acceptEncodingHeaders = msg.headers().getAll(ACCEPT_ENCODING); + switch (acceptEncodingHeaders.size()) { + case 0: + acceptEncoding = HttpContentDecoder.IDENTITY; + break; + case 1: + acceptEncoding = acceptEncodingHeaders.get(0); + break; + default: + // Multiple message-header fields https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2 + acceptEncoding = StringUtil.join(",", acceptEncodingHeaders); + break; } - HttpMethod meth = msg.method(); - if (meth == HttpMethod.HEAD) { - acceptedEncoding = ZERO_LENGTH_HEAD; - } else if (meth == HttpMethod.CONNECT) { - acceptedEncoding = ZERO_LENGTH_CONNECT; + HttpMethod method = msg.method(); + if (HttpMethod.HEAD.equals(method)) { + acceptEncoding = ZERO_LENGTH_HEAD; + } else if (HttpMethod.CONNECT.equals(method)) { + acceptEncoding = ZERO_LENGTH_CONNECT; } - acceptEncodingQueue.add(acceptedEncoding); - out.add(ReferenceCountUtil.retain(msg)); + acceptEncodingQueue.add(acceptEncoding); + ctx.fireChannelRead(ReferenceCountUtil.retain(msg)); } @Override @@ -128,7 +143,7 @@ protected 
void encode(ChannelHandlerContext ctx, HttpObject msg, List ou if (isFull) { out.add(ReferenceCountUtil.retain(res)); } else { - out.add(res); + out.add(ReferenceCountUtil.retain(res)); // Pass through all following contents. state = State.PASS_THROUGH; } @@ -151,7 +166,7 @@ protected void encode(ChannelHandlerContext ctx, HttpObject msg, List ou if (isFull) { out.add(ReferenceCountUtil.retain(res)); } else { - out.add(res); + out.add(ReferenceCountUtil.retain(res)); // Pass through all following contents. state = State.PASS_THROUGH; } @@ -179,7 +194,7 @@ protected void encode(ChannelHandlerContext ctx, HttpObject msg, List ou res.headers().remove(HttpHeaderNames.CONTENT_LENGTH); res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); - out.add(res); + out.add(ReferenceCountUtil.retain(res)); state = State.AWAIT_CONTENT; if (!(msg instanceof HttpContent)) { // only break out the switch statement if we have not content to process @@ -264,7 +279,7 @@ private boolean encodeContent(HttpContent c, List out) { if (headers.isEmpty()) { out.add(LastHttpContent.EMPTY_LAST_CONTENT); } else { - out.add(new ComposedLastHttpContent(headers)); + out.add(new ComposedLastHttpContent(headers, DecoderResult.SUCCESS)); } return true; } @@ -274,8 +289,8 @@ private boolean encodeContent(HttpContent c, List out) { /** * Prepare to encode the HTTP message content. * - * @param headers - * the headers + * @param httpResponse + * the http response * @param acceptEncoding * the value of the {@code "Accept-Encoding"} header * @@ -285,7 +300,7 @@ private boolean encodeContent(HttpContent c, List out) { * {@code null} if {@code acceptEncoding} is unsupported or rejected * and thus the content should be handled as-is (i.e. no encoding). 
*/ - protected abstract Result beginEncode(HttpResponse headers, String acceptEncoding) throws Exception; + protected abstract Result beginEncode(HttpResponse httpResponse, String acceptEncoding) throws Exception; @Override public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { @@ -349,12 +364,8 @@ public static final class Result { private final EmbeddedChannel contentEncoder; public Result(String targetContentEncoding, EmbeddedChannel contentEncoder) { - if (targetContentEncoding == null) { - throw new NullPointerException("targetContentEncoding"); - } - if (contentEncoder == null) { - throw new NullPointerException("contentEncoder"); - } + requireNonNull(targetContentEncoding, "targetContentEncoding"); + requireNonNull(contentEncoder, "contentEncoder"); this.targetContentEncoding = targetContentEncoding; this.contentEncoder = contentEncoder; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpExpectationFailedEvent.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpExpectationFailedEvent.java index 5c9f91fb100..c9a5b19dbe4 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpExpectationFailedEvent.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpExpectationFailedEvent.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderDateFormat.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderDateFormat.java deleted file mode 100644 index 45de26df618..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderDateFormat.java +++ /dev/null @@ -1,104 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http; - -import io.netty.util.concurrent.FastThreadLocal; -import io.netty.handler.codec.DateFormatter; - -import java.text.ParsePosition; -import java.text.SimpleDateFormat; -import java.util.Date; -import java.util.Locale; -import java.util.TimeZone; - -/** - * This DateFormat decodes 3 formats of {@link Date}, but only encodes the one, - * the first: - *
      - *
    • Sun, 06 Nov 1994 08:49:37 GMT: standard specification, the only one with - * valid generation
    • - *
    • Sunday, 06-Nov-94 08:49:37 GMT: obsolete specification
    • - *
    • Sun Nov 6 08:49:37 1994: obsolete specification
    • - *
    - * @deprecated Use {@link DateFormatter} instead - */ -@Deprecated -public final class HttpHeaderDateFormat extends SimpleDateFormat { - private static final long serialVersionUID = -925286159755905325L; - - private final SimpleDateFormat format1 = new HttpHeaderDateFormatObsolete1(); - private final SimpleDateFormat format2 = new HttpHeaderDateFormatObsolete2(); - - private static final FastThreadLocal dateFormatThreadLocal = - new FastThreadLocal() { - @Override - protected HttpHeaderDateFormat initialValue() { - return new HttpHeaderDateFormat(); - } - }; - - public static HttpHeaderDateFormat get() { - return dateFormatThreadLocal.get(); - } - - /** - * Standard date format

    - * Sun, 06 Nov 1994 08:49:37 GMT -> E, d MMM yyyy HH:mm:ss z - */ - private HttpHeaderDateFormat() { - super("E, dd MMM yyyy HH:mm:ss z", Locale.ENGLISH); - setTimeZone(TimeZone.getTimeZone("GMT")); - } - - @Override - public Date parse(String text, ParsePosition pos) { - Date date = super.parse(text, pos); - if (date == null) { - date = format1.parse(text, pos); - } - if (date == null) { - date = format2.parse(text, pos); - } - return date; - } - - /** - * First obsolete format

    - * Sunday, 06-Nov-94 08:49:37 GMT -> E, d-MMM-y HH:mm:ss z - */ - private static final class HttpHeaderDateFormatObsolete1 extends SimpleDateFormat { - private static final long serialVersionUID = -3178072504225114298L; - - HttpHeaderDateFormatObsolete1() { - super("E, dd-MMM-yy HH:mm:ss z", Locale.ENGLISH); - setTimeZone(TimeZone.getTimeZone("GMT")); - } - } - - /** - * Second obsolete format - *

    - * Sun Nov 6 08:49:37 1994 -> EEE, MMM d HH:mm:ss yyyy - */ - private static final class HttpHeaderDateFormatObsolete2 extends SimpleDateFormat { - private static final long serialVersionUID = 3010674519968303714L; - - HttpHeaderDateFormatObsolete2() { - super("E MMM d HH:mm:ss yyyy", Locale.ENGLISH); - setTimeZone(TimeZone.getTimeZone("GMT")); - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderNames.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderNames.java index f6f15ddc13c..7ac98265dc9 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderNames.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderNames.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -160,6 +160,10 @@ public final class HttpHeaderNames { * {@code "date"} */ public static final AsciiString DATE = AsciiString.cached("date"); + /** + * {@code "dnt"} + */ + public static final AsciiString DNT = AsciiString.cached("dnt"); /** * {@code "etag"} */ @@ -318,6 +322,10 @@ public final class HttpHeaderNames { * {@code "upgrade"} */ public static final AsciiString UPGRADE = AsciiString.cached("upgrade"); + /** + * {@code "upgrade-insecure-requests"} + */ + public static final AsciiString UPGRADE_INSECURE_REQUESTS = AsciiString.cached("upgrade-insecure-requests"); /** * {@code "user-agent"} */ @@ -354,6 +362,10 @@ public final class HttpHeaderNames { * {@code "x-frame-options"} */ public static final AsciiString X_FRAME_OPTIONS = AsciiString.cached("x-frame-options"); + /** + * {@code "x-requested-with"} + */ + public static final AsciiString 
X_REQUESTED_WITH = AsciiString.cached("x-requested-with"); private HttpHeaderNames() { } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderValues.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderValues.java index 2b5b6bbe023..5fb10861290 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderValues.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaderValues.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -35,6 +35,18 @@ public final class HttpHeaderValues { * {@code "application/octet-stream"} */ public static final AsciiString APPLICATION_OCTET_STREAM = AsciiString.cached("application/octet-stream"); + /** + * {@code "application/xhtml+xml"} + */ + public static final AsciiString APPLICATION_XHTML = AsciiString.cached("application/xhtml+xml"); + /** + * {@code "application/xml"} + */ + public static final AsciiString APPLICATION_XML = AsciiString.cached("application/xml"); + /** + * {@code "application/zstd"} + */ + public static final AsciiString APPLICATION_ZSTD = AsciiString.cached("application/zstd"); /** * {@code "attachment"} * See {@link HttpHeaderNames#CONTENT_DISPOSITION} @@ -103,6 +115,14 @@ public final class HttpHeaderValues { * {@code "gzip"} */ public static final AsciiString GZIP = AsciiString.cached("gzip"); + /** + * {@code "br"} + */ + public static final AsciiString BR = AsciiString.cached("br"); + /** + * {@code "zstd"} + */ + public static final AsciiString ZSTD = AsciiString.cached("zstd"); /** * {@code "gzip,deflate"} */ @@ -192,6 +212,18 @@ public final class HttpHeaderValues { * {@code "s-maxage"} 
*/ public static final AsciiString S_MAXAGE = AsciiString.cached("s-maxage"); + /** + * {@code "text/css"} + */ + public static final AsciiString TEXT_CSS = AsciiString.cached("text/css"); + /** + * {@code "text/html"} + */ + public static final AsciiString TEXT_HTML = AsciiString.cached("text/html"); + /** + * {@code "text/event-stream"} + */ + public static final AsciiString TEXT_EVENT_STREAM = AsciiString.cached("text/event-stream"); /** * {@code "text/plain"} */ @@ -208,6 +240,10 @@ public final class HttpHeaderValues { * {@code "websocket"} */ public static final AsciiString WEBSOCKET = AsciiString.cached("websocket"); + /** + * {@code "XmlHttpRequest"} + */ + public static final AsciiString XML_HTTP_REQUEST = AsciiString.cached("XMLHttpRequest"); private HttpHeaderValues() { } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaders.java index 35cfc5c5f18..bf7a262c342 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaders.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeaders.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -35,475 +35,13 @@ import static io.netty.util.AsciiString.contentEquals; import static io.netty.util.AsciiString.contentEqualsIgnoreCase; import static io.netty.util.AsciiString.trim; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * Provides the constants for the standard HTTP header names and values and * commonly used utility methods that accesses an {@link HttpMessage}. 
*/ public abstract class HttpHeaders implements Iterable> { - /** - * @deprecated Use {@link EmptyHttpHeaders#INSTANCE}. - *

    - * The instance is instantiated here to break the cyclic static initialization between {@link EmptyHttpHeaders} and - * {@link HttpHeaders}. The issue is that if someone accesses {@link EmptyHttpHeaders#INSTANCE} before - * {@link HttpHeaders#EMPTY_HEADERS} then {@link HttpHeaders#EMPTY_HEADERS} will be {@code null}. - */ - @Deprecated - public static final HttpHeaders EMPTY_HEADERS = EmptyHttpHeaders.instance(); - - /** - * @deprecated Use {@link HttpHeaderNames} instead. - * - * Standard HTTP header names. - */ - @Deprecated - public static final class Names { - /** - * {@code "Accept"} - */ - public static final String ACCEPT = "Accept"; - /** - * {@code "Accept-Charset"} - */ - public static final String ACCEPT_CHARSET = "Accept-Charset"; - /** - * {@code "Accept-Encoding"} - */ - public static final String ACCEPT_ENCODING = "Accept-Encoding"; - /** - * {@code "Accept-Language"} - */ - public static final String ACCEPT_LANGUAGE = "Accept-Language"; - /** - * {@code "Accept-Ranges"} - */ - public static final String ACCEPT_RANGES = "Accept-Ranges"; - /** - * {@code "Accept-Patch"} - */ - public static final String ACCEPT_PATCH = "Accept-Patch"; - /** - * {@code "Access-Control-Allow-Credentials"} - */ - public static final String ACCESS_CONTROL_ALLOW_CREDENTIALS = "Access-Control-Allow-Credentials"; - /** - * {@code "Access-Control-Allow-Headers"} - */ - public static final String ACCESS_CONTROL_ALLOW_HEADERS = "Access-Control-Allow-Headers"; - /** - * {@code "Access-Control-Allow-Methods"} - */ - public static final String ACCESS_CONTROL_ALLOW_METHODS = "Access-Control-Allow-Methods"; - /** - * {@code "Access-Control-Allow-Origin"} - */ - public static final String ACCESS_CONTROL_ALLOW_ORIGIN = "Access-Control-Allow-Origin"; - /** - * {@code "Access-Control-Expose-Headers"} - */ - public static final String ACCESS_CONTROL_EXPOSE_HEADERS = "Access-Control-Expose-Headers"; - /** - * {@code "Access-Control-Max-Age"} - */ - public static final String 
ACCESS_CONTROL_MAX_AGE = "Access-Control-Max-Age"; - /** - * {@code "Access-Control-Request-Headers"} - */ - public static final String ACCESS_CONTROL_REQUEST_HEADERS = "Access-Control-Request-Headers"; - /** - * {@code "Access-Control-Request-Method"} - */ - public static final String ACCESS_CONTROL_REQUEST_METHOD = "Access-Control-Request-Method"; - /** - * {@code "Age"} - */ - public static final String AGE = "Age"; - /** - * {@code "Allow"} - */ - public static final String ALLOW = "Allow"; - /** - * {@code "Authorization"} - */ - public static final String AUTHORIZATION = "Authorization"; - /** - * {@code "Cache-Control"} - */ - public static final String CACHE_CONTROL = "Cache-Control"; - /** - * {@code "Connection"} - */ - public static final String CONNECTION = "Connection"; - /** - * {@code "Content-Base"} - */ - public static final String CONTENT_BASE = "Content-Base"; - /** - * {@code "Content-Encoding"} - */ - public static final String CONTENT_ENCODING = "Content-Encoding"; - /** - * {@code "Content-Language"} - */ - public static final String CONTENT_LANGUAGE = "Content-Language"; - /** - * {@code "Content-Length"} - */ - public static final String CONTENT_LENGTH = "Content-Length"; - /** - * {@code "Content-Location"} - */ - public static final String CONTENT_LOCATION = "Content-Location"; - /** - * {@code "Content-Transfer-Encoding"} - */ - public static final String CONTENT_TRANSFER_ENCODING = "Content-Transfer-Encoding"; - /** - * {@code "Content-MD5"} - */ - public static final String CONTENT_MD5 = "Content-MD5"; - /** - * {@code "Content-Range"} - */ - public static final String CONTENT_RANGE = "Content-Range"; - /** - * {@code "Content-Type"} - */ - public static final String CONTENT_TYPE = "Content-Type"; - /** - * {@code "Cookie"} - */ - public static final String COOKIE = "Cookie"; - /** - * {@code "Date"} - */ - public static final String DATE = "Date"; - /** - * {@code "ETag"} - */ - public static final String ETAG = "ETag"; - /** - * 
{@code "Expect"} - */ - public static final String EXPECT = "Expect"; - /** - * {@code "Expires"} - */ - public static final String EXPIRES = "Expires"; - /** - * {@code "From"} - */ - public static final String FROM = "From"; - /** - * {@code "Host"} - */ - public static final String HOST = "Host"; - /** - * {@code "If-Match"} - */ - public static final String IF_MATCH = "If-Match"; - /** - * {@code "If-Modified-Since"} - */ - public static final String IF_MODIFIED_SINCE = "If-Modified-Since"; - /** - * {@code "If-None-Match"} - */ - public static final String IF_NONE_MATCH = "If-None-Match"; - /** - * {@code "If-Range"} - */ - public static final String IF_RANGE = "If-Range"; - /** - * {@code "If-Unmodified-Since"} - */ - public static final String IF_UNMODIFIED_SINCE = "If-Unmodified-Since"; - /** - * {@code "Last-Modified"} - */ - public static final String LAST_MODIFIED = "Last-Modified"; - /** - * {@code "Location"} - */ - public static final String LOCATION = "Location"; - /** - * {@code "Max-Forwards"} - */ - public static final String MAX_FORWARDS = "Max-Forwards"; - /** - * {@code "Origin"} - */ - public static final String ORIGIN = "Origin"; - /** - * {@code "Pragma"} - */ - public static final String PRAGMA = "Pragma"; - /** - * {@code "Proxy-Authenticate"} - */ - public static final String PROXY_AUTHENTICATE = "Proxy-Authenticate"; - /** - * {@code "Proxy-Authorization"} - */ - public static final String PROXY_AUTHORIZATION = "Proxy-Authorization"; - /** - * {@code "Range"} - */ - public static final String RANGE = "Range"; - /** - * {@code "Referer"} - */ - public static final String REFERER = "Referer"; - /** - * {@code "Retry-After"} - */ - public static final String RETRY_AFTER = "Retry-After"; - /** - * {@code "Sec-WebSocket-Key1"} - */ - public static final String SEC_WEBSOCKET_KEY1 = "Sec-WebSocket-Key1"; - /** - * {@code "Sec-WebSocket-Key2"} - */ - public static final String SEC_WEBSOCKET_KEY2 = "Sec-WebSocket-Key2"; - /** - * {@code 
"Sec-WebSocket-Location"} - */ - public static final String SEC_WEBSOCKET_LOCATION = "Sec-WebSocket-Location"; - /** - * {@code "Sec-WebSocket-Origin"} - */ - public static final String SEC_WEBSOCKET_ORIGIN = "Sec-WebSocket-Origin"; - /** - * {@code "Sec-WebSocket-Protocol"} - */ - public static final String SEC_WEBSOCKET_PROTOCOL = "Sec-WebSocket-Protocol"; - /** - * {@code "Sec-WebSocket-Version"} - */ - public static final String SEC_WEBSOCKET_VERSION = "Sec-WebSocket-Version"; - /** - * {@code "Sec-WebSocket-Key"} - */ - public static final String SEC_WEBSOCKET_KEY = "Sec-WebSocket-Key"; - /** - * {@code "Sec-WebSocket-Accept"} - */ - public static final String SEC_WEBSOCKET_ACCEPT = "Sec-WebSocket-Accept"; - /** - * {@code "Server"} - */ - public static final String SERVER = "Server"; - /** - * {@code "Set-Cookie"} - */ - public static final String SET_COOKIE = "Set-Cookie"; - /** - * {@code "Set-Cookie2"} - */ - public static final String SET_COOKIE2 = "Set-Cookie2"; - /** - * {@code "TE"} - */ - public static final String TE = "TE"; - /** - * {@code "Trailer"} - */ - public static final String TRAILER = "Trailer"; - /** - * {@code "Transfer-Encoding"} - */ - public static final String TRANSFER_ENCODING = "Transfer-Encoding"; - /** - * {@code "Upgrade"} - */ - public static final String UPGRADE = "Upgrade"; - /** - * {@code "User-Agent"} - */ - public static final String USER_AGENT = "User-Agent"; - /** - * {@code "Vary"} - */ - public static final String VARY = "Vary"; - /** - * {@code "Via"} - */ - public static final String VIA = "Via"; - /** - * {@code "Warning"} - */ - public static final String WARNING = "Warning"; - /** - * {@code "WebSocket-Location"} - */ - public static final String WEBSOCKET_LOCATION = "WebSocket-Location"; - /** - * {@code "WebSocket-Origin"} - */ - public static final String WEBSOCKET_ORIGIN = "WebSocket-Origin"; - /** - * {@code "WebSocket-Protocol"} - */ - public static final String WEBSOCKET_PROTOCOL = "WebSocket-Protocol"; - 
/** - * {@code "WWW-Authenticate"} - */ - public static final String WWW_AUTHENTICATE = "WWW-Authenticate"; - - private Names() { - } - } - - /** - * @deprecated Use {@link HttpHeaderValues} instead. - * - * Standard HTTP header values. - */ - @Deprecated - public static final class Values { - /** - * {@code "application/json"} - */ - public static final String APPLICATION_JSON = "application/json"; - /** - * {@code "application/x-www-form-urlencoded"} - */ - public static final String APPLICATION_X_WWW_FORM_URLENCODED = - "application/x-www-form-urlencoded"; - /** - * {@code "base64"} - */ - public static final String BASE64 = "base64"; - /** - * {@code "binary"} - */ - public static final String BINARY = "binary"; - /** - * {@code "boundary"} - */ - public static final String BOUNDARY = "boundary"; - /** - * {@code "bytes"} - */ - public static final String BYTES = "bytes"; - /** - * {@code "charset"} - */ - public static final String CHARSET = "charset"; - /** - * {@code "chunked"} - */ - public static final String CHUNKED = "chunked"; - /** - * {@code "close"} - */ - public static final String CLOSE = "close"; - /** - * {@code "compress"} - */ - public static final String COMPRESS = "compress"; - /** - * {@code "100-continue"} - */ - public static final String CONTINUE = "100-continue"; - /** - * {@code "deflate"} - */ - public static final String DEFLATE = "deflate"; - /** - * {@code "gzip"} - */ - public static final String GZIP = "gzip"; - /** - * {@code "gzip,deflate"} - */ - public static final String GZIP_DEFLATE = "gzip,deflate"; - /** - * {@code "identity"} - */ - public static final String IDENTITY = "identity"; - /** - * {@code "keep-alive"} - */ - public static final String KEEP_ALIVE = "keep-alive"; - /** - * {@code "max-age"} - */ - public static final String MAX_AGE = "max-age"; - /** - * {@code "max-stale"} - */ - public static final String MAX_STALE = "max-stale"; - /** - * {@code "min-fresh"} - */ - public static final String MIN_FRESH = 
"min-fresh"; - /** - * {@code "multipart/form-data"} - */ - public static final String MULTIPART_FORM_DATA = "multipart/form-data"; - /** - * {@code "must-revalidate"} - */ - public static final String MUST_REVALIDATE = "must-revalidate"; - /** - * {@code "no-cache"} - */ - public static final String NO_CACHE = "no-cache"; - /** - * {@code "no-store"} - */ - public static final String NO_STORE = "no-store"; - /** - * {@code "no-transform"} - */ - public static final String NO_TRANSFORM = "no-transform"; - /** - * {@code "none"} - */ - public static final String NONE = "none"; - /** - * {@code "only-if-cached"} - */ - public static final String ONLY_IF_CACHED = "only-if-cached"; - /** - * {@code "private"} - */ - public static final String PRIVATE = "private"; - /** - * {@code "proxy-revalidate"} - */ - public static final String PROXY_REVALIDATE = "proxy-revalidate"; - /** - * {@code "public"} - */ - public static final String PUBLIC = "public"; - /** - * {@code "quoted-printable"} - */ - public static final String QUOTED_PRINTABLE = "quoted-printable"; - /** - * {@code "s-maxage"} - */ - public static final String S_MAXAGE = "s-maxage"; - /** - * {@code "trailers"} - */ - public static final String TRAILERS = "trailers"; - /** - * {@code "Upgrade"} - */ - public static final String UPGRADE = "Upgrade"; - /** - * {@code "WebSocket"} - */ - public static final String WEBSOCKET = "WebSocket"; - - private Values() { - } - } /** * @deprecated Use {@link HttpUtil#isKeepAlive(HttpMessage)} instead. @@ -609,7 +147,7 @@ public static void setHeader(HttpMessage message, String name, Object value) { * If the specified value is not a {@link String}, it is converted into a * {@link String} by {@link Object#toString()}, except for {@link Date} * and {@link Calendar} which are formatted to the date format defined in - * RFC2616. + * RFC2616. 
*/ @Deprecated public static void setHeader(HttpMessage message, CharSequence name, Object value) { @@ -664,7 +202,7 @@ public static void addHeader(HttpMessage message, String name, Object value) { * If the specified value is not a {@link String}, it is converted into a * {@link String} by {@link Object#toString()}, except for {@link Date} * and {@link Calendar} which are formatted to the date format defined in - * RFC2616. + * RFC2616. */ @Deprecated public static void addHeader(HttpMessage message, CharSequence name, Object value) { @@ -895,7 +433,7 @@ public static void setDateHeader(HttpMessage message, String name, Date value) { * Sets a new date header with the specified name and value. If there * is an existing header with the same name, the existing header is removed. * The specified value is formatted as defined in - * RFC2616 + * RFC2616 */ @Deprecated public static void setDateHeader(HttpMessage message, CharSequence name, Date value) { @@ -922,7 +460,7 @@ public static void setDateHeader(HttpMessage message, String name, IterableRFC2616 + * RFC2616 */ @Deprecated public static void setDateHeader(HttpMessage message, CharSequence name, Iterable values) { @@ -944,7 +482,7 @@ public static void addDateHeader(HttpMessage message, String name, Date value) { * * Adds a new date header with the specified name and value. The specified * value is formatted as defined in - * RFC2616 + * RFC2616 */ @Deprecated public static void addDateHeader(HttpMessage message, CharSequence name, Date value) { @@ -1369,7 +907,7 @@ public boolean contains(CharSequence name) { * If the specified value is not a {@link String}, it is converted * into a {@link String} by {@link Object#toString()}, except in the cases * of {@link Date} and {@link Calendar}, which are formatted to the date - * format defined in RFC2616. + * format defined in RFC2616. 
* * @param name The name of the header being added * @param value The value of the header being added @@ -1412,9 +950,7 @@ public HttpHeaders add(CharSequence name, Iterable values) { * @return {@code this} */ public HttpHeaders add(HttpHeaders headers) { - if (headers == null) { - throw new NullPointerException("headers"); - } + requireNonNull(headers, "headers"); for (Map.Entry e: headers) { add(e.getKey(), e.getValue()); } @@ -1449,7 +985,7 @@ public HttpHeaders add(HttpHeaders headers) { * If the specified value is not a {@link String}, it is converted into a * {@link String} by {@link Object#toString()}, except for {@link Date} * and {@link Calendar}, which are formatted to the date format defined in - * RFC2616. + * RFC2616. * * @param name The name of the header being set * @param value The value of the header being set @@ -1493,7 +1029,7 @@ public HttpHeaders set(CharSequence name, Iterable values) { * @return {@code this} */ public HttpHeaders set(HttpHeaders headers) { - checkNotNull(headers, "headers"); + requireNonNull(headers, "headers"); clear(); @@ -1514,7 +1050,7 @@ public HttpHeaders set(HttpHeaders headers) { * @return {@code this} */ public HttpHeaders setAll(HttpHeaders headers) { - checkNotNull(headers, "headers"); + requireNonNull(headers, "headers"); if (headers.isEmpty()) { return this; @@ -1695,7 +1231,7 @@ public String toString() { } /** - * Returns a deap copy of the passed in {@link HttpHeaders}. + * Returns a deep copy of the passed in {@link HttpHeaders}. 
*/ public HttpHeaders copy() { return new DefaultHttpHeaders().set(this); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeadersEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeadersEncoder.java index 4f5ea8f97e6..2ba766a4c69 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeadersEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpHeadersEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessage.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessage.java index 357ad99bca3..d0307cef981 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessage.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessage.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ /** - * An interface that defines a HTTP message, providing common properties for + * An interface that defines an HTTP message, providing common properties for * {@link HttpRequest} and {@link HttpResponse}. 
* * @see HttpResponse diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessageDecoderResult.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessageDecoderResult.java new file mode 100644 index 00000000000..b89252b24ec --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessageDecoderResult.java @@ -0,0 +1,58 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http; + +import io.netty.handler.codec.DecoderResult; + +/** + * A {@link DecoderResult} for {@link HttpMessage}s as produced by an {@link HttpObjectDecoder}. + *

    + * Please note that there is no guarantee that a {@link HttpObjectDecoder} will produce a {@link + * HttpMessageDecoderResult}. It may simply produce a regular {@link DecoderResult}. This result is intended for + * successful {@link HttpMessage} decoder results. + */ +public final class HttpMessageDecoderResult extends DecoderResult { + + private final int initialLineLength; + private final int headerSize; + + HttpMessageDecoderResult(int initialLineLength, int headerSize) { + super(SIGNAL_SUCCESS); + this.initialLineLength = initialLineLength; + this.headerSize = headerSize; + } + + /** + * The decoded initial line length (in bytes), as controlled by {@code maxInitialLineLength}. + */ + public int initialLineLength() { + return initialLineLength; + } + + /** + * The decoded header size (in bytes), as controlled by {@code maxHeaderSize}. + */ + public int headerSize() { + return headerSize; + } + + /** + * The decoded initial line length plus the decoded header size (in bytes). + */ + public int totalSize() { + return initialLineLength + headerSize; + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessageUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessageUtil.java index f78111d0856..2e6115c62a3 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessageUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMessageUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpMethod.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMethod.java index 3d975516d5f..137cab1caa0 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpMethod.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpMethod.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,12 +18,12 @@ import io.netty.util.AsciiString; import static io.netty.util.internal.MathUtil.findNextPositivePowerOfTwo; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.util.internal.ObjectUtil.checkNonEmptyAfterTrim; /** * The request method of HTTP or its derived protocols, such as - * RTSP and - * ICAP. + * RTSP and + * ICAP. 
*/ public class HttpMethod implements Comparable { /** @@ -88,16 +88,16 @@ public class HttpMethod implements Comparable { private static final EnumNameMap methodMap; static { - methodMap = new EnumNameMap( - new EnumNameMap.Node(OPTIONS.toString(), OPTIONS), - new EnumNameMap.Node(GET.toString(), GET), - new EnumNameMap.Node(HEAD.toString(), HEAD), - new EnumNameMap.Node(POST.toString(), POST), - new EnumNameMap.Node(PUT.toString(), PUT), - new EnumNameMap.Node(PATCH.toString(), PATCH), - new EnumNameMap.Node(DELETE.toString(), DELETE), - new EnumNameMap.Node(TRACE.toString(), TRACE), - new EnumNameMap.Node(CONNECT.toString(), CONNECT)); + methodMap = new EnumNameMap<>( + new EnumNameMap.Node<>(OPTIONS.toString(), OPTIONS), + new EnumNameMap.Node<>(GET.toString(), GET), + new EnumNameMap.Node<>(HEAD.toString(), HEAD), + new EnumNameMap.Node<>(POST.toString(), POST), + new EnumNameMap.Node<>(PUT.toString(), PUT), + new EnumNameMap.Node<>(PATCH.toString(), PATCH), + new EnumNameMap.Node<>(DELETE.toString(), DELETE), + new EnumNameMap.Node<>(TRACE.toString(), TRACE), + new EnumNameMap.Node<>(CONNECT.toString(), CONNECT)); } /** @@ -116,14 +116,11 @@ public static HttpMethod valueOf(String name) { * Creates a new HTTP method with the specified name. 
You will not need to * create a new method unless you are implementing a protocol derived from * HTTP, such as - * RTSP and - * ICAP + * RTSP and + * ICAP */ public HttpMethod(String name) { - name = checkNotNull(name, "name").trim(); - if (name.isEmpty()) { - throw new IllegalArgumentException("empty name"); - } + name = checkNonEmptyAfterTrim(name, "name"); for (int i = 0; i < name.length(); i ++) { char c = name.charAt(i); @@ -156,6 +153,9 @@ public int hashCode() { @Override public boolean equals(Object o) { + if (this == o) { + return true; + } if (!(o instanceof HttpMethod)) { return false; } @@ -171,6 +171,9 @@ public String toString() { @Override public int compareTo(HttpMethod o) { + if (o == this) { + return 0; + } return name().compareTo(o.name()); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObject.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObject.java index aaa5a503a39..62c3841eb37 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObject.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObject.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectAggregator.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectAggregator.java index 257581fe232..1ac0c559895 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectAggregator.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectAggregator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,14 +17,13 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPipeline; import io.netty.handler.codec.DecoderResult; import io.netty.handler.codec.MessageAggregator; import io.netty.handler.codec.TooLongFrameException; +import io.netty.util.concurrent.Future; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; @@ -250,42 +249,42 @@ protected void handleOversizedMessage(final ChannelHandlerContext ctx, HttpMessa // If keep-alive is off and 'Expect: 100-continue' is missing, no need to leave the connection open. 
if (oversized instanceof FullHttpMessage || !HttpUtil.is100ContinueExpected(oversized) && !HttpUtil.isKeepAlive(oversized)) { - ChannelFuture future = ctx.writeAndFlush(TOO_LARGE_CLOSE.retainedDuplicate()); - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - logger.debug("Failed to send a 413 Request Entity Too Large.", future.cause()); - } - ctx.close(); + Future future = ctx.writeAndFlush(TOO_LARGE_CLOSE.retainedDuplicate()); + future.addListener(future1 -> { + if (future1.isFailed()) { + logger.debug("Failed to send a 413 Request Entity Too Large.", future1.cause()); } + ctx.close(); }); } else { - ctx.writeAndFlush(TOO_LARGE.retainedDuplicate()).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - logger.debug("Failed to send a 413 Request Entity Too Large.", future.cause()); - ctx.close(); - } + ctx.writeAndFlush(TOO_LARGE.retainedDuplicate()).addListener(future -> { + if (future.isFailed()) { + logger.debug("Failed to send a 413 Request Entity Too Large.", future.cause()); + ctx.close(); } }); } - - // If an oversized request was handled properly and the connection is still alive - // (i.e. rejected 100-continue). the decoder should prepare to handle a new message. 
- HttpObjectDecoder decoder = ctx.pipeline().get(HttpObjectDecoder.class); - if (decoder != null) { - decoder.reset(); - } } else if (oversized instanceof HttpResponse) { - ctx.close(); - throw new TooLongFrameException("Response entity too large: " + oversized); + throw new ResponseTooLargeException("Response entity too large: " + oversized); } else { throw new IllegalStateException(); } } + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + super.exceptionCaught(ctx, cause); + if (cause instanceof ResponseTooLargeException) { + ctx.close(); + } + } + + private static final class ResponseTooLargeException extends TooLongFrameException { + ResponseTooLargeException(String message) { + super(message); + } + } + private abstract static class AggregatedFullHttpMessage implements FullHttpMessage { protected final HttpMessage message; private final ByteBuf content; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java index 22271d81d0d..a1b9401f93d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http; +import static io.netty.util.internal.ObjectUtil.checkPositive; + import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.ChannelHandlerContext; @@ -35,10 +37,11 @@ *

    Parameters that prevents excessive memory consumption

    * * - * + * * * * + * * * * + * * * + *
    NameMeaningNameDefault valueMeaning
    {@code maxInitialLineLength}{@value #DEFAULT_MAX_INITIAL_LINE_LENGTH}The maximum length of the initial line * (e.g. {@code "GET / HTTP/1.0"} or {@code "HTTP/1.0 200 OK"}) * If the length of the initial line exceeds this value, a @@ -46,15 +49,24 @@ *
    {@code maxHeaderSize}{@value #DEFAULT_MAX_HEADER_SIZE}The maximum length of all headers. If the sum of the length of each * header exceeds this value, a {@link TooLongFrameException} will be raised.
    + * + *

    Parameters that control parsing behavior

    + * * - * - * + * + * + * + * + * + * * *
    {@code maxChunkSize}The maximum length of the content or each chunk. If the content length - * (or the length of each chunk) exceeds this value, the content or chunk - * will be split into multiple {@link HttpContent}s whose length is - * {@code maxChunkSize} at maximum.NameDefault valueMeaning
    {@code allowDuplicateContentLengths}{@value #DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS}When set to {@code false}, will reject any messages that contain multiple Content-Length header fields. + * When set to {@code true}, will allow multiple Content-Length headers only if they are all the same decimal value. + * The duplicated field-values will be replaced with a single valid Content-Length field. + * See RFC 7230, Section 3.3.2.
    * @@ -94,17 +106,24 @@ * * Please note that this decoder is designed to be extended to implement * a protocol derived from HTTP, such as - * RTSP and - * ICAP. + * RTSP and + * ICAP. * To implement the decoder of such a derived protocol, extend this class and * implement all abstract methods properly. */ public abstract class HttpObjectDecoder extends ByteToMessageDecoder { + public static final int DEFAULT_MAX_INITIAL_LINE_LENGTH = 4096; + public static final int DEFAULT_MAX_HEADER_SIZE = 8192; + public static final boolean DEFAULT_CHUNKED_SUPPORTED = true; + public static final boolean DEFAULT_VALIDATE_HEADERS = true; + public static final int DEFAULT_INITIAL_BUFFER_SIZE = 128; + public static final boolean DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS = false; + private static final String EMPTY_VALUE = ""; - private final int maxChunkSize; private final boolean chunkedSupported; protected final boolean validateHeaders; + private final boolean allowDuplicateContentLengths; private final HeaderParser headerParser; private final LineParser lineParser; @@ -145,65 +164,59 @@ private enum State { * {@code maxChunkSize (8192)}. */ protected HttpObjectDecoder() { - this(4096, 8192, 8192, true); + this(DEFAULT_MAX_INITIAL_LINE_LENGTH, DEFAULT_MAX_HEADER_SIZE, DEFAULT_CHUNKED_SUPPORTED); } /** * Creates a new instance with the specified parameters. */ protected HttpObjectDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean chunkedSupported) { - this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported, true); + int maxInitialLineLength, int maxHeaderSize, boolean chunkedSupported) { + this(maxInitialLineLength, maxHeaderSize, chunkedSupported, DEFAULT_VALIDATE_HEADERS); } /** * Creates a new instance with the specified parameters. 
*/ protected HttpObjectDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, + int maxInitialLineLength, int maxHeaderSize, boolean chunkedSupported, boolean validateHeaders) { - this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported, validateHeaders, 128); + this(maxInitialLineLength, maxHeaderSize, chunkedSupported, validateHeaders, DEFAULT_INITIAL_BUFFER_SIZE); } + /** + * Creates a new instance with the specified parameters. + */ protected HttpObjectDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, + int maxInitialLineLength, int maxHeaderSize, boolean chunkedSupported, boolean validateHeaders, int initialBufferSize) { - if (maxInitialLineLength <= 0) { - throw new IllegalArgumentException( - "maxInitialLineLength must be a positive integer: " + - maxInitialLineLength); - } - if (maxHeaderSize <= 0) { - throw new IllegalArgumentException( - "maxHeaderSize must be a positive integer: " + - maxHeaderSize); - } - if (maxChunkSize <= 0) { - throw new IllegalArgumentException( - "maxChunkSize must be a positive integer: " + - maxChunkSize); - } + this(maxInitialLineLength, maxHeaderSize, chunkedSupported, validateHeaders, initialBufferSize, + DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS); + } + + protected HttpObjectDecoder( + int maxInitialLineLength, int maxHeaderSize, + boolean chunkedSupported, boolean validateHeaders, int initialBufferSize, + boolean allowDuplicateContentLengths) { + checkPositive(maxInitialLineLength, "maxInitialLineLength"); + checkPositive(maxHeaderSize, "maxHeaderSize"); AppendableCharSequence seq = new AppendableCharSequence(initialBufferSize); lineParser = new LineParser(seq, maxInitialLineLength); headerParser = new HeaderParser(seq, maxHeaderSize); - this.maxChunkSize = maxChunkSize; this.chunkedSupported = chunkedSupported; this.validateHeaders = validateHeaders; + this.allowDuplicateContentLengths = allowDuplicateContentLengths; } @Override - protected void 
decode(ChannelHandlerContext ctx, ByteBuf buffer, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf buffer) throws Exception { if (resetRequested) { resetNow(); } switch (currentState) { - case SKIP_CONTROL_CHARS: { - if (!skipControlCharacters(buffer)) { - return; - } - currentState = State.READ_INITIAL; - } + case SKIP_CONTROL_CHARS: + // Fall-through case READ_INITIAL: try { AppendableCharSequence line = lineParser.parse(buffer); if (line == null) { @@ -220,7 +233,7 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou currentState = State.READ_HEADER; // fall-through } catch (Exception e) { - out.add(invalidMessage(buffer, e)); + ctx.fireChannelRead(invalidMessage(buffer, e)); return; } case READ_HEADER: try { @@ -233,8 +246,8 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou case SKIP_CONTROL_CHARS: // fast-path // No content is expected. - out.add(message); - out.add(LastHttpContent.EMPTY_LAST_CONTENT); + ctx.fireChannelRead(message); + ctx.fireChannelRead(LastHttpContent.EMPTY_LAST_CONTENT); resetNow(); return; case READ_CHUNK_SIZE: @@ -242,19 +255,19 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou throw new IllegalArgumentException("Chunked messages not supported"); } // Chunked encoding - generate HttpMessage first. HttpChunks will follow. - out.add(message); + ctx.fireChannelRead(message); return; default: - /** - * RFC 7230, 3.3.3 states that if a - * request does not have either a transfer-encoding or a content-length header then the message body - * length is 0. However for a response the body length is the number of octets received prior to the - * server closing the connection. So we treat this as variable length chunked encoding. + /* + RFC 7230, 3.3.3 (https://tools.ietf.org/html/rfc7230#section-3.3.3) states that if a + request does not have either a transfer-encoding or a content-length header then the message body + length is 0. 
However for a response the body length is the number of octets received prior to the + server closing the connection. So we treat this as variable length chunked encoding. */ long contentLength = contentLength(); if (contentLength == 0 || contentLength == -1 && isDecodingRequest()) { - out.add(message); - out.add(LastHttpContent.EMPTY_LAST_CONTENT); + ctx.fireChannelRead(message); + ctx.fireChannelRead(LastHttpContent.EMPTY_LAST_CONTENT); resetNow(); return; } @@ -262,7 +275,7 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou assert nextState == State.READ_FIXED_LENGTH_CONTENT || nextState == State.READ_VARIABLE_LENGTH_CONTENT; - out.add(message); + ctx.fireChannelRead(message); if (nextState == State.READ_FIXED_LENGTH_CONTENT) { // chunkSize will be decreased as the READ_FIXED_LENGTH_CONTENT state reads data chunk by chunk. @@ -273,50 +286,50 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou return; } } catch (Exception e) { - out.add(invalidMessage(buffer, e)); + ctx.fireChannelRead(invalidMessage(buffer, e)); return; } case READ_VARIABLE_LENGTH_CONTENT: { // Keep reading data as a chunk until the end of connection is reached. - int toRead = Math.min(buffer.readableBytes(), maxChunkSize); + int toRead = buffer.readableBytes(); if (toRead > 0) { ByteBuf content = buffer.readRetainedSlice(toRead); - out.add(new DefaultHttpContent(content)); + ctx.fireChannelRead(new DefaultHttpContent(content)); } return; } case READ_FIXED_LENGTH_CONTENT: { - int readLimit = buffer.readableBytes(); + int toRead = buffer.readableBytes(); // Check if the buffer is readable first as we use the readable byte count // to create the HttpChunk. This is needed as otherwise we may end up with - // create a HttpChunk instance that contains an empty buffer and so is + // create an HttpChunk instance that contains an empty buffer and so is // handled like it is the last HttpChunk. 
// // See https://github.com/netty/netty/issues/433 - if (readLimit == 0) { + if (toRead == 0) { return; } - int toRead = Math.min(readLimit, maxChunkSize); if (toRead > chunkSize) { toRead = (int) chunkSize; } + ByteBuf content = buffer.readRetainedSlice(toRead); chunkSize -= toRead; if (chunkSize == 0) { // Read all content. - out.add(new DefaultLastHttpContent(content, validateHeaders)); + ctx.fireChannelRead(new DefaultLastHttpContent(content, validateHeaders)); resetNow(); } else { - out.add(new DefaultHttpContent(content)); + ctx.fireChannelRead(new DefaultHttpContent(content)); } return; } - /** - * everything else after this point takes care of reading chunked content. basically, read chunk size, - * read chunk, read and ignore the CRLF and repeat until 0 + /* + everything else after this point takes care of reading chunked content. basically, read chunk size, + read chunk, read and ignore the CRLF and repeat until 0 */ case READ_CHUNK_SIZE: try { AppendableCharSequence line = lineParser.parse(buffer); @@ -332,12 +345,12 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou currentState = State.READ_CHUNKED_CONTENT; // fall-through } catch (Exception e) { - out.add(invalidChunk(buffer, e)); + ctx.fireChannelRead(invalidChunk(buffer, e)); return; } case READ_CHUNKED_CONTENT: { assert chunkSize <= Integer.MAX_VALUE; - int toRead = Math.min((int) chunkSize, maxChunkSize); + int toRead = (int) chunkSize; toRead = Math.min(toRead, buffer.readableBytes()); if (toRead == 0) { return; @@ -345,7 +358,7 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou HttpContent chunk = new DefaultHttpContent(buffer.readRetainedSlice(toRead)); chunkSize -= toRead; - out.add(chunk); + ctx.fireChannelRead(chunk); if (chunkSize != 0) { return; @@ -371,11 +384,11 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou if (trailer == null) { return; } - out.add(trailer); + ctx.fireChannelRead(trailer); resetNow(); 
return; } catch (Exception e) { - out.add(invalidChunk(buffer, e)); + ctx.fireChannelRead(invalidChunk(buffer, e)); return; } case BAD_MESSAGE: { @@ -390,16 +403,18 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf buffer, List ou // other handler will replace this codec with the upgraded protocol codec to // take the traffic over at some point then. // See https://github.com/netty/netty/issues/2173 - out.add(buffer.readBytes(readableBytes)); + ctx.fireChannelRead(buffer.readBytes(readableBytes)); } break; } + default: + break; } } @Override - protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { - super.decodeLast(ctx, in, out); + protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in) throws Exception { + super.decodeLast(ctx, in); if (resetRequested) { // If a reset was requested by decodeLast() we need to do it now otherwise we may produce a @@ -411,7 +426,7 @@ protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List ou boolean chunked = HttpUtil.isTransferEncodingChunked(message); if (currentState == State.READ_VARIABLE_LENGTH_CONTENT && !in.isReadable() && !chunked) { // End of connection. - out.add(LastHttpContent.EMPTY_LAST_CONTENT); + ctx.fireChannelRead(LastHttpContent.EMPTY_LAST_CONTENT); resetNow(); return; } @@ -419,7 +434,7 @@ protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List ou if (currentState == State.READ_HEADER) { // If we are still in the state of reading headers we need to create a new invalid message that // signals that the connection was closed before we received the headers. 
- out.add(invalidMessage(Unpooled.EMPTY_BUFFER, + ctx.fireChannelRead(invalidMessage(Unpooled.EMPTY_BUFFER, new PrematureChannelClosureException("Connection closed before received headers"))); resetNow(); return; @@ -438,7 +453,7 @@ protected void decodeLast(ChannelHandlerContext ctx, ByteBuf in, List ou } if (!prematureClosure) { - out.add(LastHttpContent.EMPTY_LAST_CONTENT); + ctx.fireChannelRead(LastHttpContent.EMPTY_LAST_CONTENT); } resetNow(); } @@ -468,7 +483,7 @@ protected boolean isContentAlwaysEmpty(HttpMessage msg) { // Correctly handle return codes of 1xx. // // See: - // - http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html Section 4.4 + // - https://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html Section 4.4 // - https://github.com/netty/netty/issues/222 if (code >= 100 && code < 200) { // One exception: Hixie 76 websocket handshake response @@ -479,6 +494,8 @@ protected boolean isContentAlwaysEmpty(HttpMessage msg) { switch (code) { case 204: case 304: return true; + default: + return false; } } return false; @@ -534,12 +551,10 @@ private HttpMessage invalidMessage(ByteBuf in, Exception cause) { // when we produced an invalid message without consuming anything. 
in.skipBytes(in.readableBytes()); - if (message != null) { - message.setDecoderResult(DecoderResult.failure(cause)); - } else { + if (message == null) { message = createInvalidMessage(); - message.setDecoderResult(DecoderResult.failure(cause)); } + message.setDecoderResult(DecoderResult.failure(cause)); HttpMessage ret = message; message = null; @@ -560,22 +575,6 @@ private HttpContent invalidChunk(ByteBuf in, Exception cause) { return chunk; } - private static boolean skipControlCharacters(ByteBuf buffer) { - boolean skiped = false; - final int wIdx = buffer.writerIndex(); - int rIdx = buffer.readerIndex(); - while (wIdx > rIdx) { - int c = buffer.getUnsignedByte(rIdx++); - if (!Character.isISOControl(c) && !Character.isWhitespace(c)) { - rIdx--; - skiped = true; - break; - } - } - buffer.readerIndex(rIdx); - return skiped; - } - private State readHeaders(ByteBuf buffer) { final HttpMessage message = this.message; final HttpHeaders headers = message.headers(); @@ -586,7 +585,7 @@ private State readHeaders(ByteBuf buffer) { } if (line.length() > 0) { do { - char firstChar = line.charAt(0); + char firstChar = line.charAtUnsafe(0); if (name != null && (firstChar == ' ' || firstChar == '\t')) { //please do not make one line from below code //as it breaks +XX:OptimizeStringConcat optimization @@ -611,23 +610,68 @@ private State readHeaders(ByteBuf buffer) { if (name != null) { headers.add(name, value); } + // reset name and value fields name = null; value = null; - State nextState; + // Done parsing initial line and headers. Set decoder result. 
+ HttpMessageDecoderResult decoderResult = new HttpMessageDecoderResult(lineParser.size, headerParser.size); + message.setDecoderResult(decoderResult); + + List contentLengthFields = headers.getAll(HttpHeaderNames.CONTENT_LENGTH); + if (!contentLengthFields.isEmpty()) { + HttpVersion version = message.protocolVersion(); + boolean isHttp10OrEarlier = version.majorVersion() < 1 || (version.majorVersion() == 1 + && version.minorVersion() == 0); + // Guard against multiple Content-Length headers as stated in + // https://tools.ietf.org/html/rfc7230#section-3.3.2: + contentLength = HttpUtil.normalizeAndGetContentLength(contentLengthFields, + isHttp10OrEarlier, allowDuplicateContentLengths); + if (contentLength != -1) { + headers.set(HttpHeaderNames.CONTENT_LENGTH, contentLength); + } + } if (isContentAlwaysEmpty(message)) { HttpUtil.setTransferEncodingChunked(message, false); - nextState = State.SKIP_CONTROL_CHARS; + return State.SKIP_CONTROL_CHARS; } else if (HttpUtil.isTransferEncodingChunked(message)) { - nextState = State.READ_CHUNK_SIZE; + if (!contentLengthFields.isEmpty() && message.protocolVersion() == HttpVersion.HTTP_1_1) { + handleTransferEncodingChunkedWithContentLength(message); + } + return State.READ_CHUNK_SIZE; } else if (contentLength() >= 0) { - nextState = State.READ_FIXED_LENGTH_CONTENT; + return State.READ_FIXED_LENGTH_CONTENT; } else { - nextState = State.READ_VARIABLE_LENGTH_CONTENT; + return State.READ_VARIABLE_LENGTH_CONTENT; } - return nextState; + } + + /** + * Invoked when a message with both a "Transfer-Encoding: chunked" and a "Content-Length" header field is detected. + * The default behavior is to remove the Content-Length field, but this method could be overridden + * to change the behavior (to, e.g., throw an exception and produce an invalid message). + *

    + * See: https://tools.ietf.org/html/rfc7230#section-3.3.3 + *

    +     *     If a message is received with both a Transfer-Encoding and a
    +     *     Content-Length header field, the Transfer-Encoding overrides the
    +     *     Content-Length.  Such a message might indicate an attempt to
    +     *     perform request smuggling (Section 9.5) or response splitting
    +     *     (Section 9.4) and ought to be handled as an error.  A sender MUST
    +     *     remove the received Content-Length field prior to forwarding such
    +     *     a message downstream.
    +     * 
    + * Also see: + * https://github.com/apache/tomcat/blob/b693d7c1981fa7f51e58bc8c8e72e3fe80b7b773/ + * java/org/apache/coyote/http11/Http11Processor.java#L747-L755 + * https://github.com/nginx/nginx/blob/0ad4393e30c119d250415cb769e3d8bc8dce5186/ + * src/http/ngx_http_request.c#L1946-L1953 + */ + protected void handleTransferEncodingChunkedWithContentLength(HttpMessage message) { + message.headers().remove(HttpHeaderNames.CONTENT_LENGTH); + contentLength = Long.MIN_VALUE; } private long contentLength() { @@ -642,49 +686,50 @@ private LastHttpContent readTrailingHeaders(ByteBuf buffer) { if (line == null) { return null; } + LastHttpContent trailer = this.trailer; + if (line.length() == 0 && trailer == null) { + // We have received the empty line which signals the trailer is complete and did not parse any trailers + // before. Just return an empty last content to reduce allocations. + return LastHttpContent.EMPTY_LAST_CONTENT; + } + CharSequence lastHeader = null; - if (line.length() > 0) { - LastHttpContent trailer = this.trailer; - if (trailer == null) { - trailer = this.trailer = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER, validateHeaders); - } - do { - char firstChar = line.charAt(0); - if (lastHeader != null && (firstChar == ' ' || firstChar == '\t')) { - List current = trailer.trailingHeaders().getAll(lastHeader); - if (!current.isEmpty()) { - int lastPos = current.size() - 1; - //please do not make one line from below code - //as it breaks +XX:OptimizeStringConcat optimization - String lineTrimmed = line.toString().trim(); - String currentLastPos = current.get(lastPos); - current.set(lastPos, currentLastPos + lineTrimmed); - } - } else { - splitHeader(line); - CharSequence headerName = name; - if (!HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(headerName) && + if (trailer == null) { + trailer = this.trailer = new DefaultLastHttpContent(Unpooled.EMPTY_BUFFER, validateHeaders); + } + while (line.length() > 0) { + char firstChar = 
line.charAtUnsafe(0); + if (lastHeader != null && (firstChar == ' ' || firstChar == '\t')) { + List current = trailer.trailingHeaders().getAll(lastHeader); + if (!current.isEmpty()) { + int lastPos = current.size() - 1; + //please do not make one line from below code + //as it breaks +XX:OptimizeStringConcat optimization + String lineTrimmed = line.toString().trim(); + String currentLastPos = current.get(lastPos); + current.set(lastPos, currentLastPos + lineTrimmed); + } + } else { + splitHeader(line); + CharSequence headerName = name; + if (!HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(headerName) && !HttpHeaderNames.TRANSFER_ENCODING.contentEqualsIgnoreCase(headerName) && !HttpHeaderNames.TRAILER.contentEqualsIgnoreCase(headerName)) { - trailer.trailingHeaders().add(headerName, value); - } - lastHeader = name; - // reset name and value fields - name = null; - value = null; + trailer.trailingHeaders().add(headerName, value); } - - line = headerParser.parse(buffer); - if (line == null) { - return null; - } - } while (line.length() > 0); - - this.trailer = null; - return trailer; + lastHeader = name; + // reset name and value fields + name = null; + value = null; + } + line = headerParser.parse(buffer); + if (line == null) { + return null; + } } - return LastHttpContent.EMPTY_LAST_CONTENT; + this.trailer = null; + return trailer; } protected abstract boolean isDecodingRequest(); @@ -712,13 +757,13 @@ private static String[] splitInitialLine(AppendableCharSequence sb) { int cStart; int cEnd; - aStart = findNonWhitespace(sb, 0); - aEnd = findWhitespace(sb, aStart); + aStart = findNonSPLenient(sb, 0); + aEnd = findSPLenient(sb, aStart); - bStart = findNonWhitespace(sb, aEnd); - bEnd = findWhitespace(sb, bStart); + bStart = findNonSPLenient(sb, aEnd); + bEnd = findSPLenient(sb, bStart); - cStart = findNonWhitespace(sb, bEnd); + cStart = findNonSPLenient(sb, bEnd); cEnd = findEndOfString(sb); return new String[] { @@ -735,23 +780,42 @@ private void 
splitHeader(AppendableCharSequence sb) { int valueStart; int valueEnd; - nameStart = findNonWhitespace(sb, 0); + nameStart = findNonWhitespace(sb, 0, false); for (nameEnd = nameStart; nameEnd < length; nameEnd ++) { - char ch = sb.charAt(nameEnd); - if (ch == ':' || Character.isWhitespace(ch)) { + char ch = sb.charAtUnsafe(nameEnd); + // https://tools.ietf.org/html/rfc7230#section-3.2.4 + // + // No whitespace is allowed between the header field-name and colon. In + // the past, differences in the handling of such whitespace have led to + // security vulnerabilities in request routing and response handling. A + // server MUST reject any received request message that contains + // whitespace between a header field-name and colon with a response code + // of 400 (Bad Request). A proxy MUST remove any such whitespace from a + // response message before forwarding the message downstream. + if (ch == ':' || + // In case of decoding a request we will just continue processing and header validation + // is done in the DefaultHttpHeaders implementation. + // + // In the case of decoding a response we will "skip" the whitespace. + (!isDecodingRequest() && isOWS(ch))) { break; } } + if (nameEnd == length) { + // There was no colon present at all. 
+ throw new IllegalArgumentException("No colon found"); + } + for (colonEnd = nameEnd; colonEnd < length; colonEnd ++) { - if (sb.charAt(colonEnd) == ':') { + if (sb.charAtUnsafe(colonEnd) == ':') { colonEnd ++; break; } } name = sb.subStringUnsafe(nameStart, nameEnd); - valueStart = findNonWhitespace(sb, colonEnd); + valueStart = findNonWhitespace(sb, colonEnd, true); if (valueStart == length) { value = EMPTY_VALUE; } else { @@ -760,19 +824,45 @@ private void splitHeader(AppendableCharSequence sb) { } } - private static int findNonWhitespace(AppendableCharSequence sb, int offset) { + private static int findNonSPLenient(AppendableCharSequence sb, int offset) { for (int result = offset; result < sb.length(); ++result) { - if (!Character.isWhitespace(sb.charAtUnsafe(result))) { + char c = sb.charAtUnsafe(result); + // See https://tools.ietf.org/html/rfc7230#section-3.5 + if (isSPLenient(c)) { + continue; + } + if (Character.isWhitespace(c)) { + // Any other whitespace delimiter is invalid + throw new IllegalArgumentException("Invalid separator"); + } + return result; + } + return sb.length(); + } + + private static int findSPLenient(AppendableCharSequence sb, int offset) { + for (int result = offset; result < sb.length(); ++result) { + if (isSPLenient(sb.charAtUnsafe(result))) { return result; } } return sb.length(); } - private static int findWhitespace(AppendableCharSequence sb, int offset) { + private static boolean isSPLenient(char c) { + // See https://tools.ietf.org/html/rfc7230#section-3.5 + return c == ' ' || c == (char) 0x09 || c == (char) 0x0B || c == (char) 0x0C || c == (char) 0x0D; + } + + private static int findNonWhitespace(AppendableCharSequence sb, int offset, boolean validateOWS) { for (int result = offset; result < sb.length(); ++result) { - if (Character.isWhitespace(sb.charAtUnsafe(result))) { + char c = sb.charAtUnsafe(result); + if (!Character.isWhitespace(c)) { return result; + } else if (validateOWS && !isOWS(c)) { + // Only OWS is supported 
for whitespace + throw new IllegalArgumentException("Invalid separator, only a single space or horizontal tab allowed," + + " but received a '" + c + "' (0x" + Integer.toHexString(c) + ")"); } } return sb.length(); @@ -787,10 +877,14 @@ private static int findEndOfString(AppendableCharSequence sb) { return 0; } + private static boolean isOWS(char ch) { + return ch == ' ' || ch == (char) 0x09; + } + private static class HeaderParser implements ByteProcessor { private final AppendableCharSequence seq; private final int maxLength; - private int size; + int size; HeaderParser(AppendableCharSequence seq, int maxLength) { this.seq = seq; @@ -814,15 +908,25 @@ public void reset() { } @Override - public boolean process(byte value) throws Exception { + public boolean process(byte value) { char nextByte = (char) (value & 0xFF); - if (nextByte == HttpConstants.CR) { - return true; - } if (nextByte == HttpConstants.LF) { + int len = seq.length(); + // Drop CR if we had a CRLF pair + if (len >= 1 && seq.charAtUnsafe(len - 1) == HttpConstants.CR) { + -- size; + seq.setLength(len - 1); + } return false; } + increaseCount(); + + seq.append(nextByte); + return true; + } + + protected final void increaseCount() { if (++ size > maxLength) { // TODO: Respond with Bad Request and discard the traffic // or close the connection. @@ -830,9 +934,6 @@ public boolean process(byte value) throws Exception { // If decoding a response, just throw an exception. 
throw newException(maxLength); } - - seq.append(nextByte); - return true; } protected TooLongFrameException newException(int maxLength) { @@ -840,7 +941,7 @@ protected TooLongFrameException newException(int maxLength) { } } - private static final class LineParser extends HeaderParser { + private final class LineParser extends HeaderParser { LineParser(AppendableCharSequence seq, int maxLength) { super(seq, maxLength); @@ -848,10 +949,24 @@ private static final class LineParser extends HeaderParser { @Override public AppendableCharSequence parse(ByteBuf buffer) { - reset(); + // Suppress a warning because HeaderParser.reset() is supposed to be called + reset(); // lgtm[java/subtle-inherited-call] return super.parse(buffer); } + @Override + public boolean process(byte value) { + if (currentState == State.SKIP_CONTROL_CHARS) { + char c = (char) (value & 0xFF); + if (Character.isISOControl(c) || Character.isWhitespace(c)) { + increaseCount(); + return true; + } + currentState = State.READ_INITIAL; + } + return super.process(value); + } + @Override protected TooLongFrameException newException(int maxLength) { return new TooLongFrameException("An HTTP line is larger than " + maxLength + " bytes."); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectEncoder.java index fe03378bba6..5b7ba82fb38 100755 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,7 @@ */ package io.netty.handler.codec.http; +import io.netty.buffer.ByteBufConvertible; import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; @@ -41,8 +42,8 @@ * * Please note that this encoder is designed to be extended to implement * a protocol derived from HTTP, such as - * RTSP and - * ICAP. + * RTSP and + * ICAP. * To implement the encoder of such a derived protocol, extend this class and * implement all abstract methods properly. */ @@ -83,7 +84,8 @@ protected void encode(ChannelHandlerContext ctx, Object msg, List out) t ByteBuf buf = null; if (msg instanceof HttpMessage) { if (state != ST_INIT) { - throw new IllegalStateException("unexpected message type: " + StringUtil.simpleClassName(msg)); + throw new IllegalStateException("unexpected message type: " + StringUtil.simpleClassName(msg) + + ", state: " + state); } @SuppressWarnings({ "unchecked", "CastConflictsWithInstanceof" }) @@ -106,21 +108,22 @@ protected void encode(ChannelHandlerContext ctx, Object msg, List out) t // Bypass the encoder in case of an empty buffer, so that the following idiom works: // - // ch.write(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); + // ch.write(Unpooled.EMPTY_BUFFER).addListener(ch, ChannelFutureListeners.CLOSE); // // See https://github.com/netty/netty/issues/2983 for more information. 
- if (msg instanceof ByteBuf) { - final ByteBuf potentialEmptyBuf = (ByteBuf) msg; + if (msg instanceof ByteBufConvertible) { + final ByteBuf potentialEmptyBuf = ((ByteBufConvertible) msg).asByteBuf(); if (!potentialEmptyBuf.isReadable()) { out.add(potentialEmptyBuf.retain()); return; } } - if (msg instanceof HttpContent || msg instanceof ByteBuf || msg instanceof FileRegion) { + if (msg instanceof HttpContent || msg instanceof ByteBufConvertible || msg instanceof FileRegion) { switch (state) { case ST_INIT: - throw new IllegalStateException("unexpected message type: " + StringUtil.simpleClassName(msg)); + throw new IllegalStateException("unexpected message type: " + StringUtil.simpleClassName(msg) + + ", state: " + state); case ST_CONTENT_NON_CHUNK: final long contentLength = contentLength(msg); if (contentLength > 0) { @@ -242,12 +245,12 @@ protected boolean isContentAlwaysEmpty(@SuppressWarnings("unused") H msg) { @Override public boolean acceptOutboundMessage(Object msg) throws Exception { - return msg instanceof HttpObject || msg instanceof ByteBuf || msg instanceof FileRegion; + return msg instanceof HttpObject || msg instanceof ByteBufConvertible || msg instanceof FileRegion; } private static Object encodeAndRetain(Object msg) { - if (msg instanceof ByteBuf) { - return ((ByteBuf) msg).retain(); + if (msg instanceof ByteBufConvertible) { + return ((ByteBufConvertible) msg).asByteBuf().retain(); } if (msg instanceof HttpContent) { return ((HttpContent) msg).content().retain(); @@ -262,8 +265,8 @@ private static long contentLength(Object msg) { if (msg instanceof HttpContent) { return ((HttpContent) msg).content().readableBytes(); } - if (msg instanceof ByteBuf) { - return ((ByteBuf) msg).readableBytes(); + if (msg instanceof ByteBufConvertible) { + return ((ByteBufConvertible) msg).asByteBuf().readableBytes(); } if (msg instanceof FileRegion) { return ((FileRegion) msg).count(); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequest.java 
b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequest.java index 407bb07b356..5484b4cd88d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequest.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java index 24252c73587..e2c9a9e1f74 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -66,19 +66,26 @@ public HttpRequestDecoder() { * Creates a new instance with the specified parameters. 
*/ public HttpRequestDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize) { - super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true); + int maxInitialLineLength, int maxHeaderSize) { + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED); } public HttpRequestDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders) { - super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders); + int maxInitialLineLength, int maxHeaderSize, boolean validateHeaders) { + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders); } public HttpRequestDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders, + int maxInitialLineLength, int maxHeaderSize, boolean validateHeaders, int initialBufferSize) { - super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders, initialBufferSize); + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders, initialBufferSize); + } + + public HttpRequestDecoder( + int maxInitialLineLength, int maxHeaderSize, boolean validateHeaders, + int initialBufferSize, boolean allowDuplicateContentLengths) { + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders, + initialBufferSize, allowDuplicateContentLengths); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestEncoder.java index a7658d32c90..a741e0323c8 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -44,7 +44,7 @@ protected void encodeInitialLine(ByteBuf buf, HttpRequest request) throws Except if (uri.isEmpty()) { // Add " / " as absolute path if uri is not present. - // See http://tools.ietf.org/html/rfc2616#section-5.1.2 + // See https://tools.ietf.org/html/rfc2616#section-5.1.2 ByteBufUtil.writeMediumBE(buf, SPACE_SLASH_AND_SPACE_MEDIUM); } else { CharSequence uriCharSequence = uri; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponse.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponse.java index 07fcb47eb32..b0ba3452938 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponse.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponse.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java index 21491971ef3..40d5f657859 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -97,19 +97,26 @@ public HttpResponseDecoder() { * Creates a new instance with the specified parameters. */ public HttpResponseDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize) { - super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true); + int maxInitialLineLength, int maxHeaderSize) { + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED); } public HttpResponseDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders) { - super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders); + int maxInitialLineLength, int maxHeaderSize, boolean validateHeaders) { + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders); } public HttpResponseDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders, + int maxInitialLineLength, int maxHeaderSize, boolean validateHeaders, int initialBufferSize) { - super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders, initialBufferSize); + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders, initialBufferSize); + } + + public HttpResponseDecoder( + int maxInitialLineLength, int maxHeaderSize, boolean validateHeaders, + int initialBufferSize, boolean allowDuplicateContentLengths) { + super(maxInitialLineLength, maxHeaderSize, DEFAULT_CHUNKED_SUPPORTED, validateHeaders, + initialBufferSize, allowDuplicateContentLengths); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseEncoder.java index 
60086cfd8a2..1bbddfb91d4 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -53,6 +53,14 @@ protected void sanitizeHeadersBeforeEncode(HttpResponse msg, boolean isAlwaysEmp // Stripping Transfer-Encoding: // See https://tools.ietf.org/html/rfc7230#section-3.3.1 msg.headers().remove(HttpHeaderNames.TRANSFER_ENCODING); + } else if (status.code() == HttpResponseStatus.RESET_CONTENT.code()) { + + // Stripping Transfer-Encoding: + msg.headers().remove(HttpHeaderNames.TRANSFER_ENCODING); + + // Set Content-Length: 0 + // https://httpstatuses.com/205 + msg.headers().setInt(HttpHeaderNames.CONTENT_LENGTH, 0); } } } @@ -74,6 +82,7 @@ protected boolean isContentAlwaysEmpty(HttpResponse msg) { return true; } return status.code() == HttpResponseStatus.NO_CONTENT.code() || - status.code() == HttpResponseStatus.NOT_MODIFIED.code(); + status.code() == HttpResponseStatus.NOT_MODIFIED.code() || + status.code() == HttpResponseStatus.RESET_CONTENT.code(); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseStatus.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseStatus.java index b7e2c10d456..bb7641eac8f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseStatus.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseStatus.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -22,12 +22,14 @@ import static io.netty.handler.codec.http.HttpConstants.SP; import static io.netty.util.ByteProcessor.FIND_ASCII_SPACE; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; import static java.lang.Integer.parseInt; +import static java.util.Objects.requireNonNull; /** * The response code and its description of HTTP or its derived protocols, such as - * RTSP and - * ICAP. + * RTSP and + * ICAP. */ public class HttpResponseStatus implements Comparable { @@ -223,7 +225,7 @@ public class HttpResponseStatus implements Comparable { /** * 421 Misdirected Request * - * 421 Status Code + * @see 421 (Misdirected Request) Status Code */ public static final HttpResponseStatus MISDIRECTED_REQUEST = newStatus(421, "Misdirected Request"); @@ -538,14 +540,9 @@ public HttpResponseStatus(int code, String reasonPhrase) { } private HttpResponseStatus(int code, String reasonPhrase, boolean bytes) { - if (code < 0) { - throw new IllegalArgumentException( - "code: " + code + " (expected: 0+)"); - } + checkPositiveOrZero(code, "code"); - if (reasonPhrase == null) { - throw new NullPointerException("reasonPhrase"); - } + requireNonNull(reasonPhrase, "reasonPhrase"); for (int i = 0; i < reasonPhrase.length(); i ++) { char c = reasonPhrase.charAt(i); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpScheme.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpScheme.java index 21facc2386d..97a6c6df732 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpScheme.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpScheme.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in 
compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerCodec.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerCodec.java index 4e8d61361b2..63e40805efb 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerCodec.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerCodec.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,9 +20,11 @@ import io.netty.channel.CombinedChannelDuplexHandler; import java.util.ArrayDeque; -import java.util.List; import java.util.Queue; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_HEADER_SIZE; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_INITIAL_LINE_LENGTH; + /** * A combination of {@link HttpRequestDecoder} and {@link HttpResponseEncoder} * which enables easier server side HTTP implementation. 
@@ -33,7 +35,7 @@ public final class HttpServerCodec extends CombinedChannelDuplexHandler queue = new ArrayDeque(); + private final Queue queue = new ArrayDeque<>(); /** * Creates a new instance with the default decoder options @@ -41,36 +43,46 @@ public final class HttpServerCodec extends CombinedChannelDuplexHandler out) throws Exception { - int oldSize = out.size(); - super.decode(ctx, buffer, out); - int size = out.size(); - for (int i = oldSize; i < size; i++) { - Object obj = out.get(i); - if (obj instanceof HttpRequest) { - queue.add(((HttpRequest) obj).method()); + protected void decode(final ChannelHandlerContext ctx, ByteBuf buffer) throws Exception { + super.decode(context, buffer); + } + + HttpServerRequestDecoder(int maxInitialLineLength, int maxHeaderSize, + boolean validateHeaders, int initialBufferSize, boolean allowDuplicateContentLengths) { + super(maxInitialLineLength, maxHeaderSize, validateHeaders, initialBufferSize, + allowDuplicateContentLengths); + } + + @Override + protected void handlerAdded0(final ChannelHandlerContext ctx) { + context = new DelegatingChannelHandlerContext(ctx) { + + @Override + public ChannelHandlerContext fireChannelRead(Object msg) { + if (msg instanceof HttpRequest) { + queue.add(((HttpRequest) msg).method()); + } + super.fireChannelRead(msg); + return this; } - } + }; } } @@ -115,7 +143,8 @@ private final class HttpServerResponseEncoder extends HttpResponseEncoder { @Override protected void sanitizeHeadersBeforeEncode(HttpResponse msg, boolean isAlwaysEmpty) { - if (!isAlwaysEmpty && method == HttpMethod.CONNECT && msg.status().codeClass() == HttpStatusClass.SUCCESS) { + if (!isAlwaysEmpty && HttpMethod.CONNECT.equals(method) + && msg.status().codeClass() == HttpStatusClass.SUCCESS) { // Stripping Transfer-Encoding: // See https://tools.ietf.org/html/rfc7230#section-3.3.1 msg.headers().remove(HttpHeaderNames.TRANSFER_ENCODING); diff --git 
a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerExpectContinueHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerExpectContinueHandler.java index 4757ca29ac3..1055d7b4758 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerExpectContinueHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerExpectContinueHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,9 +16,9 @@ package io.netty.handler.codec.http; import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.util.ReferenceCountUtil; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; @@ -44,7 +44,7 @@ * * */ -public class HttpServerExpectContinueHandler extends ChannelInboundHandlerAdapter { +public class HttpServerExpectContinueHandler implements ChannelHandler { private static final FullHttpResponse EXPECTATION_FAILED = new DefaultFullHttpResponse( HTTP_1_1, HttpResponseStatus.EXPECTATION_FAILED, Unpooled.EMPTY_BUFFER); @@ -84,14 +84,14 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception // the expectation failed so we refuse the request. 
HttpResponse rejection = rejectResponse(req); ReferenceCountUtil.release(msg); - ctx.writeAndFlush(rejection).addListener(ChannelFutureListener.CLOSE_ON_FAILURE); + ctx.writeAndFlush(rejection).addListener(ctx.channel(), ChannelFutureListeners.CLOSE_ON_FAILURE); return; } - ctx.writeAndFlush(accept).addListener(ChannelFutureListener.CLOSE_ON_FAILURE); + ctx.writeAndFlush(accept).addListener(ctx.channel(), ChannelFutureListeners.CLOSE_ON_FAILURE); req.headers().remove(HttpHeaderNames.EXPECT); } } - super.channelRead(ctx, msg); + ctx.fireChannelRead(msg); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerKeepAliveHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerKeepAliveHandler.java index 28813332857..88d42cd13fb 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerKeepAliveHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerKeepAliveHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,13 +15,16 @@ */ package io.netty.handler.codec.http; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelPromise; +import io.netty.util.concurrent.Future; -import static io.netty.handler.codec.http.HttpUtil.*; +import static io.netty.handler.codec.http.HttpUtil.isContentLengthSet; +import static io.netty.handler.codec.http.HttpUtil.isKeepAlive; +import static io.netty.handler.codec.http.HttpUtil.isTransferEncodingChunked; +import static io.netty.handler.codec.http.HttpUtil.setKeepAlive; /** * HttpServerKeepAliveHandler helps close persistent connections when appropriate. 
@@ -44,7 +47,7 @@ * * */ -public class HttpServerKeepAliveHandler extends ChannelDuplexHandler { +public class HttpServerKeepAliveHandler implements ChannelHandler { private static final String MULTIPART_PREFIX = "multipart"; private boolean persistentConnection = true; @@ -61,11 +64,11 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception persistentConnection = isKeepAlive(request); } } - super.channelRead(ctx, msg); + ctx.fireChannelRead(msg); } @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + public Future write(ChannelHandlerContext ctx, Object msg) { // modify message on way out to add headers if needed if (msg instanceof HttpResponse) { final HttpResponse response = (HttpResponse) msg; @@ -81,10 +84,12 @@ public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) setKeepAlive(response, false); } } - if (msg instanceof LastHttpContent && !shouldKeepAlive()) { - promise = promise.unvoid().addListener(ChannelFutureListener.CLOSE); + boolean shouldClose = msg instanceof LastHttpContent && !shouldKeepAlive(); + Future future = ctx.write(msg); + if (shouldClose) { + future.addListener(ctx.channel(), ChannelFutureListeners.CLOSE); } - super.write(ctx, msg, promise); + return future; } private void trackResponse(HttpResponse response) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java index b433ef83723..b27d707ba03 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpServerUpgradeHandler.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -14,15 +14,12 @@ */ package io.netty.handler.codec.http; -import static io.netty.util.AsciiString.containsContentEqualsIgnoreCase; -import static io.netty.util.AsciiString.containsAllContentEqualsIgnoreCase; - import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; import io.netty.channel.ChannelHandlerContext; import io.netty.util.ReferenceCountUtil; import io.netty.util.ReferenceCounted; +import io.netty.util.concurrent.Future; import java.util.ArrayList; import java.util.Collection; @@ -30,7 +27,10 @@ import static io.netty.handler.codec.http.HttpResponseStatus.SWITCHING_PROTOCOLS; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; +import static io.netty.util.AsciiString.containsAllContentEqualsIgnoreCase; +import static io.netty.util.AsciiString.containsContentEqualsIgnoreCase; +import static io.netty.util.internal.StringUtil.COMMA; /** * A server-side handler that receives HTTP requests and optionally performs a protocol switch if @@ -169,6 +169,7 @@ public String toString() { private final SourceCodec sourceCodec; private final UpgradeCodecFactory upgradeCodecFactory; + private final boolean validateHeaders; private boolean handlingUpgrade; /** @@ -199,59 +200,92 @@ public HttpServerUpgradeHandler(SourceCodec sourceCodec, UpgradeCodecFactory upg */ public HttpServerUpgradeHandler( SourceCodec sourceCodec, UpgradeCodecFactory upgradeCodecFactory, int maxContentLength) { + 
this(sourceCodec, upgradeCodecFactory, maxContentLength, true); + } + + /** + * Constructs the upgrader with the supported codecs. + * + * @param sourceCodec the codec that is being used initially + * @param upgradeCodecFactory the factory that creates a new upgrade codec + * for one of the requested upgrade protocols + * @param maxContentLength the maximum length of the content of an upgrade request + * @param validateHeaders validate the header names and values of the upgrade response. + */ + public HttpServerUpgradeHandler(SourceCodec sourceCodec, UpgradeCodecFactory upgradeCodecFactory, + int maxContentLength, boolean validateHeaders) { super(maxContentLength); - this.sourceCodec = checkNotNull(sourceCodec, "sourceCodec"); - this.upgradeCodecFactory = checkNotNull(upgradeCodecFactory, "upgradeCodecFactory"); + this.sourceCodec = requireNonNull(sourceCodec, "sourceCodec"); + this.upgradeCodecFactory = requireNonNull(upgradeCodecFactory, "upgradeCodecFactory"); + this.validateHeaders = validateHeaders; } @Override - protected void decode(ChannelHandlerContext ctx, HttpObject msg, List out) + protected void decode(final ChannelHandlerContext ctx, HttpObject msg) throws Exception { - // Determine if we're already handling an upgrade request or just starting a new one. - handlingUpgrade |= isUpgradeRequest(msg); + if (!handlingUpgrade) { - // Not handling an upgrade request, just pass it to the next handler. - ReferenceCountUtil.retain(msg); - out.add(msg); - return; + // Not handling an upgrade request yet. Check if we received a new upgrade request. 
+ if (msg instanceof HttpRequest) { + HttpRequest req = (HttpRequest) msg; + if (req.headers().contains(HttpHeaderNames.UPGRADE) && + shouldHandleUpgradeRequest(req)) { + handlingUpgrade = true; + } else { + ReferenceCountUtil.retain(msg); + ctx.fireChannelRead(msg); + return; + } + } else { + ReferenceCountUtil.retain(msg); + ctx.fireChannelRead(msg); + return; + } } FullHttpRequest fullRequest; if (msg instanceof FullHttpRequest) { fullRequest = (FullHttpRequest) msg; - ReferenceCountUtil.retain(msg); - out.add(msg); + tryUpgrade(ctx, fullRequest.retain()); } else { // Call the base class to handle the aggregation of the full request. - super.decode(ctx, msg, out); - if (out.isEmpty()) { - // The full request hasn't been created yet, still awaiting more data. - return; - } - - // Finished aggregating the full request, get it from the output list. - assert out.size() == 1; - handlingUpgrade = false; - fullRequest = (FullHttpRequest) out.get(0); + super.decode(new DelegatingChannelHandlerContext(ctx) { + @Override + public ChannelHandlerContext fireChannelRead(Object msg) { + // Finished aggregating the full request, get it from the output list. + handlingUpgrade = false; + tryUpgrade(ctx, (FullHttpRequest) msg); + return this; + } + }, msg); } + } - if (upgrade(ctx, fullRequest)) { - // The upgrade was successful, remove the message from the output list - // so that it's not propagated to the next handler. This request will - // be propagated as a user event instead. - out.clear(); - } + private void tryUpgrade(ChannelHandlerContext ctx, FullHttpRequest request) { + if (!upgrade(ctx, request)) { - // The upgrade did not succeed, just allow the full request to propagate to the - // next handler. + // The upgrade did not succeed, just allow the full request to propagate to the + // next handler. + ctx.fireChannelRead(request); + } } /** - * Determines whether or not the message is an HTTP upgrade request. 
+ * Determines whether the specified upgrade {@link HttpRequest} should be handled by this handler or not. + * This method will be invoked only when the request contains an {@code Upgrade} header. + * It always returns {@code true} by default, which means any request with an {@code Upgrade} header + * will be handled. You can override this method to ignore certain {@code Upgrade} headers, for example: + *
    {@code
    +     * @Override
    +     * protected boolean isUpgradeRequest(HttpRequest req) {
    +     *   // Do not handle WebSocket upgrades.
    +     *   return !req.headers().contains(HttpHeaderNames.UPGRADE, "websocket", false);
    +     * }
    +     * }
    */ - private static boolean isUpgradeRequest(HttpObject msg) { - return msg instanceof HttpRequest && ((HttpRequest) msg).headers().get(HttpHeaderNames.UPGRADE) != null; + protected boolean shouldHandleUpgradeRequest(HttpRequest req) { + return true; } /** @@ -284,16 +318,23 @@ private boolean upgrade(final ChannelHandlerContext ctx, final FullHttpRequest r } // Make sure the CONNECTION header is present. - CharSequence connectionHeader = request.headers().get(HttpHeaderNames.CONNECTION); - if (connectionHeader == null) { + List connectionHeaderValues = request.headers().getAll(HttpHeaderNames.CONNECTION); + + if (connectionHeaderValues == null || connectionHeaderValues.isEmpty()) { return false; } + final StringBuilder concatenatedConnectionValue = new StringBuilder(connectionHeaderValues.size() * 10); + for (CharSequence connectionHeaderValue : connectionHeaderValues) { + concatenatedConnectionValue.append(connectionHeaderValue).append(COMMA); + } + concatenatedConnectionValue.setLength(concatenatedConnectionValue.length() - 1); + // Make sure the CONNECTION header contains UPGRADE as well as all protocol-specific headers. Collection requiredHeaders = upgradeCodec.requiredUpgradeHeaders(); - List values = splitHeader(connectionHeader); + List values = splitHeader(concatenatedConnectionValue); if (!containsContentEqualsIgnoreCase(values, HttpHeaderNames.UPGRADE) || - !containsAllContentEqualsIgnoreCase(values, requiredHeaders)) { + !containsAllContentEqualsIgnoreCase(values, requiredHeaders)) { return false; } @@ -314,40 +355,40 @@ private boolean upgrade(final ChannelHandlerContext ctx, final FullHttpRequest r // Create the user event to be fired once the upgrade completes. 
final UpgradeEvent event = new UpgradeEvent(upgradeProtocol, request); - final UpgradeCodec finalUpgradeCodec = upgradeCodec; - ctx.writeAndFlush(upgradeResponse).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - try { - if (future.isSuccess()) { - // Perform the upgrade to the new protocol. - sourceCodec.upgradeFrom(ctx); - finalUpgradeCodec.upgradeTo(ctx, request); - - // Notify that the upgrade has occurred. Retain the event to offset - // the release() in the finally block. - ctx.fireUserEventTriggered(event.retain()); - - // Remove this handler from the pipeline. - ctx.pipeline().remove(HttpServerUpgradeHandler.this); - } else { - future.channel().close(); - } - } finally { - // Release the event if the upgrade event wasn't fired. - event.release(); - } - } - }); + // After writing the upgrade response we immediately prepare the + // pipeline for the next protocol to avoid a race between completion + // of the write future and receiving data before the pipeline is + // restructured. + try { + Future writeComplete = ctx.writeAndFlush(upgradeResponse); + // Perform the upgrade to the new protocol. + sourceCodec.upgradeFrom(ctx); + upgradeCodec.upgradeTo(ctx, request); + + // Notify that the upgrade has occurred. Retain the event to offset + // the release() in the finally block. + ctx.fireUserEventTriggered(event.retain()); + + // Remove this handler from the pipeline. + ctx.pipeline().remove(HttpServerUpgradeHandler.this); + + // Add the listener last to avoid firing upgrade logic after + // the channel is already closed since the listener may fire + // immediately if the write failed eagerly. + writeComplete.addListener(ctx.channel(), ChannelFutureListeners.CLOSE_ON_FAILURE); + } finally { + // Release the event if the upgrade event wasn't fired. + event.release(); + } return true; } /** * Creates the 101 Switching Protocols response message. 
*/ - private static FullHttpResponse createUpgradeResponse(CharSequence upgradeProtocol) { - DefaultFullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, SWITCHING_PROTOCOLS, - Unpooled.EMPTY_BUFFER, false); + private FullHttpResponse createUpgradeResponse(CharSequence upgradeProtocol) { + DefaultFullHttpResponse res = new DefaultFullHttpResponse( + HTTP_1_1, SWITCHING_PROTOCOLS, Unpooled.EMPTY_BUFFER, validateHeaders); res.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE); res.headers().add(HttpHeaderNames.UPGRADE, upgradeProtocol); return res; @@ -359,7 +400,7 @@ private static FullHttpResponse createUpgradeResponse(CharSequence upgradeProtoc */ private static List splitHeader(CharSequence header) { final StringBuilder builder = new StringBuilder(header.length()); - final List protocols = new ArrayList(4); + final List protocols = new ArrayList<>(4); for (int i = 0; i < header.length(); ++i) { char c = header.charAt(i); if (Character.isWhitespace(c)) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpStatusClass.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpStatusClass.java index dea8152a432..4d5ac8c5495 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpStatusClass.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpStatusClass.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java index 6893293499d..16ec11bbd8e 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,16 +15,25 @@ */ package io.netty.handler.codec.http; -import io.netty.util.AsciiString; -import io.netty.util.CharsetUtil; +import static java.util.Objects.requireNonNull; +import java.net.InetSocketAddress; import java.net.URI; -import java.util.ArrayList; import java.nio.charset.Charset; +import java.nio.charset.IllegalCharsetNameException; import java.nio.charset.UnsupportedCharsetException; +import java.util.ArrayList; import java.util.Iterator; import java.util.List; +import io.netty.util.AsciiString; +import io.netty.util.CharsetUtil; +import io.netty.util.NetUtil; +import io.netty.util.internal.UnstableApi; + +import static io.netty.util.internal.StringUtil.COMMA; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; + /** * Utility methods useful in the HTTP context. 
*/ @@ -32,6 +41,7 @@ public final class HttpUtil { private static final AsciiString CHARSET_EQUALS = AsciiString.of(HttpHeaderValues.CHARSET + "="); private static final AsciiString SEMICOLON = AsciiString.cached(";"); + private static final String COMMA_STRING = String.valueOf(COMMA); private HttpUtil() { } @@ -58,20 +68,14 @@ public static boolean isAsteriskForm(URI uri) { /** * Returns {@code true} if and only if the connection can remain open and * thus 'kept alive'. This methods respects the value of the. + * * {@code "Connection"} header first and then the return value of * {@link HttpVersion#isKeepAliveDefault()}. */ public static boolean isKeepAlive(HttpMessage message) { - CharSequence connection = message.headers().get(HttpHeaderNames.CONNECTION); - if (connection != null && HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(connection)) { - return false; - } - - if (message.protocolVersion().isKeepAliveDefault()) { - return !HttpHeaderValues.CLOSE.contentEqualsIgnoreCase(connection); - } else { - return HttpHeaderValues.KEEP_ALIVE.contentEqualsIgnoreCase(connection); - } + return !message.headers().containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE, true) && + (message.protocolVersion().isKeepAliveDefault() || + message.headers().containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.KEEP_ALIVE, true)); } /** @@ -191,6 +195,7 @@ public static long getContentLength(HttpMessage message, long defaultValue) { /** * Get an {@code int} representation of {@link #getContentLength(HttpMessage, long)}. + * * @return the content length or {@code defaultValue} if this message does * not have the {@code "Content-Length"} header or its value is not * a number. Not to exceed the boundaries of integer. 
@@ -247,13 +252,9 @@ public static boolean isContentLengthSet(HttpMessage m) { * present */ public static boolean is100ContinueExpected(HttpMessage message) { - if (!isExpectHeaderValid(message)) { - return false; - } - - final String expectValue = message.headers().get(HttpHeaderNames.EXPECT); - // unquoted tokens in the expect header are case-insensitive, thus 100-continue is case insensitive - return HttpHeaderValues.CONTINUE.toString().equalsIgnoreCase(expectValue); + return isExpectHeaderValid(message) + // unquoted tokens in the expect header are case-insensitive, thus 100-continue is case insensitive + && message.headers().contains(HttpHeaderNames.EXPECT, HttpHeaderValues.CONTINUE, true); } /** @@ -305,12 +306,13 @@ public static void set100ContinueExpected(HttpMessage message, boolean expected) * @return True if transfer encoding is chunked, otherwise false */ public static boolean isTransferEncodingChunked(HttpMessage message) { - return message.headers().contains(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED, true); + return message.headers().containsValue(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED, true); } /** * Set the {@link HttpHeaderNames#TRANSFER_ENCODING} to either include {@link HttpHeaderValues#CHUNKED} if * {@code chunked} is {@code true}, or remove {@link HttpHeaderValues#CHUNKED} if {@code chunked} is {@code false}. + * * @param m The message which contains the headers to modify. * @param chunked if {@code true} then include {@link HttpHeaderValues#CHUNKED} in the headers. otherwise remove * {@link HttpHeaderValues#CHUNKED} from the headers. 
@@ -324,7 +326,7 @@ public static void setTransferEncodingChunked(HttpMessage m, boolean chunked) { if (encodings.isEmpty()) { return; } - List values = new ArrayList(encodings); + List values = new ArrayList<>(encodings); Iterator valuesIt = values.iterator(); while (valuesIt.hasNext()) { CharSequence value = valuesIt.next(); @@ -369,7 +371,7 @@ public static Charset getCharset(CharSequence contentTypeValue) { /** * Fetch charset from message's Content-Type header. * - * @param message entity to fetch Content-Type header from + * @param message entity to fetch Content-Type header from * @param defaultCharset result to use in case of empty, incorrect or doesn't contain required part header value * @return the charset from message's Content-Type header or {@code defaultCharset} * if charset is not presented or unparsable @@ -387,17 +389,23 @@ public static Charset getCharset(HttpMessage message, Charset defaultCharset) { * Fetch charset from Content-Type header value. * * @param contentTypeValue Content-Type header value to parse - * @param defaultCharset result to use in case of empty, incorrect or doesn't contain required part header value + * @param defaultCharset result to use in case of empty, incorrect or doesn't contain required part header value * @return the charset from message's Content-Type header or {@code defaultCharset} * if charset is not presented or unparsable */ public static Charset getCharset(CharSequence contentTypeValue, Charset defaultCharset) { if (contentTypeValue != null) { - CharSequence charsetCharSequence = getCharsetAsSequence(contentTypeValue); - if (charsetCharSequence != null) { + CharSequence charsetRaw = getCharsetAsSequence(contentTypeValue); + if (charsetRaw != null) { + if (charsetRaw.length() > 2) { // at least contains 2 quotes(") + if (charsetRaw.charAt(0) == '"' && charsetRaw.charAt(charsetRaw.length() - 1) == '"') { + charsetRaw = charsetRaw.subSequence(1, charsetRaw.length() - 1); + } + } try { - return 
Charset.forName(charsetCharSequence.toString()); - } catch (UnsupportedCharsetException ignored) { + return Charset.forName(charsetRaw.toString()); + } catch (UnsupportedCharsetException | IllegalCharsetNameException ignored) { + // just return the default charset return defaultCharset; } } else { @@ -454,16 +462,24 @@ public static CharSequence getCharsetAsSequence(HttpMessage message) { * @throws NullPointerException in case if {@code contentTypeValue == null} */ public static CharSequence getCharsetAsSequence(CharSequence contentTypeValue) { - if (contentTypeValue == null) { - throw new NullPointerException("contentTypeValue"); - } + requireNonNull(contentTypeValue, "contentTypeValue"); + int indexOfCharset = AsciiString.indexOfIgnoreCaseAscii(contentTypeValue, CHARSET_EQUALS, 0); - if (indexOfCharset != AsciiString.INDEX_NOT_FOUND) { - int indexOfEncoding = indexOfCharset + CHARSET_EQUALS.length(); - if (indexOfEncoding < contentTypeValue.length()) { - return contentTypeValue.subSequence(indexOfEncoding, contentTypeValue.length()); + if (indexOfCharset == AsciiString.INDEX_NOT_FOUND) { + return null; + } + + int indexOfEncoding = indexOfCharset + CHARSET_EQUALS.length(); + if (indexOfEncoding < contentTypeValue.length()) { + CharSequence charsetCandidate = contentTypeValue.subSequence(indexOfEncoding, contentTypeValue.length()); + int indexOfSemicolon = AsciiString.indexOfIgnoreCaseAscii(charsetCandidate, SEMICOLON, 0); + if (indexOfSemicolon == AsciiString.INDEX_NOT_FOUND) { + return charsetCandidate; } + + return charsetCandidate.subSequence(0, indexOfSemicolon); } + return null; } @@ -500,9 +516,7 @@ public static CharSequence getMimeType(HttpMessage message) { * @throws NullPointerException in case if {@code contentTypeValue == null} */ public static CharSequence getMimeType(CharSequence contentTypeValue) { - if (contentTypeValue == null) { - throw new NullPointerException("contentTypeValue"); - } + requireNonNull(contentTypeValue, "contentTypeValue"); int 
indexOfSemicolon = AsciiString.indexOfIgnoreCaseAscii(contentTypeValue, SEMICOLON, 0); if (indexOfSemicolon != AsciiString.INDEX_NOT_FOUND) { @@ -511,4 +525,98 @@ public static CharSequence getMimeType(CharSequence contentTypeValue) { return contentTypeValue.length() > 0 ? contentTypeValue : null; } } + + /** + * Formats the host string of an address so it can be used for computing an HTTP component + * such as a URL or a Host header + * + * @param addr the address + * @return the formatted String + */ + public static String formatHostnameForHttp(InetSocketAddress addr) { + String hostString = NetUtil.getHostname(addr); + if (NetUtil.isValidIpV6Address(hostString)) { + if (!addr.isUnresolved()) { + hostString = NetUtil.toAddressString(addr.getAddress()); + } + return '[' + hostString + ']'; + } + return hostString; + } + + /** + * Validates, and optionally extracts the content length from headers. This method is not intended for + * general use, but is here to be shared between HTTP/1 and HTTP/2 parsing. + * + * @param contentLengthFields the content-length header fields. + * @param isHttp10OrEarlier {@code true} if we are handling HTTP/1.0 or earlier + * @param allowDuplicateContentLengths {@code true} if multiple, identical-value content lengths should be allowed. + * @return the normalized content length from the headers or {@code -1} if the fields were empty. 
+ * @throws IllegalArgumentException if the content-length fields are not valid + */ + @UnstableApi + public static long normalizeAndGetContentLength( + List contentLengthFields, boolean isHttp10OrEarlier, + boolean allowDuplicateContentLengths) { + if (contentLengthFields.isEmpty()) { + return -1; + } + + // Guard against multiple Content-Length headers as stated in + // https://tools.ietf.org/html/rfc7230#section-3.3.2: + // + // If a message is received that has multiple Content-Length header + // fields with field-values consisting of the same decimal value, or a + // single Content-Length header field with a field value containing a + // list of identical decimal values (e.g., "Content-Length: 42, 42"), + // indicating that duplicate Content-Length header fields have been + // generated or combined by an upstream message processor, then the + // recipient MUST either reject the message as invalid or replace the + // duplicated field-values with a single valid Content-Length field + // containing that decimal value prior to determining the message body + // length or forwarding the message. 
+ String firstField = contentLengthFields.get(0).toString(); + boolean multipleContentLengths = + contentLengthFields.size() > 1 || firstField.indexOf(COMMA) >= 0; + + if (multipleContentLengths && !isHttp10OrEarlier) { + if (allowDuplicateContentLengths) { + // Find and enforce that all Content-Length values are the same + String firstValue = null; + for (CharSequence field : contentLengthFields) { + String[] tokens = field.toString().split(COMMA_STRING, -1); + for (String token : tokens) { + String trimmed = token.trim(); + if (firstValue == null) { + firstValue = trimmed; + } else if (!trimmed.equals(firstValue)) { + throw new IllegalArgumentException( + "Multiple Content-Length values found: " + contentLengthFields); + } + } + } + // Replace the duplicated field-values with a single valid Content-Length field + firstField = firstValue; + } else { + // Reject the message as invalid + throw new IllegalArgumentException( + "Multiple Content-Length values found: " + contentLengthFields); + } + } + // Ensure we not allow sign as part of the content-length: + // See https://github.com/squid-cache/squid/security/advisories/GHSA-qf3v-rc95-96j5 + if (firstField.isEmpty() || !Character.isDigit(firstField.charAt(0))) { + // Reject the message as invalid + throw new IllegalArgumentException( + "Content-Length value is not a number: " + firstField); + } + try { + final long value = Long.parseLong(firstField); + return checkPositiveOrZero(value, "Content-Length value"); + } catch (NumberFormatException e) { + // Reject the message as invalid + throw new IllegalArgumentException( + "Content-Length value is not a number: " + firstField, e); + } + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpVersion.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpVersion.java index a643f42458d..3bf52b12509 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpVersion.java +++ 
b/codec-http/src/main/java/io/netty/handler/codec/http/HttpVersion.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,10 @@ */ package io.netty.handler.codec.http; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static io.netty.util.internal.ObjectUtil.checkNonEmptyAfterTrim; +import static java.util.Objects.requireNonNull; + import io.netty.buffer.ByteBuf; import io.netty.util.CharsetUtil; @@ -23,8 +27,8 @@ /** * The version of HTTP or its derived protocols, such as - * RTSP and - * ICAP. + * RTSP and + * ICAP. */ public class HttpVersion implements Comparable { @@ -53,9 +57,7 @@ public class HttpVersion implements Comparable { * returned. */ public static HttpVersion valueOf(String text) { - if (text == null) { - throw new NullPointerException("text"); - } + requireNonNull(text, "text"); text = text.trim(); @@ -68,8 +70,8 @@ public static HttpVersion valueOf(String text) { // expected to be case-sensitive // // See: - // * http://trac.tools.ietf.org/wg/httpbis/trac/ticket/1 - // * http://trac.tools.ietf.org/wg/httpbis/trac/wiki + // * https://trac.tools.ietf.org/wg/httpbis/trac/ticket/1 + // * https://trac.tools.ietf.org/wg/httpbis/trac/wiki // HttpVersion version = version0(text); if (version == null) { @@ -99,22 +101,15 @@ private static HttpVersion version0(String text) { * Creates a new HTTP version with the specified version string. You will * not need to create a new instance unless you are implementing a protocol * derived from HTTP, such as - * RTSP and - * ICAP. + * RTSP and + * ICAP. 
* * @param keepAliveDefault * {@code true} if and only if the connection is kept alive unless * the {@code "Connection"} header is set to {@code "close"} explicitly. */ public HttpVersion(String text, boolean keepAliveDefault) { - if (text == null) { - throw new NullPointerException("text"); - } - - text = text.trim().toUpperCase(); - if (text.isEmpty()) { - throw new IllegalArgumentException("empty text"); - } + text = checkNonEmptyAfterTrim(text, "text").toUpperCase(); Matcher m = VERSION_PATTERN.matcher(text); if (!m.matches()) { @@ -133,8 +128,8 @@ public HttpVersion(String text, boolean keepAliveDefault) { * Creates a new HTTP version with the specified protocol name and version * numbers. You will not need to create a new instance unless you are * implementing a protocol derived from HTTP, such as - * RTSP and - * ICAP + * RTSP and + * ICAP * * @param keepAliveDefault * {@code true} if and only if the connection is kept alive unless @@ -149,14 +144,7 @@ public HttpVersion( private HttpVersion( String protocolName, int majorVersion, int minorVersion, boolean keepAliveDefault, boolean bytes) { - if (protocolName == null) { - throw new NullPointerException("protocolName"); - } - - protocolName = protocolName.trim().toUpperCase(); - if (protocolName.isEmpty()) { - throw new IllegalArgumentException("empty protocolName"); - } + protocolName = checkNonEmptyAfterTrim(protocolName, "protocolName").toUpperCase(); for (int i = 0; i < protocolName.length(); i ++) { if (Character.isISOControl(protocolName.charAt(i)) || @@ -165,12 +153,8 @@ private HttpVersion( } } - if (majorVersion < 0) { - throw new IllegalArgumentException("negative majorVersion"); - } - if (minorVersion < 0) { - throw new IllegalArgumentException("negative minorVersion"); - } + checkPositiveOrZero(majorVersion, "majorVersion"); + checkPositiveOrZero(minorVersion, "minorVersion"); this.protocolName = protocolName; this.majorVersion = majorVersion; diff --git 
a/codec-http/src/main/java/io/netty/handler/codec/http/LastHttpContent.java b/codec-http/src/main/java/io/netty/handler/codec/http/LastHttpContent.java index be62a47f2ea..a52a3830f90 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/LastHttpContent.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/LastHttpContent.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringDecoder.java index 7c631f31ce9..8e319c7448c 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,23 +16,22 @@ package io.netty.handler.codec.http; import io.netty.util.CharsetUtil; +import io.netty.util.internal.PlatformDependent; import java.net.URI; import java.net.URLDecoder; -import java.nio.ByteBuffer; -import java.nio.CharBuffer; -import java.nio.charset.CharacterCodingException; import java.nio.charset.Charset; -import java.nio.charset.CharsetDecoder; -import java.nio.charset.CoderResult; import java.util.ArrayList; import java.util.Collections; import java.util.LinkedHashMap; import java.util.List; import java.util.Map; -import static io.netty.util.internal.ObjectUtil.*; -import static io.netty.util.internal.StringUtil.*; +import static io.netty.util.internal.ObjectUtil.checkPositive; +import static io.netty.util.internal.StringUtil.EMPTY_STRING; +import static io.netty.util.internal.StringUtil.SPACE; +import static io.netty.util.internal.StringUtil.decodeHexByte; +import static java.util.Objects.requireNonNull; /** * Splits an HTTP query string into a path string and key-value parameter pairs. @@ -54,7 +53,7 @@ * *

    HashDOS vulnerability fix

    * - * As a workaround to the HashDOS vulnerability, the decoder + * As a workaround to the HashDOS vulnerability, the decoder * limits the maximum number of decoded key-value parameter pairs, up to {@literal 1024} by * default, and you can configure it when you construct the decoder by passing an additional * integer parameter. @@ -68,6 +67,7 @@ public class QueryStringDecoder { private final Charset charset; private final String uri; private final int maxParams; + private final boolean semicolonIsNormalChar; private int pathEndIdx; private String path; private Map> params; @@ -109,9 +109,19 @@ public QueryStringDecoder(String uri, Charset charset, boolean hasPath) { * specified charset. */ public QueryStringDecoder(String uri, Charset charset, boolean hasPath, int maxParams) { - this.uri = checkNotNull(uri, "uri"); - this.charset = checkNotNull(charset, "charset"); + this(uri, charset, hasPath, maxParams, false); + } + + /** + * Creates a new decoder that decodes the specified URI encoded in the + * specified charset. + */ + public QueryStringDecoder(String uri, Charset charset, boolean hasPath, + int maxParams, boolean semicolonIsNormalChar) { + this.uri = requireNonNull(uri, "uri"); + this.charset = requireNonNull(charset, "charset"); this.maxParams = checkPositive(maxParams, "maxParams"); + this.semicolonIsNormalChar = semicolonIsNormalChar; // `-1` means that path end index will be initialized lazily pathEndIdx = hasPath ? -1 : 0; @@ -138,6 +148,14 @@ public QueryStringDecoder(URI uri, Charset charset) { * specified charset. */ public QueryStringDecoder(URI uri, Charset charset, int maxParams) { + this(uri, charset, maxParams, false); + } + + /** + * Creates a new decoder that decodes the specified URI encoded in the + * specified charset. 
+ */ + public QueryStringDecoder(URI uri, Charset charset, int maxParams, boolean semicolonIsNormalChar) { String rawPath = uri.getRawPath(); if (rawPath == null) { rawPath = EMPTY_STRING; @@ -145,8 +163,9 @@ public QueryStringDecoder(URI uri, Charset charset, int maxParams) { String rawQuery = uri.getRawQuery(); // Also take care of cut of things like "http://localhost" this.uri = rawQuery == null? rawPath : rawPath + '?' + rawQuery; - this.charset = checkNotNull(charset, "charset"); + this.charset = requireNonNull(charset, "charset"); this.maxParams = checkPositive(maxParams, "maxParams"); + this.semicolonIsNormalChar = semicolonIsNormalChar; pathEndIdx = rawPath.length(); } @@ -177,7 +196,7 @@ public String path() { */ public Map> parameters() { if (params == null) { - params = decodeParams(uri, pathEndIdx(), charset, maxParams); + params = decodeParams(uri, pathEndIdx(), charset, maxParams, semicolonIsNormalChar); } return params; } @@ -204,7 +223,8 @@ private int pathEndIdx() { return pathEndIdx; } - private static Map> decodeParams(String s, int from, Charset charset, int paramsLimit) { + private static Map> decodeParams(String s, int from, Charset charset, int paramsLimit, + boolean semicolonIsNormalChar) { int len = s.length(); if (from >= len) { return Collections.emptyMap(); @@ -212,7 +232,7 @@ private static Map> decodeParams(String s, int from, Charse if (s.charAt(from) == '?') { from++; } - Map> params = new LinkedHashMap>(); + Map> params = new LinkedHashMap<>(); int nameStart = from; int valueStart = -1; int i; @@ -226,8 +246,12 @@ private static Map> decodeParams(String s, int from, Charse valueStart = i + 1; } break; - case '&': case ';': + if (semicolonIsNormalChar) { + continue; + } + // fall-through + case '&': if (addParam(s, nameStart, valueStart, i, params, charset)) { paramsLimit--; if (paramsLimit == 0) { @@ -258,7 +282,7 @@ private static boolean addParam(String s, int nameStart, int valueStart, int val String value = decodeComponent(s, 
valueStart, valueEnd, charset, false); List values = params.get(name); if (values == null) { - values = new ArrayList(1); // Often there's only 1 value. + values = new ArrayList<>(1); // Often there's only 1 value. params.put(name, values); } values.add(value); @@ -266,7 +290,7 @@ private static boolean addParam(String s, int nameStart, int valueStart, int val } /** - * Decodes a bit of an URL encoded by a browser. + * Decodes a bit of a URL encoded by a browser. *

    * This is equivalent to calling {@link #decodeComponent(String, Charset)} * with the UTF-8 charset (recommended to comply with RFC 3986, Section 2). @@ -281,7 +305,7 @@ public static String decodeComponent(final String s) { } /** - * Decodes a bit of an URL encoded by a browser. + * Decodes a bit of a URL encoded by a browser. *

    * The string is expected to be encoded as per RFC 3986, Section 2. * This is the encoding used by JavaScript functions {@code encodeURI} @@ -326,12 +350,10 @@ private static String decodeComponent(String s, int from, int toExcluded, Charse return s.substring(from, toExcluded); } - CharsetDecoder decoder = CharsetUtil.decoder(charset); - // Each encoded byte takes 3 characters (e.g. "%20") int decodedCapacity = (toExcluded - firstEscaped) / 3; - ByteBuffer byteBuf = ByteBuffer.allocate(decodedCapacity); - CharBuffer charBuf = CharBuffer.allocate(decodedCapacity); + byte[] buf = PlatformDependent.allocateUninitializedArray(decodedCapacity); + int bufIdx; StringBuilder strBuf = new StringBuilder(len); strBuf.append(s, from, firstEscaped); @@ -343,31 +365,17 @@ private static String decodeComponent(String s, int from, int toExcluded, Charse continue; } - byteBuf.clear(); + bufIdx = 0; do { if (i + 3 > toExcluded) { throw new IllegalArgumentException("unterminated escape sequence at index " + i + " of: " + s); } - byteBuf.put(decodeHexByte(s, i + 1)); + buf[bufIdx++] = decodeHexByte(s, i + 1); i += 3; } while (i < toExcluded && s.charAt(i) == '%'); i--; - byteBuf.flip(); - charBuf.clear(); - CoderResult result = decoder.reset().decode(byteBuf, charBuf, true); - try { - if (!result.isUnderflow()) { - result.throwException(); - } - result = decoder.flush(charBuf); - if (!result.isUnderflow()) { - result.throwException(); - } - } catch (CharacterCodingException ex) { - throw new IllegalStateException(ex); - } - strBuf.append(charBuf.flip()); + strBuf.append(new String(buf, 0, bufIdx, charset)); } return strBuf.toString(); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringEncoder.java index cb1de9fefd9..f7bb2de27e2 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringEncoder.java +++ 
b/codec-http/src/main/java/io/netty/handler/codec/http/QueryStringEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,17 +15,20 @@ */ package io.netty.handler.codec.http; -import io.netty.util.internal.ObjectUtil; +import static java.util.Objects.requireNonNull; + +import io.netty.buffer.ByteBufUtil; +import io.netty.util.CharsetUtil; +import io.netty.util.internal.StringUtil; -import java.io.UnsupportedEncodingException; import java.net.URI; import java.net.URISyntaxException; import java.net.URLEncoder; import java.nio.charset.Charset; -import java.nio.charset.UnsupportedCharsetException; +import java.util.Objects; /** - * Creates an URL-encoded URI from a path string and key-value parameter pairs. + * Creates a URL-encoded URI from a path string and key-value parameter pairs. * This encoder is for one time use only. Create a new instance for each URI. * *

    @@ -33,13 +36,16 @@
      * encoder.addParam("recipient", "world");
      * assert encoder.toString().equals("/hello?recipient=world");
      * 
    + * * @see QueryStringDecoder */ public class QueryStringEncoder { - private final String charsetName; + private final Charset charset; private final StringBuilder uriBuilder; private boolean hasParams; + private static final byte WRITE_UTF_UNKNOWN = (byte) '?'; + private static final char[] CHAR_MAP = "0123456789ABCDEF".toCharArray(); /** * Creates a new encoder that encodes a URI that starts with the specified @@ -54,25 +60,35 @@ public QueryStringEncoder(String uri) { * path string in the specified charset. */ public QueryStringEncoder(String uri, Charset charset) { + Objects.requireNonNull(charset, "charset"); uriBuilder = new StringBuilder(uri); - charsetName = charset.name(); + this.charset = CharsetUtil.UTF_8.equals(charset) ? null : charset; } /** * Adds a parameter with the specified name and value to this encoder. */ public void addParam(String name, String value) { - ObjectUtil.checkNotNull(name, "name"); + requireNonNull(name, "name"); if (hasParams) { uriBuilder.append('&'); } else { uriBuilder.append('?'); hasParams = true; } - appendComponent(name, charsetName, uriBuilder); + + encodeComponent(name); if (value != null) { uriBuilder.append('='); - appendComponent(value, charsetName, uriBuilder); + encodeComponent(value); + } + } + + private void encodeComponent(CharSequence s) { + if (charset == null) { + encodeUtf8Component(s); + } else { + encodeNonUtf8Component(s); } } @@ -95,28 +111,142 @@ public String toString() { return uriBuilder.toString(); } - private static void appendComponent(String s, String charset, StringBuilder sb) { - try { - s = URLEncoder.encode(s, charset); - } catch (UnsupportedEncodingException ignored) { - throw new UnsupportedCharsetException(charset); + /** + * Encode the String as per RFC 3986, Section 2. + *

    + * There is a little different between the JDK's encode method : {@link URLEncoder#encode(String, String)}. + * The JDK's encoder encode the space to {@code +} and this method directly encode the blank to {@code %20} + * beyond that , this method reuse the {@link #uriBuilder} in this class rather then create a new one, + * thus generates less garbage for the GC. + * + * @param s The String to encode + */ + private void encodeNonUtf8Component(CharSequence s) { + //Don't allocate memory until needed + char[] buf = null; + + for (int i = 0, len = s.length(); i < len;) { + char c = s.charAt(i); + if (dontNeedEncoding(c)) { + uriBuilder.append(c); + i++; + } else { + int index = 0; + if (buf == null) { + buf = new char[s.length() - i]; + } + + do { + buf[index] = c; + index++; + i++; + } while (i < s.length() && !dontNeedEncoding(c = s.charAt(i))); + + byte[] bytes = new String(buf, 0, index).getBytes(charset); + + for (byte b : bytes) { + appendEncoded(b); + } + } } - // replace all '+' with "%20" - int idx = s.indexOf('+'); - if (idx == -1) { - sb.append(s); - return; + } + + /** + * @see ByteBufUtil#writeUtf8(io.netty.buffer.ByteBuf, CharSequence, int, int) + */ + private void encodeUtf8Component(CharSequence s) { + for (int i = 0, len = s.length(); i < len; i++) { + char c = s.charAt(i); + if (!dontNeedEncoding(c)) { + encodeUtf8Component(s, i, len); + return; + } } - sb.append(s, 0, idx).append("%20"); - int size = s.length(); - idx++; - for (; idx < size; idx++) { - char c = s.charAt(idx); - if (c != '+') { - sb.append(c); + uriBuilder.append(s); + } + + private void encodeUtf8Component(CharSequence s, int encodingStart, int len) { + if (encodingStart > 0) { + // Append non-encoded characters directly first. 
+ uriBuilder.append(s, 0, encodingStart); + } + encodeUtf8ComponentSlow(s, encodingStart, len); + } + + private void encodeUtf8ComponentSlow(CharSequence s, int start, int len) { + for (int i = start; i < len; i++) { + char c = s.charAt(i); + if (c < 0x80) { + if (dontNeedEncoding(c)) { + uriBuilder.append(c); + } else { + appendEncoded(c); + } + } else if (c < 0x800) { + appendEncoded(0xc0 | (c >> 6)); + appendEncoded(0x80 | (c & 0x3f)); + } else if (StringUtil.isSurrogate(c)) { + if (!Character.isHighSurrogate(c)) { + appendEncoded(WRITE_UTF_UNKNOWN); + continue; + } + // Surrogate Pair consumes 2 characters. + if (++i == s.length()) { + appendEncoded(WRITE_UTF_UNKNOWN); + break; + } + // Extra method to allow inlining the rest of writeUtf8 which is the most likely code path. + writeUtf8Surrogate(c, s.charAt(i)); } else { - sb.append("%20"); + appendEncoded(0xe0 | (c >> 12)); + appendEncoded(0x80 | ((c >> 6) & 0x3f)); + appendEncoded(0x80 | (c & 0x3f)); } } } + + private void writeUtf8Surrogate(char c, char c2) { + if (!Character.isLowSurrogate(c2)) { + appendEncoded(WRITE_UTF_UNKNOWN); + appendEncoded(Character.isHighSurrogate(c2) ? WRITE_UTF_UNKNOWN : c2); + return; + } + int codePoint = Character.toCodePoint(c, c2); + // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G2630. + appendEncoded(0xf0 | (codePoint >> 18)); + appendEncoded(0x80 | ((codePoint >> 12) & 0x3f)); + appendEncoded(0x80 | ((codePoint >> 6) & 0x3f)); + appendEncoded(0x80 | (codePoint & 0x3f)); + } + + private void appendEncoded(int b) { + uriBuilder.append('%').append(forDigit(b >> 4)).append(forDigit(b)); + } + + /** + * Convert the given digit to a upper hexadecimal char. + * + * @param digit the number to convert to a character. + * @return the {@code char} representation of the specified digit + * in hexadecimal. + */ + private static char forDigit(int digit) { + return CHAR_MAP[digit & 0xF]; + } + + /** + * Determines whether the given character is a unreserved character. + *

    + * unreserved characters do not need to be encoded, and include uppercase and lowercase + * letters, decimal digits, hyphen, period, underscore, and tilde. + *

    + * unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~" / "*" + * + * @param ch the char to be judged whether it need to be encode + * @return true or false + */ + private static boolean dontNeedEncoding(char ch) { + return ch >= 'a' && ch <= 'z' || ch >= 'A' && ch <= 'Z' || ch >= '0' && ch <= '9' + || ch == '-' || ch == '_' || ch == '.' || ch == '*' || ch == '~'; + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/ReadOnlyHttpHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/http/ReadOnlyHttpHeaders.java index 995e2ad90a5..36e258875bc 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/ReadOnlyHttpHeaders.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/ReadOnlyHttpHeaders.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -78,7 +78,8 @@ private CharSequence get0(CharSequence name) { for (int i = 0; i < nameValuePairs.length; i += 2) { CharSequence roName = nameValuePairs[i]; if (AsciiString.hashCode(roName) == nameHash && contentEqualsIgnoreCase(roName, name)) { - return nameValuePairs[i + 1]; + // Suppress a warning out of bounds access since the constructor allows only pairs + return nameValuePairs[i + 1]; // lgtm[java/index-out-of-bounds] } } return null; @@ -132,11 +133,11 @@ public List getAll(String name) { return Collections.emptyList(); } final int nameHash = AsciiString.hashCode(name); - List values = new ArrayList(4); + List values = new ArrayList<>(4); for (int i = 0; i < nameValuePairs.length; i += 2) { CharSequence roName = nameValuePairs[i]; if (AsciiString.hashCode(roName) == nameHash && contentEqualsIgnoreCase(roName, name)) { - 
values.add(nameValuePairs[i + 1].toString()); + values.add(nameValuePairs[i + 1].toString()); // lgtm[java/index-out-of-bounds] } } return values; @@ -147,10 +148,10 @@ public List> entries() { if (isEmpty()) { return Collections.emptyList(); } - List> entries = new ArrayList>(size()); + List> entries = new ArrayList<>(size()); for (int i = 0; i < nameValuePairs.length; i += 2) { - entries.add(new SimpleImmutableEntry(nameValuePairs[i].toString(), - nameValuePairs[i + 1].toString())); + entries.add(new SimpleImmutableEntry<>(nameValuePairs[i].toString(), + nameValuePairs[i + 1].toString())); // lgtm[java/index-out-of-bounds] } return entries; } @@ -170,14 +171,14 @@ public boolean containsValue(CharSequence name, CharSequence value, boolean igno if (ignoreCase) { for (int i = 0; i < nameValuePairs.length; i += 2) { if (contentEqualsIgnoreCase(nameValuePairs[i], name) && - contentEqualsIgnoreCase(nameValuePairs[i + 1], value)) { + contentEqualsIgnoreCase(nameValuePairs[i + 1], value)) { // lgtm[java/index-out-of-bounds] return true; } } } else { for (int i = 0; i < nameValuePairs.length; i += 2) { if (contentEqualsIgnoreCase(nameValuePairs[i], name) && - contentEquals(nameValuePairs[i + 1], value)) { + contentEquals(nameValuePairs[i + 1], value)) { // lgtm[java/index-out-of-bounds] return true; } } @@ -220,7 +221,7 @@ public Set names() { if (isEmpty()) { return Collections.emptySet(); } - Set names = new LinkedHashSet(size()); + Set names = new LinkedHashSet<>(size()); for (int i = 0; i < nameValuePairs.length; i += 2) { names.add(nameValuePairs[i].toString()); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/ServerCookieEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/ServerCookieEncoder.java deleted file mode 100644 index 4b48c6fdbad..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/http/ServerCookieEncoder.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project 
licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http; - -import io.netty.handler.codec.http.cookie.ServerCookieDecoder; - -import java.util.Collection; -import java.util.List; - -/** - * A RFC6265 compliant cookie encoder to be used server side, - * so some fields are sent (Version is typically ignored). - * - * As Netty's Cookie merges Expires and MaxAge into one single field, only Max-Age field is sent. - * - * Note that multiple cookies must be sent as separate "Set-Cookie" headers. - * - *

    - * // Example
    - * {@link HttpResponse} res = ...;
    - * res.setHeader("Set-Cookie", {@link ServerCookieEncoder}.encode("JSESSIONID", "1234"));
    - * 
    - * - * @see ServerCookieDecoder - * - * @deprecated Use {@link io.netty.handler.codec.http.cookie.ServerCookieEncoder} instead - */ -@Deprecated -public final class ServerCookieEncoder { - - /** - * Encodes the specified cookie name-value pair into a Set-Cookie header value. - * - * @param name the cookie name - * @param value the cookie value - * @return a single Set-Cookie header value - */ - @Deprecated - public static String encode(String name, String value) { - return io.netty.handler.codec.http.cookie.ServerCookieEncoder.LAX.encode(name, value); - } - - /** - * Encodes the specified cookie into a Set-Cookie header value. - * - * @param cookie the cookie - * @return a single Set-Cookie header value - */ - @Deprecated - public static String encode(Cookie cookie) { - return io.netty.handler.codec.http.cookie.ServerCookieEncoder.LAX.encode(cookie); - } - - /** - * Batch encodes cookies into Set-Cookie header values. - * - * @param cookies a bunch of cookies - * @return the corresponding bunch of Set-Cookie headers - */ - @Deprecated - public static List encode(Cookie... cookies) { - return io.netty.handler.codec.http.cookie.ServerCookieEncoder.LAX.encode(cookies); - } - - /** - * Batch encodes cookies into Set-Cookie header values. - * - * @param cookies a bunch of cookies - * @return the corresponding bunch of Set-Cookie headers - */ - @Deprecated - public static List encode(Collection cookies) { - return io.netty.handler.codec.http.cookie.ServerCookieEncoder.LAX.encode(cookies); - } - - /** - * Batch encodes cookies into Set-Cookie header values. 
- * - * @param cookies a bunch of cookies - * @return the corresponding bunch of Set-Cookie headers - */ - @Deprecated - public static List encode(Iterable cookies) { - return io.netty.handler.codec.http.cookie.ServerCookieEncoder.LAX.encode(cookies); - } - - private ServerCookieEncoder() { - // Unused - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieDecoder.java index e391c2144f1..63bfed75fea 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,13 +16,14 @@ package io.netty.handler.codec.http.cookie; import io.netty.handler.codec.DateFormatter; +import io.netty.handler.codec.http.cookie.CookieHeaderNames.SameSite; import java.util.Date; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** - * A RFC6265 compliant cookie decoder to be used client side. + * A RFC6265 compliant cookie decoder to be used client side. * * It will store the way the raw value was wrapped in {@link Cookie#setWrap(boolean)} so it can be * eventually sent back to the Origin server as is. 
@@ -52,7 +53,7 @@ private ClientCookieDecoder(boolean strict) { * @return the decoded {@link Cookie} */ public Cookie decode(String header) { - final int headerLen = checkNotNull(header, "header").length(); + final int headerLen = requireNonNull(header, "header").length(); if (headerLen == 0) { return null; @@ -140,7 +141,7 @@ public Cookie decode(String header) { cookieBuilder.appendAttribute(nameBegin, nameEnd, valueBegin, valueEnd); } } - return cookieBuilder.cookie(); + return cookieBuilder != null ? cookieBuilder.cookie() : null; } private static class CookieBuilder { @@ -154,6 +155,7 @@ private static class CookieBuilder { private int expiresEnd; private boolean secure; private boolean httpOnly; + private SameSite sameSite; CookieBuilder(DefaultCookie cookie, String header) { this.cookie = cookie; @@ -180,6 +182,7 @@ Cookie cookie() { cookie.setMaxAge(mergeMaxAgeAndExpires()); cookie.setSecure(secure); cookie.setHttpOnly(httpOnly); + cookie.setSameSite(sameSite); return cookie; } @@ -206,7 +209,7 @@ void appendAttribute(int keyStart, int keyEnd, int valueStart, int valueEnd) { } else if (length == 7) { parse7(keyStart, valueStart, valueEnd); } else if (length == 8) { - parse8(keyStart); + parse8(keyStart, valueStart, valueEnd); } } @@ -241,9 +244,11 @@ private void parse7(int nameStart, int valueStart, int valueEnd) { } } - private void parse8(int nameStart) { + private void parse8(int nameStart, int valueStart, int valueEnd) { if (header.regionMatches(true, nameStart, CookieHeaderNames.HTTPONLY, 0, 8)) { httpOnly = true; + } else if (header.regionMatches(true, nameStart, CookieHeaderNames.SAMESITE, 0, 8)) { + sameSite = SameSite.of(computeValue(valueStart, valueEnd)); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieEncoder.java index 9554b64ed46..951d9045e95 100644 --- 
a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ClientCookieEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,7 +20,8 @@ import static io.netty.handler.codec.http.cookie.CookieUtil.stringBuilder; import static io.netty.handler.codec.http.cookie.CookieUtil.stripTrailingSeparator; import static io.netty.handler.codec.http.cookie.CookieUtil.stripTrailingSeparatorOrNull; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; + import io.netty.handler.codec.http.HttpRequest; import io.netty.util.internal.InternalThreadLocalMap; @@ -31,7 +32,7 @@ import java.util.List; /** - * A RFC6265 compliant cookie encoder to be used client side, so + * A RFC6265 compliant cookie encoder to be used client side, so * only name=value pairs are sent. * * Note that multiple cookies are supposed to be sent at once in a single "Cookie" header. @@ -83,7 +84,7 @@ public String encode(String name, String value) { */ public String encode(Cookie cookie) { StringBuilder buf = stringBuilder(); - encode(buf, checkNotNull(cookie, "cookie")); + encode(buf, requireNonNull(cookie, "cookie")); return stripTrailingSeparator(buf); } @@ -91,26 +92,21 @@ public String encode(Cookie cookie) { * Sort cookies into decreasing order of path length, breaking ties by sorting into increasing chronological * order of creation time, as recommended by RFC 6265. 
*/ - private static final Comparator COOKIE_COMPARATOR = new Comparator() { - @Override - public int compare(Cookie c1, Cookie c2) { - String path1 = c1.path(); - String path2 = c2.path(); - // Cookies with unspecified path default to the path of the request. We don't - // know the request path here, but we assume that the length of an unspecified - // path is longer than any specified path (i.e. pathless cookies come first), - // because setting cookies with a path longer than the request path is of - // limited use. - int len1 = path1 == null ? Integer.MAX_VALUE : path1.length(); - int len2 = path2 == null ? Integer.MAX_VALUE : path2.length(); - int diff = len2 - len1; - if (diff != 0) { - return diff; - } - // Rely on Java's sort stability to retain creation order in cases where - // cookies have same path length. - return -1; - } + // package-private for testing only + static final Comparator COOKIE_COMPARATOR = (c1, c2) -> { + String path1 = c1.path(); + String path2 = c2.path(); + // Cookies with unspecified path default to the path of the request. We don't + // know the request path here, but we assume that the length of an unspecified + // path is longer than any specified path (i.e. pathless cookies come first), + // because setting cookies with a path longer than the request path is of + // limited use. + int len1 = path1 == null ? Integer.MAX_VALUE : path1.length(); + int len2 = path2 == null ? Integer.MAX_VALUE : path2.length(); + + // Rely on Arrays.sort's stability to retain creation order in cases where + // cookies have same path length. + return len2 - len1; }; /** @@ -121,7 +117,7 @@ public int compare(Cookie c1, Cookie c2) { * @return a Rfc6265 style Cookie header value, null if no cookies are passed. */ public String encode(Cookie... cookies) { - if (checkNotNull(cookies, "cookies").length == 0) { + if (requireNonNull(cookies, "cookies").length == 0) { return null; } @@ -152,7 +148,7 @@ public String encode(Cookie... 
cookies) { * @return a Rfc6265 style Cookie header value, null if no cookies are passed. */ public String encode(Collection cookies) { - if (checkNotNull(cookies, "cookies").isEmpty()) { + if (requireNonNull(cookies, "cookies").isEmpty()) { return null; } @@ -161,7 +157,7 @@ public String encode(Collection cookies) { if (cookies.size() == 1) { encode(buf, cookies.iterator().next()); } else { - Cookie[] cookiesSorted = cookies.toArray(new Cookie[cookies.size()]); + Cookie[] cookiesSorted = cookies.toArray(new Cookie[0]); Arrays.sort(cookiesSorted, COOKIE_COMPARATOR); for (Cookie c : cookiesSorted) { encode(buf, c); @@ -182,7 +178,7 @@ public String encode(Collection cookies) { * @return a Rfc6265 style Cookie header value, null if no cookies are passed. */ public String encode(Iterable cookies) { - Iterator cookiesIt = checkNotNull(cookies, "cookies").iterator(); + Iterator cookiesIt = requireNonNull(cookies, "cookies").iterator(); if (!cookiesIt.hasNext()) { return null; } @@ -198,7 +194,7 @@ public String encode(Iterable cookies) { while (cookiesIt.hasNext()) { cookiesList.add(cookiesIt.next()); } - Cookie[] cookiesSorted = cookiesList.toArray(new Cookie[cookiesList.size()]); + Cookie[] cookiesSorted = cookiesList.toArray(new Cookie[0]); Arrays.sort(cookiesSorted, COOKIE_COMPARATOR); for (Cookie c : cookiesSorted) { encode(buf, c); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/Cookie.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/Cookie.java index f128f1cace2..ea5183ec74a 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/Cookie.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/Cookie.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,7 @@ /** * An interface defining an - * HTTP cookie. + * HTTP cookie. */ public interface Cookie extends Comparable { @@ -127,7 +127,7 @@ public interface Cookie extends Comparable { * Checks to see if this {@link Cookie} can only be accessed via HTTP. * If this returns true, the {@link Cookie} cannot be accessed through * client side script - But only if the browser supports it. - * For more information, please look here + * For more information, please look here * * @return True if this {@link Cookie} is HTTP-only or false if it isn't */ @@ -138,7 +138,7 @@ public interface Cookie extends Comparable { * If set to true, this {@link Cookie} cannot be accessed by a client * side script. However, this works only if the browser supports it. * For for information, please look - * here. + * here. * * @param httpOnly True if the {@link Cookie} is HTTP only, otherwise false. */ diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieDecoder.java index ab3fbcf7b0e..97edaa42c9c 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieEncoder.java index d6487681529..a1f20f1e82f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -44,7 +44,8 @@ protected void validateCookie(String name, String value) { } if ((pos = firstInvalidCookieValueOctet(unwrappedValue)) >= 0) { - throw new IllegalArgumentException("Cookie value contains an invalid char: " + value.charAt(pos)); + throw new IllegalArgumentException("Cookie value contains an invalid char: " + + unwrappedValue.charAt(pos)); } } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieHeaderNames.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieHeaderNames.java index 6d2e7f577c2..7e3881ebef0 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieHeaderNames.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieHeaderNames.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -28,6 +28,35 @@ public final class CookieHeaderNames { public static final String HTTPONLY = "HTTPOnly"; + public static final String SAMESITE = "SameSite"; + + /** + * Possible values for the SameSite attribute. + * See changes to RFC6265bis + */ + public enum SameSite { + Lax, + Strict, + None; + + /** + * Return the enum value corresponding to the passed in same-site-flag, using a case insensitive comparison. + * + * @param name value for the SameSite Attribute + * @return enum value for the provided name or null + */ + static SameSite of(String name) { + if (name != null) { + for (SameSite each : SameSite.class.getEnumConstants()) { + if (each.name().equalsIgnoreCase(name)) { + return each; + } + } + } + return null; + } + } + private CookieHeaderNames() { // Unused. } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieUtil.java index 1e9d9c8f87e..64aa6bfe800 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/CookieUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -97,24 +97,24 @@ static String stripTrailingSeparator(StringBuilder buf) { static void add(StringBuilder sb, String name, long val) { sb.append(name); - sb.append((char) HttpConstants.EQUALS); + sb.append('='); sb.append(val); - sb.append((char) HttpConstants.SEMICOLON); - sb.append((char) HttpConstants.SP); + sb.append(';'); + sb.append(HttpConstants.SP_CHAR); } static void add(StringBuilder sb, String name, String val) { sb.append(name); - sb.append((char) HttpConstants.EQUALS); + sb.append('='); sb.append(val); - sb.append((char) HttpConstants.SEMICOLON); - sb.append((char) HttpConstants.SP); + sb.append(';'); + sb.append(HttpConstants.SP_CHAR); } static void add(StringBuilder sb, String name) { sb.append(name); - sb.append((char) HttpConstants.SEMICOLON); - sb.append((char) HttpConstants.SP); + sb.append(';'); + sb.append(HttpConstants.SP_CHAR); } static void addQuoted(StringBuilder sb, String name, String val) { @@ -123,12 +123,12 @@ static void addQuoted(StringBuilder sb, String name, String val) { } sb.append(name); - sb.append((char) HttpConstants.EQUALS); - sb.append((char) HttpConstants.DOUBLE_QUOTE); + sb.append('='); + sb.append('"'); sb.append(val); - sb.append((char) HttpConstants.DOUBLE_QUOTE); - sb.append((char) HttpConstants.SEMICOLON); - sb.append((char) HttpConstants.SP); + sb.append('"'); + sb.append(';'); + sb.append(HttpConstants.SP_CHAR); } static int firstInvalidCookieNameOctet(CharSequence cs) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/DefaultCookie.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/DefaultCookie.java index cbd54cd092d..2973a076c9e 100644 --- 
a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/DefaultCookie.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/DefaultCookie.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,8 +15,12 @@ */ package io.netty.handler.codec.http.cookie; -import static io.netty.handler.codec.http.cookie.CookieUtil.*; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import io.netty.handler.codec.http.cookie.CookieHeaderNames.SameSite; + +import static io.netty.handler.codec.http.cookie.CookieUtil.stringBuilder; +import static io.netty.handler.codec.http.cookie.CookieUtil.validateAttributeValue; +import static io.netty.util.internal.ObjectUtil.checkNonEmptyAfterTrim; +import static java.util.Objects.requireNonNull; /** * The default {@link Cookie} implementation. @@ -31,16 +35,13 @@ public class DefaultCookie implements Cookie { private long maxAge = UNDEFINED_MAX_AGE; private boolean secure; private boolean httpOnly; + private SameSite sameSite; /** * Creates a new cookie with the specified name and value. 
*/ public DefaultCookie(String name, String value) { - name = checkNotNull(name, "name").trim(); - if (name.isEmpty()) { - throw new IllegalArgumentException("empty name"); - } - this.name = name; + this.name = checkNonEmptyAfterTrim(name, "name"); setValue(value); } @@ -56,7 +57,7 @@ public String value() { @Override public void setValue(String value) { - this.value = checkNotNull(value, "value"); + this.value = requireNonNull(value, "value"); } @Override @@ -119,6 +120,26 @@ public void setHttpOnly(boolean httpOnly) { this.httpOnly = httpOnly; } + /** + * Checks to see if this {@link Cookie} can be sent along cross-site requests. + * For more information, please look + * here + * @return same-site-flag value + */ + public SameSite sameSite() { + return sameSite; + } + + /** + * Determines if this this {@link Cookie} can be sent along cross-site requests. + * For more information, please look + * here + * @param sameSite same-site-flag value + */ + public void setSameSite(SameSite sameSite) { + this.sameSite = sameSite; + } + @Override public int hashCode() { return name().hashCode(); @@ -194,19 +215,6 @@ public int compareTo(Cookie c) { return 0; } - /** - * Validate a cookie attribute value, throws a {@link IllegalArgumentException} otherwise. - * Only intended to be used by {@link io.netty.handler.codec.http.DefaultCookie}. 
- * @param name attribute name - * @param value attribute value - * @return the trimmed, validated attribute value - * @deprecated CookieUtil is package private, will be removed once old Cookie API is dropped - */ - @Deprecated - protected String validateValue(String name, String value) { - return validateAttributeValue(name, value); - } - @Override public String toString() { StringBuilder buf = stringBuilder() @@ -232,6 +240,9 @@ public String toString() { if (isHttpOnly()) { buf.append(", HTTPOnly"); } + if (sameSite() != null) { + buf.append(", SameSite=").append(sameSite()); + } return buf.toString(); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieDecoder.java index cf5349b0863..397b37710da 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,18 +15,21 @@ */ package io.netty.handler.codec.http.cookie; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; +import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; +import java.util.List; import java.util.Set; import java.util.TreeSet; /** - * A RFC6265 compliant cookie decoder to be used server side. + * A RFC6265 compliant cookie decoder to be used server side. * * Only name and value fields are expected, so old fields are not populated (path, domain, etc). 
* - * Old RFC2965 cookies are still supported, + * Old RFC2965 cookies are still supported, * old fields will simply be ignored. * * @see ServerCookieEncoder @@ -57,19 +60,38 @@ private ServerCookieDecoder(boolean strict) { } /** - * Decodes the specified Set-Cookie HTTP header value into a {@link Cookie}. + * Decodes the specified {@code Cookie} HTTP header value into a {@link Cookie}. Unlike {@link #decode(String)}, + * this includes all cookie values present, even if they have the same name. + * + * @return the decoded {@link Cookie} + */ + public List decodeAll(String header) { + List cookies = new ArrayList<>(); + decode(cookies, header); + return Collections.unmodifiableList(cookies); + } + + /** + * Decodes the specified {@code Cookie} HTTP header value into a {@link Cookie}. * * @return the decoded {@link Cookie} */ public Set decode(String header) { - final int headerLen = checkNotNull(header, "header").length(); + Set cookies = new TreeSet<>(); + decode(cookies, header); + return cookies; + } + + /** + * Decodes the specified {@code Cookie} HTTP header value into a {@link Cookie}. 
+ */ + private void decode(Collection cookies, String header) { + final int headerLen = requireNonNull(header, "header").length(); if (headerLen == 0) { - return Collections.emptySet(); + return; } - Set cookies = new TreeSet(); - int i = 0; boolean rfc2965Style = false; @@ -149,7 +171,5 @@ public Set decode(String header) { cookies.add(cookie); } } - - return cookies; } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieEncoder.java index b707dc33d18..9bfebb1fb9d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/ServerCookieEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,12 +15,6 @@ */ package io.netty.handler.codec.http.cookie; -import static io.netty.handler.codec.http.cookie.CookieUtil.add; -import static io.netty.handler.codec.http.cookie.CookieUtil.addQuoted; -import static io.netty.handler.codec.http.cookie.CookieUtil.stringBuilder; -import static io.netty.handler.codec.http.cookie.CookieUtil.stripTrailingSeparator; -import static io.netty.util.internal.ObjectUtil.checkNotNull; - import io.netty.handler.codec.DateFormatter; import io.netty.handler.codec.http.HttpConstants; import io.netty.handler.codec.http.HttpResponse; @@ -34,8 +28,14 @@ import java.util.List; import java.util.Map; +import static io.netty.handler.codec.http.cookie.CookieUtil.add; +import static io.netty.handler.codec.http.cookie.CookieUtil.addQuoted; +import static 
io.netty.handler.codec.http.cookie.CookieUtil.stringBuilder; +import static io.netty.handler.codec.http.cookie.CookieUtil.stripTrailingSeparator; +import static java.util.Objects.requireNonNull; + /** - * A RFC6265 compliant cookie encoder to be used server side, + * A RFC6265 compliant cookie encoder to be used server side, * so some fields are sent (Version is typically ignored). * * As Netty's Cookie merges Expires and MaxAge into one single field, only Max-Age field is sent. @@ -88,7 +88,7 @@ public String encode(String name, String value) { * @return a single Set-Cookie header value */ public String encode(Cookie cookie) { - final String name = checkNotNull(cookie, "cookie").name(); + final String name = requireNonNull(cookie, "cookie").name(); final String value = cookie.value() != null ? cookie.value() : ""; validateCookie(name, value); @@ -105,10 +105,10 @@ public String encode(Cookie cookie) { add(buf, CookieHeaderNames.MAX_AGE, cookie.maxAge()); Date expires = new Date(cookie.maxAge() * 1000 + System.currentTimeMillis()); buf.append(CookieHeaderNames.EXPIRES); - buf.append((char) HttpConstants.EQUALS); + buf.append('='); DateFormatter.append(expires, buf); - buf.append((char) HttpConstants.SEMICOLON); - buf.append((char) HttpConstants.SP); + buf.append(';'); + buf.append(HttpConstants.SP_CHAR); } if (cookie.path() != null) { @@ -124,6 +124,12 @@ public String encode(Cookie cookie) { if (cookie.isHttpOnly()) { add(buf, CookieHeaderNames.HTTPONLY); } + if (cookie instanceof DefaultCookie) { + DefaultCookie c = (DefaultCookie) cookie; + if (c.sameSite() != null) { + add(buf, CookieHeaderNames.SAMESITE, c.sameSite().name()); + } + } return stripTrailingSeparator(buf); } @@ -139,7 +145,7 @@ private static List dedup(List encoded, Map nam for (int idx : nameToLastIndex.values()) { isLastInstance[idx] = true; } - List dedupd = new ArrayList(nameToLastIndex.size()); + List dedupd = new ArrayList<>(nameToLastIndex.size()); for (int i = 0, n = encoded.size(); i < 
n; i++) { if (isLastInstance[i]) { dedupd.add(encoded.get(i)); @@ -155,12 +161,12 @@ private static List dedup(List encoded, Map nam * @return the corresponding bunch of Set-Cookie headers */ public List encode(Cookie... cookies) { - if (checkNotNull(cookies, "cookies").length == 0) { + if (requireNonNull(cookies, "cookies").length == 0) { return Collections.emptyList(); } - List encoded = new ArrayList(cookies.length); - Map nameToIndex = strict && cookies.length > 1 ? new HashMap() : null; + List encoded = new ArrayList<>(cookies.length); + Map nameToIndex = strict && cookies.length > 1 ? new HashMap<>() : null; boolean hasDupdName = false; for (int i = 0; i < cookies.length; i++) { Cookie c = cookies[i]; @@ -179,12 +185,12 @@ public List encode(Cookie... cookies) { * @return the corresponding bunch of Set-Cookie headers */ public List encode(Collection cookies) { - if (checkNotNull(cookies, "cookies").isEmpty()) { + if (requireNonNull(cookies, "cookies").isEmpty()) { return Collections.emptyList(); } - List encoded = new ArrayList(cookies.size()); - Map nameToIndex = strict && cookies.size() > 1 ? new HashMap() : null; + List encoded = new ArrayList<>(cookies.size()); + Map nameToIndex = strict && cookies.size() > 1 ? new HashMap<>() : null; int i = 0; boolean hasDupdName = false; for (Cookie c : cookies) { @@ -203,14 +209,14 @@ public List encode(Collection cookies) { * @return the corresponding bunch of Set-Cookie headers */ public List encode(Iterable cookies) { - Iterator cookiesIt = checkNotNull(cookies, "cookies").iterator(); + Iterator cookiesIt = requireNonNull(cookies, "cookies").iterator(); if (!cookiesIt.hasNext()) { return Collections.emptyList(); } - List encoded = new ArrayList(); + List encoded = new ArrayList<>(); Cookie firstCookie = cookiesIt.next(); - Map nameToIndex = strict && cookiesIt.hasNext() ? new HashMap() : null; + Map nameToIndex = strict && cookiesIt.hasNext() ? 
new HashMap<>() : null; int i = 0; encoded.add(encode(firstCookie)); boolean hasDupdName = nameToIndex != null && nameToIndex.put(firstCookie.name(), i++) != null; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/package-info.java index 4f9ebaf0f0e..d67e10b8b56 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cookie/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cookie/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfig.java b/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfig.java index 5ccae5d641b..0a75514fc8e 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfig.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfig.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -47,7 +47,7 @@ public final class CorsConfig { private final boolean shortCircuit; CorsConfig(final CorsConfigBuilder builder) { - origins = new LinkedHashSet(builder.origins); + origins = new LinkedHashSet<>(builder.origins); anyOrigin = builder.anyOrigin; enabled = builder.enabled; exposeHeaders = builder.exposeHeaders; @@ -219,7 +219,7 @@ public HttpHeaders preflightResponseHeaders() { * * CORS headers are set after a request is processed. This may not always be desired * and this setting will check that the Origin is valid and if it is not valid no - * further processing will take place, and a error will be returned to the calling client. + * further processing will take place, and an error will be returned to the calling client. * * @return {@code true} if a CORS request should short-circuit upon receiving an invalid Origin header. */ diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfigBuilder.java b/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfigBuilder.java index c55eed184ba..c415bdcc848 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfigBuilder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsConfigBuilder.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http.cors; +import static io.netty.util.internal.ObjectUtil.checkNotNullWithIAE; + import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpMethod; @@ -68,11 +70,11 @@ public static CorsConfigBuilder forOrigins(final String... origins) { boolean allowNullOrigin; boolean enabled = true; boolean allowCredentials; - final Set exposeHeaders = new HashSet(); + final Set exposeHeaders = new HashSet<>(); long maxAge; - final Set requestMethods = new HashSet(); - final Set requestHeaders = new HashSet(); - final Map> preflightHeaders = new HashMap>(); + final Set requestMethods = new HashSet<>(); + final Set requestHeaders = new HashSet<>(); + final Map> preflightHeaders = new HashMap<>(); private boolean noPreflightHeaders; boolean shortCircuit; @@ -82,7 +84,7 @@ public static CorsConfigBuilder forOrigins(final String... origins) { * @param origins the origin to be used for this builder. */ CorsConfigBuilder(final String... origins) { - this.origins = new LinkedHashSet(Arrays.asList(origins)); + this.origins = new LinkedHashSet<>(Arrays.asList(origins)); anyOrigin = false; } @@ -341,7 +343,7 @@ public CorsConfigBuilder noPreflightResponseHeaders() { * * CORS headers are set after a request is processed. This may not always be desired * and this setting will check that the Origin is valid and if it is not valid no - * further processing will take place, and a error will be returned to the calling client. + * further processing will take place, and an error will be returned to the calling client. * * @return {@link CorsConfigBuilder} to support method chaining. 
*/ @@ -378,10 +380,7 @@ private static final class ConstantValueGenerator implements Callable { * @param value the value that will be returned when the call method is invoked. */ private ConstantValueGenerator(final Object value) { - if (value == null) { - throw new IllegalArgumentException("value must not be null"); - } - this.value = value; + this.value = checkNotNullWithIAE(value, "value"); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsHandler.java index f4b5552b724..b21adf67d51 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cors/CorsHandler.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,11 +15,9 @@ */ package io.netty.handler.codec.http.cors; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; @@ -27,45 +25,68 @@ import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpUtil; +import io.netty.util.concurrent.Future; import io.netty.util.internal.logging.InternalLogger; import 
io.netty.util.internal.logging.InternalLoggerFactory; -import static io.netty.handler.codec.http.HttpMethod.*; -import static io.netty.handler.codec.http.HttpResponseStatus.*; -import static io.netty.util.ReferenceCountUtil.*; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import java.util.Collections; +import java.util.List; + +import static io.netty.handler.codec.http.HttpMethod.OPTIONS; +import static io.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN; +import static io.netty.handler.codec.http.HttpResponseStatus.OK; +import static io.netty.util.ReferenceCountUtil.release; +import static io.netty.util.internal.ObjectUtil.checkNonEmpty; +import static java.util.Objects.requireNonNull; /** - * Handles Cross Origin Resource Sharing (CORS) requests. + * Handles Cross Origin Resource Sharing (CORS) requests. *

    - * This handler can be configured using a {@link CorsConfig}, please + * This handler can be configured using one or more {@link CorsConfig}, please * refer to this class for details about the configuration options available. */ -public class CorsHandler extends ChannelDuplexHandler { +public class CorsHandler implements ChannelHandler { private static final InternalLogger logger = InternalLoggerFactory.getInstance(CorsHandler.class); private static final String ANY_ORIGIN = "*"; private static final String NULL_ORIGIN = "null"; - private final CorsConfig config; + private CorsConfig config; private HttpRequest request; + private final List configList; + private final boolean isShortCircuit; /** - * Creates a new instance with the specified {@link CorsConfig}. + * Creates a new instance with a single {@link CorsConfig}. */ public CorsHandler(final CorsConfig config) { - this.config = checkNotNull(config, "config"); + this(Collections.singletonList(requireNonNull(config, "config")), config.isShortCircuit()); + } + + /** + * Creates a new instance with the specified config list. If more than one + * config matches a certain origin, the first in the List will be used. + * + * @param configList List of {@link CorsConfig} + * @param isShortCircuit Same as {@link CorsConfig#isShortCurcuit()} but applicable to all supplied configs. 
+ */ + public CorsHandler(final List configList, boolean isShortCircuit) { + checkNonEmpty(configList, "configList"); + this.configList = configList; + this.isShortCircuit = isShortCircuit; } @Override public void channelRead(final ChannelHandlerContext ctx, final Object msg) throws Exception { - if (config.isCorsSupportEnabled() && msg instanceof HttpRequest) { + if (msg instanceof HttpRequest) { request = (HttpRequest) msg; + final String origin = request.headers().get(HttpHeaderNames.ORIGIN); + config = getForOrigin(origin); if (isPreflightRequest(request)) { handlePreflight(ctx, request); return; } - if (config.isShortCircuit() && !validateOrigin()) { + if (isShortCircuit && !(origin == null || config != null)) { forbidden(ctx, request); return; } @@ -99,9 +120,24 @@ private void setPreflightHeaders(final HttpResponse response) { response.headers().add(config.preflightResponseHeaders()); } + private CorsConfig getForOrigin(String requestOrigin) { + for (CorsConfig corsConfig : configList) { + if (corsConfig.isAnyOriginSupported()) { + return corsConfig; + } + if (corsConfig.origins().contains(requestOrigin)) { + return corsConfig; + } + if (corsConfig.isNullOriginAllowed() || NULL_ORIGIN.equals(requestOrigin)) { + return corsConfig; + } + } + return null; + } + private boolean setOrigin(final HttpResponse response) { final String origin = request.headers().get(HttpHeaderNames.ORIGIN); - if (origin != null) { + if (origin != null && config != null) { if (NULL_ORIGIN.equals(origin) && config.isNullOriginAllowed()) { setNullOrigin(response); return true; @@ -125,24 +161,6 @@ private boolean setOrigin(final HttpResponse response) { return false; } - private boolean validateOrigin() { - if (config.isAnyOriginSupported()) { - return true; - } - - final String origin = request.headers().get(HttpHeaderNames.ORIGIN); - if (origin == null) { - // Not a CORS request so we cannot validate it. It may be a non CORS request. 
- return true; - } - - if ("null".equals(origin) && config.isNullOriginAllowed()) { - return true; - } - - return config.origins().contains(origin); - } - private void echoRequestOrigin(final HttpResponse response) { setOrigin(response, request.headers().get(HttpHeaderNames.ORIGIN)); } @@ -172,7 +190,7 @@ private void setAllowCredentials(final HttpResponse response) { private static boolean isPreflightRequest(final HttpRequest request) { final HttpHeaders headers = request.headers(); - return request.method().equals(OPTIONS) && + return OPTIONS.equals(request.method()) && headers.contains(HttpHeaderNames.ORIGIN) && headers.contains(HttpHeaderNames.ACCESS_CONTROL_REQUEST_METHOD); } @@ -196,20 +214,20 @@ private void setMaxAge(final HttpResponse response) { } @Override - public void write(final ChannelHandlerContext ctx, final Object msg, final ChannelPromise promise) - throws Exception { - if (config.isCorsSupportEnabled() && msg instanceof HttpResponse) { + public Future write(final ChannelHandlerContext ctx, final Object msg) { + if (config != null && config.isCorsSupportEnabled() && msg instanceof HttpResponse) { final HttpResponse response = (HttpResponse) msg; if (setOrigin(response)) { setAllowCredentials(response); setExposeHeaders(response); } } - ctx.writeAndFlush(msg, promise); + return ctx.write(msg); } private static void forbidden(final ChannelHandlerContext ctx, final HttpRequest request) { - HttpResponse response = new DefaultFullHttpResponse(request.protocolVersion(), FORBIDDEN); + HttpResponse response = new DefaultFullHttpResponse( + request.protocolVersion(), FORBIDDEN, ctx.alloc().buffer(0)); response.headers().set(HttpHeaderNames.CONTENT_LENGTH, HttpHeaderValues.ZERO); release(request); respond(ctx, request, response); @@ -224,9 +242,9 @@ private static void respond( HttpUtil.setKeepAlive(response, keepAlive); - final ChannelFuture future = ctx.writeAndFlush(response); + Future future = ctx.writeAndFlush(response); if (!keepAlive) { - 
future.addListener(ChannelFutureListener.CLOSE); + future.addListener(ctx.channel(), ChannelFutureListeners.CLOSE); } } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/cors/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/http/cors/package-info.java index b9b480a338d..6c1570f4f79 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/cors/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/cors/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractDiskHttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractDiskHttpData.java index a21e72f2551..f97317d9ab0 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractDiskHttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractDiskHttpData.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,19 +18,22 @@ import io.netty.buffer.ByteBuf; import io.netty.handler.codec.http.HttpConstants; import io.netty.util.internal.EmptyArrays; +import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; +import java.nio.file.Files; -import static io.netty.buffer.Unpooled.*; +import static io.netty.buffer.Unpooled.EMPTY_BUFFER; +import static io.netty.buffer.Unpooled.wrappedBuffer; +import static java.util.Objects.requireNonNull; /** * Abstract Disk HttpData implementation @@ -87,22 +90,21 @@ private File tempFile() throws IOException { File tmpFile; if (getBaseDirectory() == null) { // create a temporary file - tmpFile = File.createTempFile(getPrefix(), newpostfix); + tmpFile = PlatformDependent.createTempFile(getPrefix(), newpostfix, null); } else { - tmpFile = File.createTempFile(getPrefix(), newpostfix, new File( + tmpFile = PlatformDependent.createTempFile(getPrefix(), newpostfix, new File( getBaseDirectory())); } if (deleteOnExit()) { - tmpFile.deleteOnExit(); + // See https://github.com/netty/netty/issues/10351 + DeleteFileOnExitHook.add(tmpFile.getPath()); } return tmpFile; } @Override public void setContent(ByteBuf buffer) throws IOException { - if (buffer == null) { - throw new NullPointerException("buffer"); - } + requireNonNull(buffer, "buffer"); try { size = buffer.readableBytes(); 
checkSize(size); @@ -115,13 +117,19 @@ public void setContent(ByteBuf buffer) throws IOException { if (buffer.readableBytes() == 0) { // empty file if (!file.createNewFile()) { - throw new IOException("file exists already: " + file); + if (file.length() == 0) { + return; + } else { + if (!file.delete() || !file.createNewFile()) { + throw new IOException("file exists already: " + file); + } + } } return; } - FileOutputStream outputStream = new FileOutputStream(file); - try { - FileChannel localfileChannel = outputStream.getChannel(); + try (RandomAccessFile accessFile = new RandomAccessFile(file, "rw")) { + accessFile.setLength(0); + FileChannel localfileChannel = accessFile.getChannel(); ByteBuffer byteBuffer = buffer.nioBuffer(); int written = 0; while (written < size) { @@ -129,8 +137,6 @@ public void setContent(ByteBuf buffer) throws IOException { } buffer.readerIndex(buffer.readerIndex() + written); localfileChannel.force(false); - } finally { - outputStream.close(); } setCompleted(); } finally { @@ -151,20 +157,28 @@ public void addContent(ByteBuf buffer, boolean last) throw new IOException("Out of size: " + (size + localsize) + " > " + definedSize); } - ByteBuffer byteBuffer = buffer.nioBufferCount() == 1 ? 
buffer.nioBuffer() : buffer.copy().nioBuffer(); - int written = 0; if (file == null) { file = tempFile(); } if (fileChannel == null) { - FileOutputStream outputStream = new FileOutputStream(file); - fileChannel = outputStream.getChannel(); + RandomAccessFile accessFile = new RandomAccessFile(file, "rw"); + fileChannel = accessFile.getChannel(); } - while (written < localsize) { - written += fileChannel.write(byteBuffer); + int remaining = localsize; + long position = fileChannel.position(); + int index = buffer.readerIndex(); + while (remaining > 0) { + int written = buffer.getBytes(index, fileChannel, position, remaining); + if (written < 0) { + break; + } + remaining -= written; + position += written; + index += written; } - size += localsize; - buffer.readerIndex(buffer.readerIndex() + written); + fileChannel.position(position); + buffer.readerIndex(index); + size += localsize - remaining; } finally { // Release the buffer as it was retained before and we not need a reference to it at all // See https://github.com/netty/netty/issues/1516 @@ -176,45 +190,45 @@ public void addContent(ByteBuf buffer, boolean last) file = tempFile(); } if (fileChannel == null) { - FileOutputStream outputStream = new FileOutputStream(file); - fileChannel = outputStream.getChannel(); + RandomAccessFile accessFile = new RandomAccessFile(file, "rw"); + fileChannel = accessFile.getChannel(); + } + try { + fileChannel.force(false); + } finally { + fileChannel.close(); } - fileChannel.force(false); - fileChannel.close(); fileChannel = null; setCompleted(); } else { - if (buffer == null) { - throw new NullPointerException("buffer"); - } + requireNonNull(buffer, "buffer"); } } @Override public void setContent(File file) throws IOException { + long size = file.length(); + checkSize(size); + this.size = size; if (this.file != null) { delete(); } this.file = file; - size = file.length(); - checkSize(size); isRenamed = true; setCompleted(); } @Override public void setContent(InputStream 
inputStream) throws IOException { - if (inputStream == null) { - throw new NullPointerException("inputStream"); - } + requireNonNull(inputStream, "inputStream"); if (file != null) { delete(); } file = tempFile(); - FileOutputStream outputStream = new FileOutputStream(file); int written = 0; - try { - FileChannel localfileChannel = outputStream.getChannel(); + try (RandomAccessFile accessFile = new RandomAccessFile(file, "rw")) { + accessFile.setLength(0); + FileChannel localfileChannel = accessFile.getChannel(); byte[] bytes = new byte[4096 * 4]; ByteBuffer byteBuffer = ByteBuffer.wrap(bytes); int read = inputStream.read(bytes); @@ -225,8 +239,6 @@ public void setContent(InputStream inputStream) throws IOException { read = inputStream.read(bytes); } localfileChannel.force(false); - } finally { - outputStream.close(); } size = written; if (definedSize > 0 && definedSize < size) { @@ -245,18 +257,32 @@ public void delete() { if (fileChannel != null) { try { fileChannel.force(false); - fileChannel.close(); } catch (IOException e) { - logger.warn("Failed to close a file.", e); + logger.warn("Failed to force.", e); + } finally { + try { + fileChannel.close(); + } catch (IOException e) { + logger.warn("Failed to close a file.", e); + } } fileChannel = null; } - if (! isRenamed) { + if (!isRenamed) { + String filePath = null; + if (file != null && file.exists()) { + filePath = file.getPath(); if (!file.delete()) { + filePath = null; logger.warn("Failed to delete: {}", file); } } + + // If you turn on deleteOnExit make sure it is executed. 
+ if (deleteOnExit() && filePath != null) { + DeleteFileOnExitHook.remove(filePath); + } file = null; } } @@ -284,20 +310,25 @@ public ByteBuf getChunk(int length) throws IOException { return EMPTY_BUFFER; } if (fileChannel == null) { - FileInputStream inputStream = new FileInputStream(file); - fileChannel = inputStream.getChannel(); + RandomAccessFile accessFile = new RandomAccessFile(file, "r"); + fileChannel = accessFile.getChannel(); } int read = 0; ByteBuffer byteBuffer = ByteBuffer.allocate(length); - while (read < length) { - int readnow = fileChannel.read(byteBuffer); - if (readnow == -1) { - fileChannel.close(); - fileChannel = null; - break; - } else { + try { + while (read < length) { + int readnow = fileChannel.read(byteBuffer); + if (readnow == -1) { + fileChannel.close(); + fileChannel = null; + break; + } read += readnow; } + } catch (IOException e) { + fileChannel.close(); + fileChannel = null; + throw e; } if (read == 0) { return EMPTY_BUFFER; @@ -334,36 +365,34 @@ public boolean isInMemory() { @Override public boolean renameTo(File dest) throws IOException { - if (dest == null) { - throw new NullPointerException("dest"); - } + requireNonNull(dest, "dest"); if (file == null) { throw new IOException("No file defined so cannot be renamed"); } if (!file.renameTo(dest)) { // must copy IOException exception = null; - FileInputStream inputStream = null; - FileOutputStream outputStream = null; + RandomAccessFile inputAccessFile = null; + RandomAccessFile outputAccessFile = null; long chunkSize = 8196; long position = 0; try { - inputStream = new FileInputStream(file); - outputStream = new FileOutputStream(dest); - FileChannel in = inputStream.getChannel(); - FileChannel out = outputStream.getChannel(); + inputAccessFile = new RandomAccessFile(file, "r"); + outputAccessFile = new RandomAccessFile(dest, "rw"); + FileChannel in = inputAccessFile.getChannel(); + FileChannel out = outputAccessFile.getChannel(); while (position < size) { if (chunkSize < size - 
position) { chunkSize = size - position; } - position += in.transferTo(position, chunkSize , out); + position += in.transferTo(position, chunkSize, out); } } catch (IOException e) { exception = e; } finally { - if (inputStream != null) { + if (inputAccessFile != null) { try { - inputStream.close(); + inputAccessFile.close(); } catch (IOException e) { if (exception == null) { // Choose to report the first exception exception = e; @@ -372,9 +401,9 @@ public boolean renameTo(File dest) throws IOException { } } } - if (outputStream != null) { + if (outputAccessFile != null) { try { - outputStream.close(); + outputAccessFile.close(); } catch (IOException e) { if (exception == null) { // Choose to report the first exception exception = e; @@ -408,27 +437,11 @@ public boolean renameTo(File dest) throws IOException { /** * Utility function + * * @return the array of bytes */ private static byte[] readFrom(File src) throws IOException { - long srcsize = src.length(); - if (srcsize > Integer.MAX_VALUE) { - throw new IllegalArgumentException( - "File too big to be loaded in memory"); - } - FileInputStream inputStream = new FileInputStream(src); - byte[] array = new byte[(int) srcsize]; - try { - FileChannel fileChannel = inputStream.getChannel(); - ByteBuffer byteBuffer = ByteBuffer.wrap(array); - int read = 0; - while (read < srcsize) { - read += fileChannel.read(byteBuffer); - } - } finally { - inputStream.close(); - } - return array; + return Files.readAllBytes(src.toPath()); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractHttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractHttpData.java index ff05753bae5..6bb6decea82 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractHttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractHttpData.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in 
compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,9 @@ */ package io.netty.handler.codec.http.multipart; +import static java.util.Objects.requireNonNull; +import static io.netty.util.internal.ObjectUtil.checkNonEmpty; + import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelException; import io.netty.handler.codec.http.HttpConstants; @@ -40,18 +43,12 @@ public abstract class AbstractHttpData extends AbstractReferenceCounted implemen private long maxSize = DefaultHttpDataFactory.MAXSIZE; protected AbstractHttpData(String name, Charset charset, long size) { - if (name == null) { - throw new NullPointerException("name"); - } + requireNonNull(name, "name"); name = REPLACE_PATTERN.matcher(name).replaceAll(" "); name = STRIP_PATTERN.matcher(name).replaceAll(""); - if (name.isEmpty()) { - throw new IllegalArgumentException("empty name"); - } - - this.name = name; + this.name = checkNonEmpty(name, "name"); if (charset != null) { setCharset(charset); } @@ -59,7 +56,9 @@ protected AbstractHttpData(String name, Charset charset, long size) { } @Override - public long getMaxSize() { return maxSize; } + public long getMaxSize() { + return maxSize; + } @Override public void setMaxSize(long maxSize) { @@ -94,9 +93,7 @@ public Charset getCharset() { @Override public void setCharset(Charset charset) { - if (charset == null) { - throw new NullPointerException("charset"); - } + requireNonNull(charset, "charset"); this.charset = charset; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java index 31aa9ce64b5..ed55effa287 100644 --- 
a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpData.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,10 +20,9 @@ import io.netty.handler.codec.http.HttpConstants; import java.io.File; -import java.io.FileInputStream; -import java.io.FileOutputStream; import java.io.IOException; import java.io.InputStream; +import java.io.RandomAccessFile; import java.nio.ByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; @@ -32,6 +31,7 @@ import static io.netty.buffer.Unpooled.buffer; import static io.netty.buffer.Unpooled.compositeBuffer; import static io.netty.buffer.Unpooled.wrappedBuffer; +import static java.util.Objects.requireNonNull; /** * Abstract Memory HttpData implementation @@ -43,13 +43,12 @@ public abstract class AbstractMemoryHttpData extends AbstractHttpData { protected AbstractMemoryHttpData(String name, Charset charset, long size) { super(name, charset, size); + byteBuf = EMPTY_BUFFER; } @Override public void setContent(ByteBuf buffer) throws IOException { - if (buffer == null) { - throw new NullPointerException("buffer"); - } + requireNonNull(buffer, "buffer"); long localsize = buffer.readableBytes(); checkSize(localsize); if (definedSize > 0 && definedSize < localsize) { @@ -66,21 +65,25 @@ public void setContent(ByteBuf buffer) throws IOException { @Override public void setContent(InputStream inputStream) throws IOException { - if (inputStream == null) { - throw new NullPointerException("inputStream"); - } - ByteBuf buffer = buffer(); + 
requireNonNull(inputStream, "inputStream"); byte[] bytes = new byte[4096 * 4]; - int read = inputStream.read(bytes); + ByteBuf buffer = buffer(); int written = 0; - while (read > 0) { - buffer.writeBytes(bytes, 0, read); - written += read; - checkSize(written); - read = inputStream.read(bytes); + try { + int read = inputStream.read(bytes); + while (read > 0) { + buffer.writeBytes(bytes, 0, read); + written += read; + checkSize(written); + read = inputStream.read(bytes); + } + } catch (IOException e) { + buffer.release(); + throw e; } size = written; if (definedSize > 0 && definedSize < size) { + buffer.release(); throw new IOException("Out of size: " + size + " > " + definedSize); } if (byteBuf != null) { @@ -103,6 +106,13 @@ public void addContent(ByteBuf buffer, boolean last) size += localsize; if (byteBuf == null) { byteBuf = buffer; + } else if (localsize == 0) { + // Nothing to add and byteBuf already exists + buffer.release(); + } else if (byteBuf.readableBytes() == 0) { + // Previous buffer is empty, so just replace it + byteBuf.release(); + byteBuf = buffer; } else if (byteBuf instanceof CompositeByteBuf) { CompositeByteBuf cbb = (CompositeByteBuf) byteBuf; cbb.addComponent(true, buffer); @@ -115,33 +125,35 @@ public void addContent(ByteBuf buffer, boolean last) if (last) { setCompleted(); } else { - if (buffer == null) { - throw new NullPointerException("buffer"); - } + requireNonNull(buffer, "buffer"); } } @Override public void setContent(File file) throws IOException { - if (file == null) { - throw new NullPointerException("file"); - } + requireNonNull(file, "file"); long newsize = file.length(); if (newsize > Integer.MAX_VALUE) { - throw new IllegalArgumentException( - "File too big to be loaded in memory"); + throw new IllegalArgumentException("File too big to be loaded in memory"); } checkSize(newsize); - FileInputStream inputStream = new FileInputStream(file); - FileChannel fileChannel = inputStream.getChannel(); - byte[] array = new byte[(int) 
newsize]; - ByteBuffer byteBuffer = ByteBuffer.wrap(array); - int read = 0; - while (read < newsize) { - read += fileChannel.read(byteBuffer); + RandomAccessFile accessFile = new RandomAccessFile(file, "r"); + ByteBuffer byteBuffer; + try { + FileChannel fileChannel = accessFile.getChannel(); + try { + byte[] array = new byte[(int) newsize]; + byteBuffer = ByteBuffer.wrap(array); + int read = 0; + while (read < newsize) { + read += fileChannel.read(byteBuffer); + } + } finally { + fileChannel.close(); + } + } finally { + accessFile.close(); } - fileChannel.close(); - inputStream.close(); byteBuffer.flip(); if (byteBuf != null) { byteBuf.release(); @@ -222,9 +234,7 @@ public boolean isInMemory() { @Override public boolean renameTo(File dest) throws IOException { - if (dest == null) { - throw new NullPointerException("dest"); - } + requireNonNull(dest, "dest"); if (byteBuf == null) { // empty file if (!dest.createNewFile()) { @@ -233,24 +243,29 @@ public boolean renameTo(File dest) throws IOException { return true; } int length = byteBuf.readableBytes(); - FileOutputStream outputStream = new FileOutputStream(dest); - FileChannel fileChannel = outputStream.getChannel(); - int written = 0; - if (byteBuf.nioBufferCount() == 1) { - ByteBuffer byteBuffer = byteBuf.nioBuffer(); - while (written < length) { - written += fileChannel.write(byteBuffer); - } - } else { - ByteBuffer[] byteBuffers = byteBuf.nioBuffers(); - while (written < length) { - written += fileChannel.write(byteBuffers); + long written = 0; + RandomAccessFile accessFile = new RandomAccessFile(dest, "rw"); + try { + FileChannel fileChannel = accessFile.getChannel(); + try { + if (byteBuf.nioBufferCount() == 1) { + ByteBuffer byteBuffer = byteBuf.nioBuffer(); + while (written < length) { + written += fileChannel.write(byteBuffer); + } + } else { + ByteBuffer[] byteBuffers = byteBuf.nioBuffers(); + while (written < length) { + written += fileChannel.write(byteBuffers); + } + } + fileChannel.force(false); + } 
finally { + fileChannel.close(); } + } finally { + accessFile.close(); } - - fileChannel.force(false); - fileChannel.close(); - outputStream.close(); return written == length; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/Attribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/Attribute.java index 223cc6872f5..5250cc5f361 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/Attribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/Attribute.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/CaseIgnoringComparator.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/CaseIgnoringComparator.java index fb46572e13e..034b74b16eb 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/CaseIgnoringComparator.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/CaseIgnoringComparator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java index 6387ecaac70..84c056963dd 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactory.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -37,6 +37,15 @@ *

  • MemoryAttribute, DiskAttribute or MixedAttribute
  • *
  • MemoryFileUpload, DiskFileUpload or MixedFileUpload
  • * + * A good example of releasing HttpData once all work is done is as follows:
    + *
    {@code
    + *   for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) {
    + *     httpData.release();
    + *     factory.removeHttpDataFromClean(request, httpData);
    + *   }
    + *   factory.cleanAllHttpData();
    + *   decoder.destroy();
    + *  }
    */ public class DefaultHttpDataFactory implements HttpDataFactory { @@ -59,6 +68,10 @@ public class DefaultHttpDataFactory implements HttpDataFactory { private Charset charset = HttpConstants.DEFAULT_CHARSET; + private String baseDir; + + private boolean deleteOnExit; // false is a good default cause true leaks + /** * Keep all {@link HttpData}s until cleaning methods are called. * We need to use {@link IdentityHashMap} because different requests may be equal. @@ -67,7 +80,7 @@ public class DefaultHttpDataFactory implements HttpDataFactory { * different data items may be equal. */ private final Map> requestFileDeleteMap = - Collections.synchronizedMap(new IdentityHashMap>()); + Collections.synchronizedMap(new IdentityHashMap<>()); /** * HttpData will be in memory if less than default size (16KB). @@ -111,6 +124,25 @@ public DefaultHttpDataFactory(long minSize, Charset charset) { this.charset = charset; } + /** + * Override global {@link DiskAttribute#baseDirectory} and {@link DiskFileUpload#baseDirectory} values. + * + * @param baseDir directory path where to store disk attributes and file uploads. + */ + public void setBaseDir(String baseDir) { + this.baseDir = baseDir; + } + + /** + * Override global {@link DiskAttribute#deleteOnExitTemporaryFile} and + * {@link DiskFileUpload#deleteOnExitTemporaryFile} values. + * + * @param deleteOnExit true if temporary files should be deleted with the JVM, false otherwise. 
+ */ + public void setDeleteOnExit(boolean deleteOnExit) { + this.deleteOnExit = deleteOnExit; + } + @Override public void setMaxLimit(long maxSize) { this.maxSize = maxSize; @@ -122,7 +154,7 @@ public void setMaxLimit(long maxSize) { private List getList(HttpRequest request) { List list = requestFileDeleteMap.get(request); if (list == null) { - list = new ArrayList(); + list = new ArrayList<>(); requestFileDeleteMap.put(request, list); } return list; @@ -131,14 +163,14 @@ private List getList(HttpRequest request) { @Override public Attribute createAttribute(HttpRequest request, String name) { if (useDisk) { - Attribute attribute = new DiskAttribute(name, charset); + Attribute attribute = new DiskAttribute(name, charset, baseDir, deleteOnExit); attribute.setMaxSize(maxSize); List list = getList(request); list.add(attribute); return attribute; } if (checkSize) { - Attribute attribute = new MixedAttribute(name, minSize, charset); + Attribute attribute = new MixedAttribute(name, minSize, charset, baseDir, deleteOnExit); attribute.setMaxSize(maxSize); List list = getList(request); list.add(attribute); @@ -152,14 +184,14 @@ public Attribute createAttribute(HttpRequest request, String name) { @Override public Attribute createAttribute(HttpRequest request, String name, long definedSize) { if (useDisk) { - Attribute attribute = new DiskAttribute(name, definedSize, charset); + Attribute attribute = new DiskAttribute(name, definedSize, charset, baseDir, deleteOnExit); attribute.setMaxSize(maxSize); List list = getList(request); list.add(attribute); return attribute; } if (checkSize) { - Attribute attribute = new MixedAttribute(name, definedSize, minSize, charset); + Attribute attribute = new MixedAttribute(name, definedSize, minSize, charset, baseDir, deleteOnExit); attribute.setMaxSize(maxSize); List list = getList(request); list.add(attribute); @@ -186,11 +218,11 @@ public Attribute createAttribute(HttpRequest request, String name, String value) if (useDisk) { Attribute 
attribute; try { - attribute = new DiskAttribute(name, value, charset); + attribute = new DiskAttribute(name, value, charset, baseDir, deleteOnExit); attribute.setMaxSize(maxSize); } catch (IOException e) { // revert to Mixed mode - attribute = new MixedAttribute(name, value, minSize, charset); + attribute = new MixedAttribute(name, value, minSize, charset, baseDir, deleteOnExit); attribute.setMaxSize(maxSize); } checkHttpDataSize(attribute); @@ -199,7 +231,7 @@ public Attribute createAttribute(HttpRequest request, String name, String value) return attribute; } if (checkSize) { - Attribute attribute = new MixedAttribute(name, value, minSize, charset); + Attribute attribute = new MixedAttribute(name, value, minSize, charset, baseDir, deleteOnExit); attribute.setMaxSize(maxSize); checkHttpDataSize(attribute); List list = getList(request); @@ -222,7 +254,7 @@ public FileUpload createFileUpload(HttpRequest request, String name, String file long size) { if (useDisk) { FileUpload fileUpload = new DiskFileUpload(name, filename, contentType, - contentTransferEncoding, charset, size); + contentTransferEncoding, charset, size, baseDir, deleteOnExit); fileUpload.setMaxSize(maxSize); checkHttpDataSize(fileUpload); List list = getList(request); @@ -231,7 +263,7 @@ public FileUpload createFileUpload(HttpRequest request, String name, String file } if (checkSize) { FileUpload fileUpload = new MixedFileUpload(name, filename, contentType, - contentTransferEncoding, charset, size, minSize); + contentTransferEncoding, charset, size, minSize, baseDir, deleteOnExit); fileUpload.setMaxSize(maxSize); checkHttpDataSize(fileUpload); List list = getList(request); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DeleteFileOnExitHook.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DeleteFileOnExitHook.java new file mode 100644 index 00000000000..f93208d7216 --- /dev/null +++ 
b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DeleteFileOnExitHook.java @@ -0,0 +1,82 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.multipart; + +import java.io.File; +import java.util.Collections; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; + +/** + * DeleteFileOnExitHook. + */ +final class DeleteFileOnExitHook { + private static final Set FILES = Collections.newSetFromMap(new ConcurrentHashMap()); + + private DeleteFileOnExitHook() { + } + + static { + // DeleteOnExitHook must be the last shutdown hook to be invoked. + // Application shutdown hooks may add the first file to the + // delete on exit list and cause the DeleteOnExitHook to be + // registered during shutdown in progress. + Runtime.getRuntime().addShutdownHook(new Thread() { + + @Override + public void run() { + runHook(); + } + }); + } + + /** + * Remove from the pool to reduce space footprint. + * + * @param file tmp file path + */ + public static void remove(String file) { + FILES.remove(file); + } + + /** + * Add to the hook and clean up when the program exits. + * + * @param file tmp file path + */ + public static void add(String file) { + FILES.add(file); + } + + /** + * Check in the hook files. 
+ * + * @param file target file + * @return true or false + */ + public static boolean checkFileExist(String file) { + return FILES.contains(file); + } + + /** + * Clean up all the files. + */ + static void runHook() { + for (String filename : FILES) { + new File(filename).delete(); + } + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java index 74391885b28..56a9b930d67 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskAttribute.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -23,6 +23,7 @@ import java.nio.charset.Charset; import static io.netty.buffer.Unpooled.wrappedBuffer; +import static java.util.Objects.requireNonNull; /** * Disk implementation of Attributes @@ -36,6 +37,10 @@ public class DiskAttribute extends AbstractDiskHttpData implements Attribute { public static final String postfix = ".att"; + private String baseDir; + + private boolean deleteOnExit; + /** * Constructor used for huge Attribute */ @@ -43,16 +48,40 @@ public DiskAttribute(String name) { this(name, HttpConstants.DEFAULT_CHARSET); } + public DiskAttribute(String name, String baseDir, boolean deleteOnExit) { + this(name, HttpConstants.DEFAULT_CHARSET); + this.baseDir = baseDir == null ? 
baseDirectory : baseDir; + this.deleteOnExit = deleteOnExit; + } + public DiskAttribute(String name, long definedSize) { + this(name, definedSize, HttpConstants.DEFAULT_CHARSET, baseDirectory, deleteOnExitTemporaryFile); + } + + public DiskAttribute(String name, long definedSize, String baseDir, boolean deleteOnExit) { this(name, definedSize, HttpConstants.DEFAULT_CHARSET); + this.baseDir = baseDir == null ? baseDirectory : baseDir; + this.deleteOnExit = deleteOnExit; } public DiskAttribute(String name, Charset charset) { + this(name, charset, baseDirectory, deleteOnExitTemporaryFile); + } + + public DiskAttribute(String name, Charset charset, String baseDir, boolean deleteOnExit) { super(name, charset, 0); + this.baseDir = baseDir == null ? baseDirectory : baseDir; + this.deleteOnExit = deleteOnExit; } public DiskAttribute(String name, long definedSize, Charset charset) { + this(name, definedSize, charset, baseDirectory, deleteOnExitTemporaryFile); + } + + public DiskAttribute(String name, long definedSize, Charset charset, String baseDir, boolean deleteOnExit) { super(name, charset, definedSize); + this.baseDir = baseDir == null ? baseDirectory : baseDir; + this.deleteOnExit = deleteOnExit; } public DiskAttribute(String name, String value) throws IOException { @@ -60,8 +89,15 @@ public DiskAttribute(String name, String value) throws IOException { } public DiskAttribute(String name, String value, Charset charset) throws IOException { + this(name, value, charset, baseDirectory, deleteOnExitTemporaryFile); + } + + public DiskAttribute(String name, String value, Charset charset, + String baseDir, boolean deleteOnExit) throws IOException { super(name, charset, 0); // Attribute have no default size setValue(value); + this.baseDir = baseDir == null ? 
baseDirectory : baseDir; + this.deleteOnExit = deleteOnExit; } @Override @@ -77,9 +113,7 @@ public String getValue() throws IOException { @Override public void setValue(String value) throws IOException { - if (value == null) { - throw new NullPointerException("value"); - } + requireNonNull(value, "value"); byte [] bytes = value.getBytes(getCharset()); checkSize(bytes.length); ByteBuf buffer = wrappedBuffer(bytes); @@ -137,12 +171,12 @@ public String toString() { @Override protected boolean deleteOnExit() { - return deleteOnExitTemporaryFile; + return deleteOnExit; } @Override protected String getBaseDirectory() { - return baseDirectory; + return baseDir; } @Override @@ -194,7 +228,7 @@ public Attribute retainedDuplicate() { @Override public Attribute replace(ByteBuf content) { - DiskAttribute attr = new DiskAttribute(getName()); + DiskAttribute attr = new DiskAttribute(getName(), baseDir, deleteOnExit); attr.setCharset(getCharset()); if (content != null) { try { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskFileUpload.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskFileUpload.java index 1a5076f508e..e7b9a6e8d80 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskFileUpload.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/DiskFileUpload.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http.multipart; +import static java.util.Objects.requireNonNull; + import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelException; import io.netty.handler.codec.http.HttpHeaderNames; @@ -36,6 +38,10 @@ public class DiskFileUpload extends AbstractDiskHttpData implements FileUpload { public static final String postfix = ".tmp"; + private final String baseDir; + + private final boolean deleteOnExit; + private String filename; private String contentType; @@ -43,11 +49,19 @@ public class DiskFileUpload extends AbstractDiskHttpData implements FileUpload { private String contentTransferEncoding; public DiskFileUpload(String name, String filename, String contentType, - String contentTransferEncoding, Charset charset, long size) { + String contentTransferEncoding, Charset charset, long size, String baseDir, boolean deleteOnExit) { super(name, charset, size); setFilename(filename); setContentType(contentType); setContentTransferEncoding(contentTransferEncoding); + this.baseDir = baseDir == null ? 
baseDirectory : baseDir; + this.deleteOnExit = deleteOnExit; + } + + public DiskFileUpload(String name, String filename, String contentType, + String contentTransferEncoding, Charset charset, long size) { + this(name, filename, contentType, contentTransferEncoding, + charset, size, baseDirectory, deleteOnExitTemporaryFile); } @Override @@ -62,9 +76,7 @@ public String getFilename() { @Override public void setFilename(String filename) { - if (filename == null) { - throw new NullPointerException("filename"); - } + requireNonNull(filename, "filename"); this.filename = filename; } @@ -93,9 +105,7 @@ public int compareTo(FileUpload o) { @Override public void setContentType(String contentType) { - if (contentType == null) { - throw new NullPointerException("contentType"); - } + requireNonNull(contentType, "contentType"); this.contentType = contentType; } @@ -137,12 +147,12 @@ public String toString() { @Override protected boolean deleteOnExit() { - return deleteOnExitTemporaryFile; + return deleteOnExit; } @Override protected String getBaseDirectory() { - return baseDirectory; + return baseDir; } @Override @@ -195,7 +205,8 @@ public FileUpload retainedDuplicate() { @Override public FileUpload replace(ByteBuf content) { DiskFileUpload upload = new DiskFileUpload( - getName(), getFilename(), getContentType(), getContentTransferEncoding(), getCharset(), size); + getName(), getFilename(), getContentType(), getContentTransferEncoding(), getCharset(), size, + baseDir, deleteOnExit); if (content != null) { try { upload.setContent(content); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUpload.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUpload.java index 113ab54963a..35b97411f91 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUpload.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUpload.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use 
this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUploadUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUploadUtil.java index 11b9b85e4a6..6fa8131f607 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUploadUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/FileUploadUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java index f9d3c0d858f..b51981de99a 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpData.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -50,9 +50,8 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { /** * Set the content from the ChannelBuffer (erase any previous data) * - * @param buffer - * must be not null - * @throws IOException + * @param buffer Must be not null. + * @throws IOException If an IO error occurs when setting the content of this HttpData. */ void setContent(ByteBuf buffer) throws IOException; @@ -63,25 +62,23 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { * must be not null except if last is set to False * @param last * True of the buffer is the last one - * @throws IOException + * @throws IOException If an IO error occurs while adding content to this HttpData. */ void addContent(ByteBuf buffer, boolean last) throws IOException; /** * Set the content from the file (erase any previous data) * - * @param file - * must be not null - * @throws IOException + * @param file Must be not null. + * @throws IOException If an IO error occurs when setting the content of this HttpData. */ void setContent(File file) throws IOException; /** * Set the content from the inputStream (erase any previous data) * - * @param inputStream - * must be not null - * @throws IOException + * @param inputStream Must be not null. + * @throws IOException If an IO error occurs when setting the content of this HttpData. */ void setContent(InputStream inputStream) throws IOException; @@ -121,18 +118,20 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { void delete(); /** - * Returns the contents of the file item as an array of bytes. + * Returns the contents of the file item as an array of bytes.
    + * Note: this method will allocate a lot of memory, if the data is currently stored on the file system. * * @return the contents of the file item as an array of bytes. - * @throws IOException + * @throws IOException If an IO error occurs while reading the data contents of this HttpData. */ byte[] get() throws IOException; /** - * Returns the content of the file item as a ByteBuf + * Returns the content of the file item as a ByteBuf.
    + * Note: this method will allocate a lot of memory, if the data is currently stored on the file system. * * @return the content of the file item as a ByteBuf - * @throws IOException + * @throws IOException If an IO error occurs while reading the data contents of this HttpData. */ ByteBuf getByteBuf() throws IOException; @@ -153,7 +152,7 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { * * @return the contents of the file item as a String, using the default * character encoding. - * @throws IOException + * @throws IOException If an IO error occurs while reading the data contents of this HttpData. */ String getString() throws IOException; @@ -165,7 +164,7 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { * the charset to use * @return the contents of the file item as a String, using the specified * charset. - * @throws IOException + * @throws IOException If an IO error occurs while reading the data contents of this HttpData. */ String getString(Charset encoding) throws IOException; @@ -190,10 +189,9 @@ public interface HttpData extends InterfaceHttpData, ByteBufHolder { * the new file will be out of the cleaner of the factory that creates the * original InterfaceHttpData object. * - * @param dest - * destination file - must be not null - * @return True if the write is successful - * @throws IOException + * @param dest Destination file - must be not null. + * @return {@code true} if the write is successful. + * @throws IOException If an IO error occurs while renaming the underlying file of this HttpData. 
*/ boolean renameTo(File dest) throws IOException; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpDataFactory.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpDataFactory.java index 51d98b8b251..630c6d1af7a 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpDataFactory.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpDataFactory.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java index 359ef6e9960..a404cae7fb9 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostBodyUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,6 +16,7 @@ package io.netty.handler.codec.http.multipart; import io.netty.buffer.ByteBuf; +import io.netty.handler.codec.http.HttpConstants; /** * Shared Static object between HttpMessageDecoder, HttpPostRequestDecoder and HttpPostRequestEncoder @@ -34,6 +35,12 @@ final class HttpPostBodyUtil { */ public static final String DEFAULT_TEXT_CONTENT_TYPE = "text/plain"; + static final String BIT_7_STRING = "7bit"; + + static final String BIT_8_STRING = "8bit"; + + static final String BINARY_STRING = "binary"; + /** * Allowed mechanism for multipart * mechanism := "7bit" @@ -46,15 +53,15 @@ public enum TransferEncodingMechanism { /** * Default encoding */ - BIT7("7bit"), + BIT7(BIT_7_STRING), /** * Short lines but not in ASCII - no encoding */ - BIT8("8bit"), + BIT8(BIT_8_STRING), /** * Could be long text not in ASCII - no encoding */ - BINARY("binary"); + BINARY(BINARY_STRING); private final String value; @@ -150,4 +157,119 @@ static int findEndOfString(String sb) { return result; } + /** + * Try to find first LF or CRLF as Line Breaking + * + * @param buffer the buffer to search in + * @param index the index to start from in the buffer + * @return a relative position from index > 0 if LF or CRLF is found + * or < 0 if not found + */ + static int findLineBreak(ByteBuf buffer, int index) { + int toRead = buffer.readableBytes() - (index - buffer.readerIndex()); + int posFirstChar = buffer.bytesBefore(index, toRead, HttpConstants.LF); + if (posFirstChar == -1) { + // No LF, so neither CRLF + return -1; + } + if (posFirstChar > 0 && buffer.getByte(index + posFirstChar - 1) == HttpConstants.CR) { + posFirstChar--; + } + return posFirstChar; + } + + /** + * Try to find last LF or CRLF as 
Line Breaking + * + * @param buffer the buffer to search in + * @param index the index to start from in the buffer + * @return a relative position from index > 0 if LF or CRLF is found + * or < 0 if not found + */ + static int findLastLineBreak(ByteBuf buffer, int index) { + int candidate = findLineBreak(buffer, index); + int findCRLF = 0; + if (candidate >= 0) { + if (buffer.getByte(index + candidate) == HttpConstants.CR) { + findCRLF = 2; + } else { + findCRLF = 1; + } + candidate += findCRLF; + } + int next; + while (candidate > 0 && (next = findLineBreak(buffer, index + candidate)) >= 0) { + candidate += next; + if (buffer.getByte(index + candidate) == HttpConstants.CR) { + findCRLF = 2; + } else { + findCRLF = 1; + } + candidate += findCRLF; + } + return candidate - findCRLF; + } + + /** + * Try to find the delimiter, with LF or CRLF in front of it (added as delimiters) if needed + * + * @param buffer the buffer to search in + * @param index the index to start from in the buffer + * @param delimiter the delimiter as byte array + * @param precededByLineBreak true if it must be preceded by LF or CRLF, else false + * @return a relative position from index > 0 if delimiter found designing the start of it + * (including LF or CRLF is asked) + * or a number < 0 if delimiter is not found + * @throws IndexOutOfBoundsException + * if {@code offset + delimiter.length} is greater than {@code buffer.capacity} + */ + static int findDelimiter(ByteBuf buffer, int index, byte[] delimiter, boolean precededByLineBreak) { + final int delimiterLength = delimiter.length; + final int readerIndex = buffer.readerIndex(); + final int writerIndex = buffer.writerIndex(); + int toRead = writerIndex - index; + int newOffset = index; + boolean delimiterNotFound = true; + while (delimiterNotFound && delimiterLength <= toRead) { + // Find first position: delimiter + int posDelimiter = buffer.bytesBefore(newOffset, toRead, delimiter[0]); + if (posDelimiter < 0) { + return -1; + } + newOffset 
+= posDelimiter; + toRead -= posDelimiter; + // Now check for delimiter + if (toRead >= delimiterLength) { + delimiterNotFound = false; + for (int i = 0; i < delimiterLength; i++) { + if (buffer.getByte(newOffset + i) != delimiter[i]) { + newOffset++; + toRead--; + delimiterNotFound = true; + break; + } + } + } + if (!delimiterNotFound) { + // Delimiter found, find if necessary: LF or CRLF + if (precededByLineBreak && newOffset > readerIndex) { + if (buffer.getByte(newOffset - 1) == HttpConstants.LF) { + newOffset--; + // Check if CR before: not mandatory to be there + if (newOffset > readerIndex && buffer.getByte(newOffset - 1) == HttpConstants.CR) { + newOffset--; + } + } else { + // Delimiter with Line Break could be further: iterate after first char of delimiter + newOffset++; + toRead--; + delimiterNotFound = true; + continue; + } + } + return newOffset - readerIndex; + } + } + return -1; + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java index 8e3a90c009b..cc389cba8f3 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -31,18 +31,24 @@ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException; import io.netty.util.CharsetUtil; import io.netty.util.internal.InternalThreadLocalMap; +import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import java.io.IOException; import java.nio.charset.Charset; +import java.nio.charset.IllegalCharsetNameException; import java.nio.charset.UnsupportedCharsetException; import java.util.ArrayList; import java.util.List; import java.util.Map; import java.util.TreeMap; -import static io.netty.buffer.Unpooled.*; -import static io.netty.util.internal.ObjectUtil.*; +import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BINARY_STRING; +import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BIT_7_STRING; +import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.BIT_8_STRING; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; + /** * This decoder will decode Body and can handle POST BODY. 
@@ -75,12 +81,12 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest /** * HttpDatas from Body */ - private final List bodyListHttpData = new ArrayList(); + private final List bodyListHttpData = new ArrayList<>(); /** * HttpDatas as Map from Body */ - private final Map> bodyMapHttpData = new TreeMap>( + private final Map> bodyMapHttpData = new TreeMap<>( CaseIgnoringComparator.INSTANCE); /** @@ -96,7 +102,7 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest /** * If multipart, this is the boundary for the global multipart */ - private String multipartDataBoundary; + private final String multipartDataBoundary; /** * If multipart, there could be internal multiparts (mixed) to the global @@ -173,36 +179,43 @@ public HttpPostMultipartRequestDecoder(HttpDataFactory factory, HttpRequest requ * errors */ public HttpPostMultipartRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) { - this.request = checkNotNull(request, "request"); - this.charset = checkNotNull(charset, "charset"); - this.factory = checkNotNull(factory, "factory"); + this.request = requireNonNull(request, "request"); + this.charset = requireNonNull(charset, "charset"); + this.factory = requireNonNull(factory, "factory"); // Fill default values - setMultipart(this.request.headers().get(HttpHeaderNames.CONTENT_TYPE)); - if (request instanceof HttpContent) { - // Offer automatically if the given request is als type of HttpContent - // See #1089 - offer((HttpContent) request); - } else { - undecodedChunk = buffer(); - parseBody(); + String contentTypeValue = this.request.headers().get(HttpHeaderNames.CONTENT_TYPE); + if (contentTypeValue == null) { + throw new ErrorDataDecoderException("No '" + HttpHeaderNames.CONTENT_TYPE + "' header present."); } - } - /** - * Set from the request ContentType the multipartDataBoundary and the possible charset. 
- */ - private void setMultipart(String contentType) { - String[] dataBoundary = HttpPostRequestDecoder.getMultipartDataBoundary(contentType); + String[] dataBoundary = HttpPostRequestDecoder.getMultipartDataBoundary(contentTypeValue); if (dataBoundary != null) { multipartDataBoundary = dataBoundary[0]; if (dataBoundary.length > 1 && dataBoundary[1] != null) { - charset = Charset.forName(dataBoundary[1]); + try { + this.charset = Charset.forName(dataBoundary[1]); + } catch (IllegalCharsetNameException e) { + throw new ErrorDataDecoderException(e); + } } } else { multipartDataBoundary = null; } currentStatus = MultiPartStatus.HEADERDELIMITER; + + try { + if (request instanceof HttpContent) { + // Offer automatically if the given request is als type of HttpContent + // See #1089 + offer((HttpContent) request); + } else { + parseBody(); + } + } catch (Throwable e) { + destroy(); + PlatformDependent.throwException(e); + } } private void checkDestroyed() { @@ -321,21 +334,34 @@ public InterfaceHttpData getBodyHttpData(String name) { public HttpPostMultipartRequestDecoder offer(HttpContent content) { checkDestroyed(); - // Maybe we should better not copy here for performance reasons but this will need - // more care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage + if (content instanceof LastHttpContent) { + isLastChunk = true; + } + ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = buf.copy(); + undecodedChunk = + // Since the Handler will release the incoming later on, we need to copy it + // + // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity + // which is not really usable for us as we may exceed it once we add more bytes. 
+ buf.alloc().buffer(buf.readableBytes()).writeBytes(buf); } else { undecodedChunk.writeBytes(buf); } - if (content instanceof LastHttpContent) { - isLastChunk = true; - } parseBody(); if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) { - undecodedChunk.discardReadBytes(); + if (undecodedChunk.refCnt() == 1) { + // It's safe to call discardBytes() as we are the only owner of the buffer. + undecodedChunk.discardReadBytes(); + } else { + // There seems to be multiple references of the buffer. Let's copy the data and release the buffer to + // ensure we can give back memory to the system. + ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes()); + buffer.writeBytes(undecodedChunk); + undecodedChunk.release(); + undecodedChunk = buffer; + } } return this; } @@ -420,7 +446,7 @@ protected void addHttpData(InterfaceHttpData data) { } List datas = bodyMapHttpData.get(data.getName()); if (datas == null) { - datas = new ArrayList(1); + datas = new ArrayList<>(1); bodyMapHttpData.put(data.getName(), datas); } datas.add(data); @@ -496,9 +522,7 @@ private InterfaceHttpData decodeMultipart(MultiPartStatus state) { if (charsetAttribute != null) { try { localCharset = Charset.forName(charsetAttribute.getValue()); - } catch (IOException e) { - throw new ErrorDataDecoderException(e); - } catch (UnsupportedCharsetException e) { + } catch (IOException | UnsupportedCharsetException e) { throw new ErrorDataDecoderException(e); } } @@ -523,11 +547,7 @@ private InterfaceHttpData decodeMultipart(MultiPartStatus state) { currentAttribute = factory.createAttribute(request, cleanString(nameAttribute.getValue())); } - } catch (NullPointerException e) { - throw new ErrorDataDecoderException(e); - } catch (IllegalArgumentException e) { - throw new ErrorDataDecoderException(e); - } catch (IOException e) { + } catch (NullPointerException | IOException | IllegalArgumentException e) { throw new ErrorDataDecoderException(e); } if (localCharset != 
null) { @@ -535,7 +555,7 @@ private InterfaceHttpData decodeMultipart(MultiPartStatus state) { } } // load data - if (!loadDataMultipart(undecodedChunk, multipartDataBoundary, currentAttribute)) { + if (!loadDataMultipartOptimized(undecodedChunk, multipartDataBoundary, currentAttribute)) { // Delimiter is not found. Need more chunks. return null; } @@ -574,8 +594,6 @@ private InterfaceHttpData decodeMultipart(MultiPartStatus state) { /** * Skip control Characters - * - * @throws NotEnoughDataDecoderException */ private static void skipControlCharacters(ByteBuf undecodedChunk) { if (!undecodedChunk.hasArray()) { @@ -617,7 +635,7 @@ private static void skipControlCharactersStandard(ByteBuf undecodedChunk) { * @param closeDelimiterStatus * the next getStatus if the delimiter is a close delimiter * @return the next InterfaceHttpData if any - * @throws ErrorDataDecoderException + * @throws ErrorDataDecoderException If no multipart delimiter is found, or an error occurs during decoding. */ private InterfaceHttpData findMultipartDelimiter(String delimiter, MultiPartStatus dispositionStatus, MultiPartStatus closeDelimiterStatus) { @@ -632,7 +650,7 @@ private InterfaceHttpData findMultipartDelimiter(String delimiter, MultiPartStat skipOneLine(); String newline; try { - newline = readDelimiter(undecodedChunk, delimiter); + newline = readDelimiterOptimized(undecodedChunk, delimiter, charset); } catch (NotEnoughDataDecoderException ignored) { undecodedChunk.readerIndex(readerIndex); return null; @@ -660,19 +678,18 @@ private InterfaceHttpData findMultipartDelimiter(String delimiter, MultiPartStat * Find the next Disposition * * @return the next InterfaceHttpData if any - * @throws ErrorDataDecoderException */ private InterfaceHttpData findMultipartDisposition() { int readerIndex = undecodedChunk.readerIndex(); if (currentStatus == MultiPartStatus.DISPOSITION) { - currentFieldAttributes = new TreeMap(CaseIgnoringComparator.INSTANCE); + currentFieldAttributes = new 
TreeMap<>(CaseIgnoringComparator.INSTANCE); } // read many lines until empty line with newline found! Store all data while (!skipOneLine()) { String newline; try { skipControlCharacters(undecodedChunk); - newline = readLine(undecodedChunk, charset); + newline = readLineOptimized(undecodedChunk, charset); } catch (NotEnoughDataDecoderException ignored) { undecodedChunk.readerIndex(readerIndex); return null; @@ -693,9 +710,7 @@ private InterfaceHttpData findMultipartDisposition() { Attribute attribute; try { attribute = getContentDispositionAttribute(values); - } catch (NullPointerException e) { - throw new ErrorDataDecoderException(e); - } catch (IllegalArgumentException e) { + } catch (NullPointerException | IllegalArgumentException e) { throw new ErrorDataDecoderException(e); } currentFieldAttributes.put(attribute.getName(), attribute); @@ -706,9 +721,7 @@ private InterfaceHttpData findMultipartDisposition() { try { attribute = factory.createAttribute(request, HttpHeaderNames.CONTENT_TRANSFER_ENCODING.toString(), cleanString(contents[1])); - } catch (NullPointerException e) { - throw new ErrorDataDecoderException(e); - } catch (IllegalArgumentException e) { + } catch (NullPointerException | IllegalArgumentException e) { throw new ErrorDataDecoderException(e); } @@ -718,9 +731,7 @@ private InterfaceHttpData findMultipartDisposition() { try { attribute = factory.createAttribute(request, HttpHeaderNames.CONTENT_LENGTH.toString(), cleanString(contents[1])); - } catch (NullPointerException e) { - throw new ErrorDataDecoderException(e); - } catch (IllegalArgumentException e) { + } catch (NullPointerException | IllegalArgumentException e) { throw new ErrorDataDecoderException(e); } @@ -744,9 +755,7 @@ private InterfaceHttpData findMultipartDisposition() { Attribute attribute; try { attribute = factory.createAttribute(request, charsetHeader, cleanString(values)); - } catch (NullPointerException e) { - throw new ErrorDataDecoderException(e); - } catch 
(IllegalArgumentException e) { + } catch (NullPointerException | IllegalArgumentException e) { throw new ErrorDataDecoderException(e); } currentFieldAttributes.put(HttpHeaderValues.CHARSET, attribute); @@ -755,17 +764,13 @@ private InterfaceHttpData findMultipartDisposition() { try { attribute = factory.createAttribute(request, cleanString(contents[0]), contents[i]); - } catch (NullPointerException e) { - throw new ErrorDataDecoderException(e); - } catch (IllegalArgumentException e) { + } catch (NullPointerException | IllegalArgumentException e) { throw new ErrorDataDecoderException(e); } currentFieldAttributes.put(attribute.getName(), attribute); } } } - } else { - throw new ErrorDataDecoderException("Unknown Params: " + newline); } } // Is it a FileUpload @@ -813,12 +818,10 @@ private Attribute getContentDispositionAttribute(String... values) { } else if (FILENAME_ENCODED.equals(name)) { try { name = HttpHeaderValues.FILENAME.toString(); - String[] split = value.split("'", 3); + String[] split = cleanString(value).split("'", 3); value = QueryStringDecoder.decodeComponent(split[2], Charset.forName(split[0])); - } catch (ArrayIndexOutOfBoundsException e) { + } catch (ArrayIndexOutOfBoundsException | UnsupportedCharsetException e) { throw new ErrorDataDecoderException(e); - } catch (UnsupportedCharsetException e) { - throw new ErrorDataDecoderException(e); } } else { // otherwise we need to clean the value @@ -833,7 +836,7 @@ private Attribute getContentDispositionAttribute(String... values) { * @param delimiter * the delimiter to use * @return the InterfaceHttpData if any - * @throws ErrorDataDecoderException + * @throws ErrorDataDecoderException If an error occurs when decoding the multipart data. 
*/ protected InterfaceHttpData getFileUpload(String delimiter) { // eventually restart from existing FileUpload @@ -849,25 +852,27 @@ protected InterfaceHttpData getFileUpload(String delimiter) { } catch (IOException e) { throw new ErrorDataDecoderException(e); } - if (code.equals(HttpPostBodyUtil.TransferEncodingMechanism.BIT7.value())) { - localCharset = CharsetUtil.US_ASCII; - } else if (code.equals(HttpPostBodyUtil.TransferEncodingMechanism.BIT8.value())) { - localCharset = CharsetUtil.ISO_8859_1; - mechanism = TransferEncodingMechanism.BIT8; - } else if (code.equals(HttpPostBodyUtil.TransferEncodingMechanism.BINARY.value())) { - // no real charset, so let the default - mechanism = TransferEncodingMechanism.BINARY; - } else { - throw new ErrorDataDecoderException("TransferEncoding Unknown: " + code); + switch (code) { + case BIT_7_STRING: + localCharset = CharsetUtil.US_ASCII; + break; + case BIT_8_STRING: + localCharset = CharsetUtil.ISO_8859_1; + mechanism = TransferEncodingMechanism.BIT8; + break; + case BINARY_STRING: + // no real charset, so let the default + mechanism = TransferEncodingMechanism.BINARY; + break; + default: + throw new ErrorDataDecoderException("TransferEncoding Unknown: " + code); } } Attribute charsetAttribute = currentFieldAttributes.get(HttpHeaderValues.CHARSET); if (charsetAttribute != null) { try { localCharset = Charset.forName(charsetAttribute.getValue()); - } catch (IOException e) { - throw new ErrorDataDecoderException(e); - } catch (UnsupportedCharsetException e) { + } catch (IOException | UnsupportedCharsetException e) { throw new ErrorDataDecoderException(e); } } @@ -895,16 +900,12 @@ protected InterfaceHttpData getFileUpload(String delimiter) { cleanString(nameAttribute.getValue()), cleanString(filenameAttribute.getValue()), contentType, mechanism.value(), localCharset, size); - } catch (NullPointerException e) { - throw new ErrorDataDecoderException(e); - } catch (IllegalArgumentException e) { - throw new 
ErrorDataDecoderException(e); - } catch (IOException e) { + } catch (NullPointerException | IllegalArgumentException | IOException e) { throw new ErrorDataDecoderException(e); } } // load data as much as possible - if (!loadDataMultipart(undecodedChunk, delimiter, currentFileUpload)) { + if (!loadDataMultipartOptimized(undecodedChunk, delimiter, currentFileUpload)) { // Delimiter is not found. Need more chunks. return null; } @@ -933,19 +934,22 @@ protected InterfaceHttpData getFileUpload(String delimiter) { */ @Override public void destroy() { - checkDestroyed(); + // Release all data items, including those not yet pulled, only file based items cleanFiles(); + // Clean Memory based data + for (InterfaceHttpData httpData : bodyListHttpData) { + // Might have been already released by the user + if (httpData.refCnt() > 0) { + httpData.release(); + } + } + destroyed = true; if (undecodedChunk != null && undecodedChunk.refCnt() > 0) { undecodedChunk.release(); undecodedChunk = null; } - - // release all data which was not yet pulled - for (int i = bodyListHttpDataRank; i < bodyListHttpData.size(); i++) { - bodyListHttpData.get(i).release(); - } } /** @@ -988,76 +992,27 @@ private void cleanMixedAttributes() { * Need more chunks and reset the {@code readerIndex} to the previous * value */ - private static String readLineStandard(ByteBuf undecodedChunk, Charset charset) { + private static String readLineOptimized(ByteBuf undecodedChunk, Charset charset) { int readerIndex = undecodedChunk.readerIndex(); + ByteBuf line = null; try { - ByteBuf line = buffer(64); - - while (undecodedChunk.isReadable()) { - byte nextByte = undecodedChunk.readByte(); - if (nextByte == HttpConstants.CR) { - // check but do not changed readerIndex - nextByte = undecodedChunk.getByte(undecodedChunk.readerIndex()); - if (nextByte == HttpConstants.LF) { - // force read - undecodedChunk.readByte(); - return line.toString(charset); - } else { - // Write CR (not followed by LF) - 
line.writeByte(HttpConstants.CR); - } - } else if (nextByte == HttpConstants.LF) { - return line.toString(charset); - } else { - line.writeByte(nextByte); + if (undecodedChunk.isReadable()) { + int posLfOrCrLf = HttpPostBodyUtil.findLineBreak(undecodedChunk, undecodedChunk.readerIndex()); + if (posLfOrCrLf <= 0) { + throw new NotEnoughDataDecoderException(); } - } - } catch (IndexOutOfBoundsException e) { - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(e); - } - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - - /** - * Read one line up to the CRLF or LF - * - * @return the String from one line - * @throws NotEnoughDataDecoderException - * Need more chunks and reset the {@code readerIndex} to the previous - * value - */ - private static String readLine(ByteBuf undecodedChunk, Charset charset) { - if (!undecodedChunk.hasArray()) { - return readLineStandard(undecodedChunk, charset); - } - SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); - int readerIndex = undecodedChunk.readerIndex(); - try { - ByteBuf line = buffer(64); + try { + line = undecodedChunk.alloc().heapBuffer(posLfOrCrLf); + line.writeBytes(undecodedChunk, posLfOrCrLf); - while (sao.pos < sao.limit) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return line.toString(charset); - } else { - // Write CR (not followed by LF) - sao.pos--; - line.writeByte(HttpConstants.CR); - } - } else { - line.writeByte(nextByte); + byte nextByte = undecodedChunk.readByte(); + if (nextByte == HttpConstants.CR) { + // force read next byte since LF is the following one + undecodedChunk.readByte(); } - } else if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); return line.toString(charset); - } else { - line.writeByte(nextByte); + } finally { + line.release(); } } 
} catch (IndexOutOfBoundsException e) { @@ -1083,23 +1038,19 @@ private static String readLine(ByteBuf undecodedChunk, Charset charset) { * Need more chunks and reset the {@code readerIndex} to the previous * value */ - private static String readDelimiterStandard(ByteBuf undecodedChunk, String delimiter) { - int readerIndex = undecodedChunk.readerIndex(); + private static String readDelimiterOptimized(ByteBuf undecodedChunk, String delimiter, Charset charset) { + final int readerIndex = undecodedChunk.readerIndex(); + final byte[] bdelimiter = delimiter.getBytes(charset); + final int delimiterLength = bdelimiter.length; try { - StringBuilder sb = new StringBuilder(64); - int delimiterPos = 0; - int len = delimiter.length(); - while (undecodedChunk.isReadable() && delimiterPos < len) { - byte nextByte = undecodedChunk.readByte(); - if (nextByte == delimiter.charAt(delimiterPos)) { - delimiterPos++; - sb.append((char) nextByte); - } else { - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } + int delimiterPos = HttpPostBodyUtil.findDelimiter(undecodedChunk, readerIndex, bdelimiter, false); + if (delimiterPos < 0) { + // delimiter not found so break here ! + undecodedChunk.readerIndex(readerIndex); + throw new NotEnoughDataDecoderException(); } + StringBuilder sb = new StringBuilder(delimiter); + undecodedChunk.readerIndex(readerIndex + delimiterPos + delimiterLength); // Now check if either opening delimiter or closing delimiter if (undecodedChunk.isReadable()) { byte nextByte = undecodedChunk.readByte(); @@ -1164,220 +1115,90 @@ private static String readDelimiterStandard(ByteBuf undecodedChunk, String delim } /** - * Read one line up to --delimiter or --delimiter-- and if existing the CRLF - * or LF. Note that CRLF or LF are mandatory for opening delimiter - * (--delimiter) but not for closing delimiter (--delimiter--) since some - * clients does not include CRLF in this case. 
+ * Rewrite buffer in order to skip lengthToSkip bytes from current readerIndex, + * such that any readable bytes available after readerIndex + lengthToSkip (so before writerIndex) + * are moved at readerIndex position, + * therefore decreasing writerIndex of lengthToSkip at the end of the process. * - * @param delimiter - * of the form --string, such that '--' is already included - * @return the String from one line as the delimiter searched (opening or - * closing) - * @throws NotEnoughDataDecoderException - * Need more chunks and reset the readerInder to the previous - * value + * @param buffer the buffer to rewrite from current readerIndex + * @param lengthToSkip the size to skip from readerIndex */ - private static String readDelimiter(ByteBuf undecodedChunk, String delimiter) { - if (!undecodedChunk.hasArray()) { - return readDelimiterStandard(undecodedChunk, delimiter); + private static void rewriteCurrentBuffer(ByteBuf buffer, int lengthToSkip) { + if (lengthToSkip == 0) { + return; } - SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); - int readerIndex = undecodedChunk.readerIndex(); - int delimiterPos = 0; - int len = delimiter.length(); - try { - StringBuilder sb = new StringBuilder(64); - // check conformity with delimiter - while (sao.pos < sao.limit && delimiterPos < len) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == delimiter.charAt(delimiterPos)) { - delimiterPos++; - sb.append((char) nextByte); - } else { - // delimiter not found so break here ! 
- undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } - // Now check if either opening delimiter or closing delimiter - if (sao.pos < sao.limit) { - byte nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - // first check for opening delimiter - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else { - // error since CR must be followed by LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - // same first check for opening delimiter where LF used with - // no CR - sao.setReadPosition(0); - return sb.toString(); - } else if (nextByte == '-') { - sb.append('-'); - // second check for closing delimiter - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == '-') { - sb.append('-'); - // now try to find if CRLF or LF there - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.CR) { - if (sao.pos < sao.limit) { - nextByte = sao.bytes[sao.pos++]; - if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // error CR without LF - // delimiter not found so break here ! - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else { - // error CR without LF - // delimiter not found so break here ! 
- undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); - } - } else if (nextByte == HttpConstants.LF) { - sao.setReadPosition(0); - return sb.toString(); - } else { - // No CRLF but ok however (Adobe Flash - // uploader) - // minus 1 since we read one char ahead but - // should not - sao.setReadPosition(1); - return sb.toString(); - } - } - // FIXME what do we do here? - // either considering it is fine, either waiting for - // more data to come? - // lets try considering it is fine... - sao.setReadPosition(0); - return sb.toString(); - } - // whatever now => error since incomplete - // only one '-' => not enough or whatever not enough - // element - } - } - } - } catch (IndexOutOfBoundsException e) { - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(e); + final int readerIndex = buffer.readerIndex(); + final int readableBytes = buffer.readableBytes(); + if (readableBytes == lengthToSkip) { + buffer.readerIndex(readerIndex); + buffer.writerIndex(readerIndex); + return; } - undecodedChunk.readerIndex(readerIndex); - throw new NotEnoughDataDecoderException(); + buffer.setBytes(readerIndex, buffer, readerIndex + lengthToSkip, readableBytes - lengthToSkip); + buffer.readerIndex(readerIndex); + buffer.writerIndex(readerIndex + readableBytes - lengthToSkip); } /** * Load the field value or file data from a Multipart request * * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks - * @throws ErrorDataDecoderException - */ - private static boolean loadDataMultipartStandard(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { - final int startReaderIndex = undecodedChunk.readerIndex(); - final int delimeterLength = delimiter.length(); - int index = 0; - int lastPosition = startReaderIndex; - byte prevByte = HttpConstants.LF; - boolean delimiterFound = false; - while (undecodedChunk.isReadable()) { - final byte nextByte = 
undecodedChunk.readByte(); - // Check the delimiter - if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) { - index++; - if (delimeterLength == index) { - delimiterFound = true; - break; - } - continue; - } - lastPosition = undecodedChunk.readerIndex(); - if (nextByte == HttpConstants.LF) { - index = 0; - lastPosition -= (prevByte == HttpConstants.CR)? 2 : 1; - } - prevByte = nextByte; - } - if (prevByte == HttpConstants.CR) { - lastPosition--; - } - ByteBuf content = undecodedChunk.copy(startReaderIndex, lastPosition - startReaderIndex); - try { - httpData.addContent(content, delimiterFound); - } catch (IOException e) { - throw new ErrorDataDecoderException(e); - } - undecodedChunk.readerIndex(lastPosition); - return delimiterFound; - } - - /** - * Load the field value from a Multipart request - * - * @return {@code true} if the last chunk is loaded (boundary delimiter found), {@code false} if need more chunks - * @throws ErrorDataDecoderException */ - private static boolean loadDataMultipart(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { - if (!undecodedChunk.hasArray()) { - return loadDataMultipartStandard(undecodedChunk, delimiter, httpData); + private static boolean loadDataMultipartOptimized(ByteBuf undecodedChunk, String delimiter, HttpData httpData) { + if (!undecodedChunk.isReadable()) { + return false; } - final SeekAheadOptimize sao = new SeekAheadOptimize(undecodedChunk); final int startReaderIndex = undecodedChunk.readerIndex(); - final int delimeterLength = delimiter.length(); - int index = 0; - int lastRealPos = sao.pos; - byte prevByte = HttpConstants.LF; - boolean delimiterFound = false; - while (sao.pos < sao.limit) { - final byte nextByte = sao.bytes[sao.pos++]; - // Check the delimiter - if (prevByte == HttpConstants.LF && nextByte == delimiter.codePointAt(index)) { - index++; - if (delimeterLength == index) { - delimiterFound = true; - break; + final byte[] bdelimiter = 
delimiter.getBytes(httpData.getCharset()); + int posDelimiter = HttpPostBodyUtil.findDelimiter(undecodedChunk, startReaderIndex, bdelimiter, true); + if (posDelimiter < 0) { + // Not found but however perhaps because incomplete so search LF or CRLF from the end. + // Possible last bytes contain partially delimiter + // (delimiter is possibly partially there, at least 1 missing byte), + // therefore searching last delimiter.length +1 (+1 for CRLF instead of LF) + int lastPosition = undecodedChunk.readableBytes() - bdelimiter.length - 1; + if (lastPosition < 0) { + // Not enough bytes, but at most delimiter.length bytes available so can still try to find CRLF there + lastPosition = 0; + } + posDelimiter = HttpPostBodyUtil.findLastLineBreak(undecodedChunk, startReaderIndex + lastPosition); + if (posDelimiter < 0) { + // not found so this chunk can be fully added + ByteBuf content = undecodedChunk.copy(); + try { + httpData.addContent(content, false); + } catch (IOException e) { + throw new ErrorDataDecoderException(e); } - continue; + undecodedChunk.readerIndex(startReaderIndex); + undecodedChunk.writerIndex(startReaderIndex); + return false; } - lastRealPos = sao.pos; - if (nextByte == HttpConstants.LF) { - index = 0; - lastRealPos -= (prevByte == HttpConstants.CR)? 
2 : 1; + // posDelimiter is not from startReaderIndex but from startReaderIndex + lastPosition + posDelimiter += lastPosition; + if (posDelimiter == 0) { + // Nothing to add + return false; } - prevByte = nextByte; - } - if (prevByte == HttpConstants.CR) { - lastRealPos--; + // Not fully but still some bytes to provide: httpData is not yet finished since delimiter not found + ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter); + try { + httpData.addContent(content, false); + } catch (IOException e) { + throw new ErrorDataDecoderException(e); + } + rewriteCurrentBuffer(undecodedChunk, posDelimiter); + return false; } - final int lastPosition = sao.getReadPosition(lastRealPos); - final ByteBuf content = undecodedChunk.copy(startReaderIndex, lastPosition - startReaderIndex); + // Delimiter found at posDelimiter, including LF or CRLF, so httpData has its last chunk + ByteBuf content = undecodedChunk.copy(startReaderIndex, posDelimiter); try { - httpData.addContent(content, delimiterFound); + httpData.addContent(content, true); } catch (IOException e) { throw new ErrorDataDecoderException(e); } - undecodedChunk.readerIndex(lastPosition); - return delimiterFound; + rewriteCurrentBuffer(undecodedChunk, posDelimiter); + return true; } /** @@ -1445,7 +1266,7 @@ private boolean skipOneLine() { * follows by several values that were separated by ';' or ',' */ private static String[] splitMultipartHeader(String sb) { - ArrayList headers = new ArrayList(1); + ArrayList headers = new ArrayList<>(1); int nameStart; int nameEnd; int colonEnd; @@ -1515,6 +1336,17 @@ private static String[] splitMultipartHeaderValues(String svalue) { } } values.add(svalue.substring(start)); - return values.toArray(new String[values.size()]); + return values.toArray(new String[0]); + } + + /** + * This method is package private intentionally in order to allow during tests + * to access to the amount of memory allocated (capacity) within the private + * ByteBuf undecodedChunk + * + * 
@return the number of bytes the internal buffer can contain + */ + int getCurrentAllocatedCapacity() { + return undecodedChunk.capacity(); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java index 0c106264063..7f6a681d511 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http.multipart; +import static java.util.Objects.requireNonNull; + import io.netty.handler.codec.DecoderException; import io.netty.handler.codec.http.HttpConstants; import io.netty.handler.codec.http.HttpContent; @@ -83,15 +85,9 @@ public HttpPostRequestDecoder(HttpDataFactory factory, HttpRequest request) { * errors */ public HttpPostRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) { - if (factory == null) { - throw new NullPointerException("factory"); - } - if (request == null) { - throw new NullPointerException("request"); - } - if (charset == null) { - throw new NullPointerException("charset"); - } + requireNonNull(factory, "factory"); + requireNonNull(request, "request"); + requireNonNull(charset, "charset"); // Fill default values if (isMultipart(request)) { decoder = new HttpPostMultipartRequestDecoder(factory, request, charset); @@ -140,11 +136,11 @@ protected enum MultiPartStatus { * @return True if the request is a 
Multipart request */ public static boolean isMultipart(HttpRequest request) { - if (request.headers().contains(HttpHeaderNames.CONTENT_TYPE)) { - return getMultipartDataBoundary(request.headers().get(HttpHeaderNames.CONTENT_TYPE)) != null; - } else { - return false; + String mimeType = request.headers().get(HttpHeaderNames.CONTENT_TYPE); + if (mimeType != null && mimeType.startsWith(HttpHeaderValues.MULTIPART_FORM_DATA.toString())) { + return getMultipartDataBoundary(mimeType) != null; } + return false; } /** diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java index a56431e2226..425718714b0 100755 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -34,7 +34,6 @@ import io.netty.handler.codec.http.HttpVersion; import io.netty.handler.codec.http.LastHttpContent; import io.netty.handler.stream.ChunkedInput; -import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.StringUtil; import java.io.File; @@ -46,11 +45,12 @@ import java.util.List; import java.util.ListIterator; import java.util.Map; +import java.util.concurrent.ThreadLocalRandom; import java.util.regex.Pattern; import static io.netty.buffer.Unpooled.wrappedBuffer; -import static io.netty.util.internal.ObjectUtil.checkNotNull; import static java.util.AbstractMap.SimpleImmutableEntry; +import static java.util.Objects.requireNonNull; /** * This encoder will help to encode Request for a FORM as POST. @@ -88,18 +88,19 @@ public enum EncoderMode { * will be treated as distinct fields. 
* * Reference: - * http://www.w3.org/TR/html5/forms.html#multipart-form-data + * https://www.w3.org/TR/html5/forms.html#multipart-form-data */ HTML5 } + @SuppressWarnings("rawtypes") private static final Map.Entry[] percentEncodings; static { percentEncodings = new Map.Entry[] { - new SimpleImmutableEntry(Pattern.compile("\\*"), "%2A"), - new SimpleImmutableEntry(Pattern.compile("\\+"), "%20"), - new SimpleImmutableEntry(Pattern.compile("~"), "%7E") + new SimpleImmutableEntry<>(Pattern.compile("\\*"), "%2A"), + new SimpleImmutableEntry<>(Pattern.compile("\\+"), "%20"), + new SimpleImmutableEntry<>(Pattern.compile("~"), "%7E") }; } @@ -208,19 +209,19 @@ public HttpPostRequestEncoder( HttpDataFactory factory, HttpRequest request, boolean multipart, Charset charset, EncoderMode encoderMode) throws ErrorDataEncoderException { - this.request = checkNotNull(request, "request"); - this.charset = checkNotNull(charset, "charset"); - this.factory = checkNotNull(factory, "factory"); + this.request = requireNonNull(request, "request"); + this.charset = requireNonNull(charset, "charset"); + this.factory = requireNonNull(factory, "factory"); if (HttpMethod.TRACE.equals(request.method())) { throw new ErrorDataEncoderException("Cannot create a Encoder if request is a TRACE"); } // Fill default values - bodyListDatas = new ArrayList(); + bodyListDatas = new ArrayList<>(); // default mode isLastChunk = false; isLastChunkSent = false; isMultipart = multipart; - multipartHttpDatas = new ArrayList(); + multipartHttpDatas = new ArrayList<>(); this.encoderMode = encoderMode; if (isMultipart) { initDataMultipart(); @@ -288,7 +289,7 @@ private void initMixedMultipart() { */ private static String getNewMultipartDelimiter() { // construct a generated delimiter - return Long.toHexString(PlatformDependent.threadLocalRandom().nextLong()); + return Long.toHexString(ThreadLocalRandom.current().nextLong()); } /** @@ -309,9 +310,7 @@ public List getBodyListAttributes() { * if the encoding is in 
error or if the finalize were already done */ public void setBodyHttpDatas(List datas) throws ErrorDataEncoderException { - if (datas == null) { - throw new NullPointerException("datas"); - } + requireNonNull(datas, "datas"); globalBodySize = 0; bodyListDatas.clear(); currentFileUpload = null; @@ -336,7 +335,7 @@ public void setBodyHttpDatas(List datas) throws ErrorDataEnco */ public void addBodyAttribute(String name, String value) throws ErrorDataEncoderException { String svalue = value != null? value : StringUtil.EMPTY_STRING; - Attribute data = factory.createAttribute(request, checkNotNull(name, "name"), svalue); + Attribute data = factory.createAttribute(request, requireNonNull(name, "name"), svalue); addBodyHttpData(data); } @@ -382,8 +381,8 @@ public void addBodyFileUpload(String name, File file, String contentType, boolea */ public void addBodyFileUpload(String name, String filename, File file, String contentType, boolean isText) throws ErrorDataEncoderException { - checkNotNull(name, "name"); - checkNotNull(file, "file"); + requireNonNull(name, "name"); + requireNonNull(file, "file"); if (filename == null) { filename = StringUtil.EMPTY_STRING; } @@ -447,7 +446,7 @@ public void addBodyHttpData(InterfaceHttpData data) throws ErrorDataEncoderExcep if (headerFinalized) { throw new ErrorDataEncoderException("Cannot add value once finalized"); } - bodyListDatas.add(checkNotNull(data, "data")); + bodyListDatas.add(requireNonNull(data, "data")); if (!isMultipart) { if (data instanceof Attribute) { Attribute attribute = (Attribute) data; @@ -637,7 +636,7 @@ public void addBodyHttpData(InterfaceHttpData data) throws ErrorDataEncoderExcep replacement.append("; ") .append(HttpHeaderValues.FILENAME) .append("=\"") - .append(fileUpload.getFilename()) + .append(currentFileUpload.getFilename()) .append('"'); } @@ -779,12 +778,11 @@ public HttpRequest finalizeRequest() throws ErrorDataEncoderException { } // Now consider size for chunk or not long realSize = globalBodySize; 
- if (isMultipart) { - iterator = multipartHttpDatas.listIterator(); - } else { + if (!isMultipart) { realSize -= 1; // last '&' removed - iterator = multipartHttpDatas.listIterator(); } + iterator = multipartHttpDatas.listIterator(); + headers.set(HttpHeaderNames.CONTENT_LENGTH, String.valueOf(realSize)); if (realSize > HttpPostBodyUtil.chunkSize || isMultipart) { isChunked = true; @@ -867,7 +865,7 @@ private String encodeAttribute(String s, Charset charset) throws ErrorDataEncode /** * - * @return the next ByteBuf to send as a HttpChunk and modifying currentBuffer accordingly + * @return the next ByteBuf to send as an HttpChunk and modifying currentBuffer accordingly */ private ByteBuf fillByteBuf() { int length = currentBuffer.readableBytes(); @@ -944,17 +942,15 @@ private HttpContent encodeNextChunkUrlEncoded(int sizeleft) throws ErrorDataEnco // Set name= if (isKey) { String key = currentData.getName(); - buffer = wrappedBuffer(key.getBytes()); + buffer = wrappedBuffer(key.getBytes(charset)); isKey = false; if (currentBuffer == null) { - currentBuffer = wrappedBuffer(buffer, wrappedBuffer("=".getBytes())); - // continue - size -= buffer.readableBytes() + 1; + currentBuffer = wrappedBuffer(buffer, wrappedBuffer("=".getBytes(charset))); } else { - currentBuffer = wrappedBuffer(currentBuffer, buffer, wrappedBuffer("=".getBytes())); - // continue - size -= buffer.readableBytes() + 1; + currentBuffer = wrappedBuffer(currentBuffer, buffer, wrappedBuffer("=".getBytes(charset))); } + // continue + size -= buffer.readableBytes() + 1; if (currentBuffer.readableBytes() >= HttpPostBodyUtil.chunkSize) { buffer = fillByteBuf(); return new DefaultHttpContent(buffer); @@ -972,14 +968,18 @@ private HttpContent encodeNextChunkUrlEncoded(int sizeleft) throws ErrorDataEnco ByteBuf delimiter = null; if (buffer.readableBytes() < size) { isKey = true; - delimiter = iterator.hasNext() ? wrappedBuffer("&".getBytes()) : null; + delimiter = iterator.hasNext() ? 
wrappedBuffer("&".getBytes(charset)) : null; } // End for current InterfaceHttpData, need potentially more data if (buffer.capacity() == 0) { currentData = null; if (currentBuffer == null) { - currentBuffer = delimiter; + if (delimiter == null) { + return null; + } else { + currentBuffer = delimiter; + } } else { if (delimiter != null) { currentBuffer = wrappedBuffer(currentBuffer, delimiter); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java index 1b209d6569d..1ee3115214d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,6 +16,7 @@ package io.netty.handler.codec.http.multipart; import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.HttpConstants; import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.HttpRequest; @@ -26,6 +27,8 @@ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException; import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.MultiPartStatus; import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException; +import io.netty.util.ByteProcessor; +import io.netty.util.internal.StringUtil; import java.io.IOException; import java.nio.charset.Charset; @@ -34,8 +37,9 @@ import 
java.util.Map; import java.util.TreeMap; -import static io.netty.buffer.Unpooled.*; -import static io.netty.util.internal.ObjectUtil.*; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; + /** * This decoder will decode Body and can handle POST BODY. @@ -68,12 +72,12 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD /** * HttpDatas from Body */ - private final List bodyListHttpData = new ArrayList(); + private final List bodyListHttpData = new ArrayList<>(); /** * HttpDatas as Map from Body */ - private final Map> bodyMapHttpData = new TreeMap>( + private final Map> bodyMapHttpData = new TreeMap<>( CaseIgnoringComparator.INSTANCE); /** @@ -145,16 +149,20 @@ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest reque * errors */ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) { - this.request = checkNotNull(request, "request"); - this.charset = checkNotNull(charset, "charset"); - this.factory = checkNotNull(factory, "factory"); - if (request instanceof HttpContent) { - // Offer automatically if the given request is als type of HttpContent - // See #1089 - offer((HttpContent) request); - } else { - undecodedChunk = buffer(); - parseBody(); + this.request = requireNonNull(request, "request"); + this.charset = requireNonNull(charset, "charset"); + this.factory = requireNonNull(factory, "factory"); + try { + if (request instanceof HttpContent) { + // Offer automatically if the given request is as type of HttpContent + // See #1089 + offer((HttpContent) request); + } else { + parseBody(); + } + } catch (Throwable e) { + destroy(); + throw e; } } @@ -274,21 +282,34 @@ public InterfaceHttpData getBodyHttpData(String name) { public HttpPostStandardRequestDecoder offer(HttpContent content) { checkDestroyed(); - // Maybe we should better not copy here for performance reasons but this will need - // more 
care by the caller to release the content in a correct manner later - // So maybe something to optimize on a later stage + if (content instanceof LastHttpContent) { + isLastChunk = true; + } + ByteBuf buf = content.content(); if (undecodedChunk == null) { - undecodedChunk = buf.copy(); + undecodedChunk = + // Since the Handler will release the incoming later on, we need to copy it + // + // We are explicit allocate a buffer and NOT calling copy() as otherwise it may set a maxCapacity + // which is not really usable for us as we may exceed it once we add more bytes. + buf.alloc().buffer(buf.readableBytes()).writeBytes(buf); } else { undecodedChunk.writeBytes(buf); } - if (content instanceof LastHttpContent) { - isLastChunk = true; - } parseBody(); if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) { - undecodedChunk.discardReadBytes(); + if (undecodedChunk.refCnt() == 1) { + // It's safe to call discardBytes() as we are the only owner of the buffer. + undecodedChunk.discardReadBytes(); + } else { + // There seems to be multiple references of the buffer. Let's copy the data and release the buffer to + // ensure we can give back memory to the system. 
+ ByteBuf buffer = undecodedChunk.alloc().buffer(undecodedChunk.readableBytes()); + buffer.writeBytes(undecodedChunk); + undecodedChunk.release(); + undecodedChunk = buffer; + } } return this; } @@ -367,11 +388,8 @@ protected void addHttpData(InterfaceHttpData data) { if (data == null) { return; } - List datas = bodyMapHttpData.get(data.getName()); - if (datas == null) { - datas = new ArrayList(1); - bodyMapHttpData.put(data.getName(), datas); - } + List datas = bodyMapHttpData.computeIfAbsent( + data.getName(), k -> new ArrayList<>(1)); datas.add(data); bodyListHttpData.add(data); } @@ -423,7 +441,7 @@ private void parseBodyAttributesStandard() { if (read == '&') { currentStatus = MultiPartStatus.DISPOSITION; ampersandpos = currentpos - 1; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = true; } else if (read == HttpConstants.CR) { @@ -433,7 +451,7 @@ private void parseBodyAttributesStandard() { if (read == HttpConstants.LF) { currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 2; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; } else { @@ -446,7 +464,7 @@ private void parseBodyAttributesStandard() { } else if (read == HttpConstants.LF) { currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 1; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; } @@ -460,32 +478,24 @@ private void parseBodyAttributesStandard() { // special case ampersandpos = currentpos; if (ampersandpos > firstpos) { - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + 
setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); } else if (!currentAttribute.isCompleted()) { - setFinalBuffer(EMPTY_BUFFER); + setFinalBuffer(Unpooled.EMPTY_BUFFER); } firstpos = currentpos; currentStatus = MultiPartStatus.EPILOGUE; - undecodedChunk.readerIndex(firstpos); - return; - } - if (contRead && currentAttribute != null) { + } else if (contRead && currentAttribute != null && currentStatus == MultiPartStatus.FIELD) { // reset index except if to continue in case of FIELD getStatus - if (currentStatus == MultiPartStatus.FIELD) { - currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos), - false); - firstpos = currentpos; - } - undecodedChunk.readerIndex(firstpos); - } else { - // end of line or end of block so keep index to last valid position - undecodedChunk.readerIndex(firstpos); + currentAttribute.addContent(undecodedChunk.retainedSlice(firstpos, currentpos - firstpos), + false); + firstpos = currentpos; } + undecodedChunk.readerIndex(firstpos); } catch (ErrorDataDecoderException e) { // error while decoding undecodedChunk.readerIndex(firstpos); throw e; - } catch (IOException e) { + } catch (IOException | IllegalArgumentException e) { // error while decoding undecodedChunk.readerIndex(firstpos); throw new ErrorDataDecoderException(e); @@ -501,6 +511,9 @@ private void parseBodyAttributesStandard() { * errors */ private void parseBodyAttributes() { + if (undecodedChunk == null) { + return; + } if (!undecodedChunk.hasArray()) { parseBodyAttributesStandard(); return; @@ -544,7 +557,7 @@ private void parseBodyAttributes() { if (read == '&') { currentStatus = MultiPartStatus.DISPOSITION; ampersandpos = currentpos - 1; - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = true; } else if (read == HttpConstants.CR) { @@ -555,7 +568,7 @@ private void parseBodyAttributes() 
{ currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 2; sao.setReadPosition(0); - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; break loop; @@ -573,7 +586,7 @@ private void parseBodyAttributes() { currentStatus = MultiPartStatus.PREEPILOGUE; ampersandpos = currentpos - 1; sao.setReadPosition(0); - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); firstpos = currentpos; contRead = false; break loop; @@ -590,36 +603,24 @@ private void parseBodyAttributes() { // special case ampersandpos = currentpos; if (ampersandpos > firstpos) { - setFinalBuffer(undecodedChunk.copy(firstpos, ampersandpos - firstpos)); + setFinalBuffer(undecodedChunk.retainedSlice(firstpos, ampersandpos - firstpos)); } else if (!currentAttribute.isCompleted()) { - setFinalBuffer(EMPTY_BUFFER); + setFinalBuffer(Unpooled.EMPTY_BUFFER); } firstpos = currentpos; currentStatus = MultiPartStatus.EPILOGUE; - undecodedChunk.readerIndex(firstpos); - return; - } - if (contRead && currentAttribute != null) { + } else if (contRead && currentAttribute != null && currentStatus == MultiPartStatus.FIELD) { // reset index except if to continue in case of FIELD getStatus - if (currentStatus == MultiPartStatus.FIELD) { - currentAttribute.addContent(undecodedChunk.copy(firstpos, currentpos - firstpos), - false); - firstpos = currentpos; - } - undecodedChunk.readerIndex(firstpos); - } else { - // end of line or end of block so keep index to last valid position - undecodedChunk.readerIndex(firstpos); + currentAttribute.addContent(undecodedChunk.retainedSlice(firstpos, currentpos - firstpos), + false); + firstpos = currentpos; } + undecodedChunk.readerIndex(firstpos); } catch (ErrorDataDecoderException e) { // error while decoding 
undecodedChunk.readerIndex(firstpos); throw e; - } catch (IOException e) { - // error while decoding - undecodedChunk.readerIndex(firstpos); - throw new ErrorDataDecoderException(e); - } catch (IllegalArgumentException e) { + } catch (IOException | IllegalArgumentException e) { // error while decoding undecodedChunk.readerIndex(firstpos); throw new ErrorDataDecoderException(e); @@ -628,8 +629,10 @@ private void parseBodyAttributes() { private void setFinalBuffer(ByteBuf buffer) throws IOException { currentAttribute.addContent(buffer, true); - String value = decodeAttribute(currentAttribute.getByteBuf().toString(charset), charset); - currentAttribute.setValue(value); + ByteBuf decodedBuf = decodeAttribute(currentAttribute.getByteBuf(), charset); + if (decodedBuf != null) { // override content only when ByteBuf needed decoding + currentAttribute.setContent(decodedBuf); + } addHttpData(currentAttribute); currentAttribute = null; } @@ -647,14 +650,43 @@ private static String decodeAttribute(String s, Charset charset) { } } + private static ByteBuf decodeAttribute(ByteBuf b, Charset charset) { + int firstEscaped = b.forEachByte(new UrlEncodedDetector()); + if (firstEscaped == -1) { + return null; // nothing to decode + } + + ByteBuf buf = b.alloc().buffer(b.readableBytes()); + UrlDecoder urlDecode = new UrlDecoder(buf); + int idx = b.forEachByte(urlDecode); + if (urlDecode.nextEscapedIdx != 0) { // incomplete hex byte + if (idx == -1) { + idx = b.readableBytes() - 1; + } + idx -= urlDecode.nextEscapedIdx - 1; + buf.release(); + throw new ErrorDataDecoderException( + String.format("Invalid hex byte at index '%d' in string: '%s'", idx, b.toString(charset))); + } + + return buf; + } + /** * Destroy the {@link HttpPostStandardRequestDecoder} and release all it resources. After this method * was called it is not possible to operate on it anymore. 
*/ @Override public void destroy() { - // Release all data items, including those not yet pulled + // Release all data items, including those not yet pulled, only file based items cleanFiles(); + // Clean Memory based data + for (InterfaceHttpData httpData : bodyListHttpData) { + // Might have been already released by the user + if (httpData.refCnt() > 0) { + httpData.release(); + } + } destroyed = true; @@ -683,4 +715,48 @@ public void removeHttpDataFromClean(InterfaceHttpData data) { factory.removeHttpDataFromClean(request, data); } + + private static final class UrlEncodedDetector implements ByteProcessor { + @Override + public boolean process(byte value) { + return value != '%' && value != '+'; + } + } + + private static final class UrlDecoder implements ByteProcessor { + + private final ByteBuf output; + private int nextEscapedIdx; + private byte hiByte; + + UrlDecoder(ByteBuf output) { + this.output = output; + } + + @Override + public boolean process(byte value) { + if (nextEscapedIdx != 0) { + if (nextEscapedIdx == 1) { + hiByte = value; + ++nextEscapedIdx; + } else { + int hi = StringUtil.decodeHexNibble((char) hiByte); + int lo = StringUtil.decodeHexNibble((char) value); + if (hi == -1 || lo == -1) { + ++nextEscapedIdx; + return false; + } + output.writeByte((hi << 4) + lo); + nextEscapedIdx = 0; + } + } else if (value == '%') { + nextEscapedIdx = 1; + } else if (value == '+') { + output.writeByte(' '); + } else { + output.writeByte(value); + } + return true; + } + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpData.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpData.java index 2b9cb61e965..8c153294ac4 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpData.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in 
compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpPostRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpPostRequestDecoder.java index abfebc256c8..21ac13cf064 100755 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpPostRequestDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InterfaceHttpPostRequestDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InternalAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InternalAttribute.java index 991100ed20c..731c9028083 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InternalAttribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/InternalAttribute.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http.multipart; +import static java.util.Objects.requireNonNull; + import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.util.AbstractReferenceCounted; @@ -28,7 +30,7 @@ * (like Multipart Mixed mode) */ final class InternalAttribute extends AbstractReferenceCounted implements InterfaceHttpData { - private final List value = new ArrayList(); + private final List value = new ArrayList<>(); private final Charset charset; private int size; @@ -42,27 +44,21 @@ public HttpDataType getHttpDataType() { } public void addValue(String value) { - if (value == null) { - throw new NullPointerException("value"); - } + requireNonNull(value, "value"); ByteBuf buf = Unpooled.copiedBuffer(value, charset); this.value.add(buf); size += buf.readableBytes(); } public void addValue(String value, int rank) { - if (value == null) { - throw new NullPointerException("value"); - } + requireNonNull(value, "value"); ByteBuf buf = Unpooled.copiedBuffer(value, charset); this.value.add(rank, buf); size += buf.readableBytes(); } public void setValue(String value, int rank) { - if (value == null) { - throw new NullPointerException("value"); - } + requireNonNull(value, "value"); ByteBuf buf = Unpooled.copiedBuffer(value, charset); ByteBuf old = this.value.set(rank, buf); if (old != null) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java index 63c8c34b2a4..ba2696313a2 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java +++ 
b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryAttribute.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -23,6 +23,7 @@ import java.nio.charset.Charset; import static io.netty.buffer.Unpooled.*; +import static java.util.Objects.requireNonNull; /** * Memory implementation of Attributes @@ -66,9 +67,7 @@ public String getValue() { @Override public void setValue(String value) throws IOException { - if (value == null) { - throw new NullPointerException("value"); - } + requireNonNull(value, "value"); byte [] bytes = value.getBytes(getCharset()); checkSize(bytes.length); ByteBuf buffer = wrappedBuffer(bytes); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryFileUpload.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryFileUpload.java index 28e3859b793..22b75d3fea6 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryFileUpload.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MemoryFileUpload.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http.multipart; +import static java.util.Objects.requireNonNull; + import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelException; import io.netty.handler.codec.http.HttpHeaderNames; @@ -56,9 +58,7 @@ public String getFilename() { @Override public void setFilename(String filename) { - if (filename == null) { - throw new NullPointerException("filename"); - } + requireNonNull(filename, "filename"); this.filename = filename; } @@ -87,9 +87,7 @@ public int compareTo(FileUpload o) { @Override public void setContentType(String contentType) { - if (contentType == null) { - throw new NullPointerException("contentType"); - } + requireNonNull(contentType, "contentType"); this.contentType = contentType; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java index 90b7a4af627..fbd23fd2fba 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedAttribute.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -27,6 +27,8 @@ * Mixed implementation using both in Memory and in File with a limit of size */ public class MixedAttribute implements Attribute { + private final String baseDir; + private final boolean deleteOnExit; private Attribute attribute; private final long limitSize; @@ -41,24 +43,45 @@ public MixedAttribute(String name, long definedSize, long limitSize) { } public MixedAttribute(String name, long limitSize, Charset charset) { + this(name, limitSize, charset, DiskAttribute.baseDirectory, DiskAttribute.deleteOnExitTemporaryFile); + } + + public MixedAttribute(String name, long limitSize, Charset charset, String baseDir, boolean deleteOnExit) { this.limitSize = limitSize; attribute = new MemoryAttribute(name, charset); + this.baseDir = baseDir; + this.deleteOnExit = deleteOnExit; } public MixedAttribute(String name, long definedSize, long limitSize, Charset charset) { + this(name, definedSize, limitSize, charset, + DiskAttribute.baseDirectory, DiskAttribute.deleteOnExitTemporaryFile); + } + + public MixedAttribute(String name, long definedSize, long limitSize, Charset charset, + String baseDir, boolean deleteOnExit) { this.limitSize = limitSize; attribute = new MemoryAttribute(name, definedSize, charset); + this.baseDir = baseDir; + this.deleteOnExit = deleteOnExit; } public MixedAttribute(String name, String value, long limitSize) { - this(name, value, limitSize, HttpConstants.DEFAULT_CHARSET); + this(name, value, limitSize, HttpConstants.DEFAULT_CHARSET, + DiskAttribute.baseDirectory, DiskFileUpload.deleteOnExitTemporaryFile); } public MixedAttribute(String name, String value, long limitSize, Charset charset) { + this(name, value, limitSize, charset, + 
DiskAttribute.baseDirectory, DiskFileUpload.deleteOnExitTemporaryFile); + } + + public MixedAttribute(String name, String value, long limitSize, Charset charset, + String baseDir, boolean deleteOnExit) { this.limitSize = limitSize; if (value.length() > this.limitSize) { try { - attribute = new DiskAttribute(name, value, charset); + attribute = new DiskAttribute(name, value, charset, baseDir, deleteOnExit); } catch (IOException e) { // revert to Memory mode try { @@ -74,6 +97,8 @@ public MixedAttribute(String name, String value, long limitSize, Charset charset throw new IllegalArgumentException(e); } } + this.baseDir = baseDir; + this.deleteOnExit = deleteOnExit; } @Override @@ -100,7 +125,7 @@ public void addContent(ByteBuf buffer, boolean last) throws IOException { checkSize(attribute.length() + buffer.readableBytes()); if (attribute.length() + buffer.readableBytes() > limitSize) { DiskAttribute diskAttribute = new DiskAttribute(attribute - .getName(), attribute.definedLength()); + .getName(), attribute.definedLength(), baseDir, deleteOnExit); diskAttribute.setMaxSize(maxSize); if (((MemoryAttribute) attribute).getByteBuf() != null) { diskAttribute.addContent(((MemoryAttribute) attribute) @@ -178,7 +203,7 @@ public void setContent(ByteBuf buffer) throws IOException { if (buffer.readableBytes() > limitSize) { if (attribute instanceof MemoryAttribute) { // change to Disk - attribute = new DiskAttribute(attribute.getName(), attribute.definedLength()); + attribute = new DiskAttribute(attribute.getName(), attribute.definedLength(), baseDir, deleteOnExit); attribute.setMaxSize(maxSize); } } @@ -191,7 +216,7 @@ public void setContent(File file) throws IOException { if (file.length() > limitSize) { if (attribute instanceof MemoryAttribute) { // change to Disk - attribute = new DiskAttribute(attribute.getName(), attribute.definedLength()); + attribute = new DiskAttribute(attribute.getName(), attribute.definedLength(), baseDir, deleteOnExit); attribute.setMaxSize(maxSize); 
} } @@ -202,7 +227,7 @@ public void setContent(File file) throws IOException { public void setContent(InputStream inputStream) throws IOException { if (attribute instanceof MemoryAttribute) { // change to Disk even if we don't know the size - attribute = new DiskAttribute(attribute.getName(), attribute.definedLength()); + attribute = new DiskAttribute(attribute.getName(), attribute.definedLength(), baseDir, deleteOnExit); attribute.setMaxSize(maxSize); } attribute.setContent(inputStream); @@ -245,9 +270,6 @@ public String getValue() throws IOException { @Override public void setValue(String value) throws IOException { - if (value != null) { - checkSize(value.getBytes().length); - } attribute.setValue(value); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java index b80b892359d..b5d5e673ce2 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/MixedFileUpload.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -27,6 +27,10 @@ */ public class MixedFileUpload implements FileUpload { + private final String baseDir; + + private final boolean deleteOnExit; + private FileUpload fileUpload; private final long limitSize; @@ -37,6 +41,13 @@ public class MixedFileUpload implements FileUpload { public MixedFileUpload(String name, String filename, String contentType, String contentTransferEncoding, Charset charset, long size, long limitSize) { + this(name, filename, contentType, contentTransferEncoding, + charset, size, limitSize, DiskFileUpload.baseDirectory, DiskFileUpload.deleteOnExitTemporaryFile); + } + + public MixedFileUpload(String name, String filename, String contentType, + String contentTransferEncoding, Charset charset, long size, + long limitSize, String baseDir, boolean deleteOnExit) { this.limitSize = limitSize; if (size > this.limitSize) { fileUpload = new DiskFileUpload(name, filename, contentType, @@ -46,6 +57,8 @@ public MixedFileUpload(String name, String filename, String contentType, contentTransferEncoding, charset, size); } definedSize = size; + this.baseDir = baseDir; + this.deleteOnExit = deleteOnExit; } @Override @@ -76,7 +89,7 @@ public void addContent(ByteBuf buffer, boolean last) .getName(), fileUpload.getFilename(), fileUpload .getContentType(), fileUpload .getContentTransferEncoding(), fileUpload.getCharset(), - definedSize); + definedSize, baseDir, deleteOnExit); diskFileUpload.setMaxSize(maxSize); ByteBuf data = fileUpload.getByteBuf(); if (data != null && data.isReadable()) { @@ -177,7 +190,7 @@ public void setContent(ByteBuf buffer) throws IOException { .getName(), memoryUpload.getFilename(), memoryUpload .getContentType(), memoryUpload 
.getContentTransferEncoding(), memoryUpload.getCharset(), - definedSize); + definedSize, baseDir, deleteOnExit); fileUpload.setMaxSize(maxSize); // release old upload @@ -199,7 +212,7 @@ public void setContent(File file) throws IOException { .getName(), memoryUpload.getFilename(), memoryUpload .getContentType(), memoryUpload .getContentTransferEncoding(), memoryUpload.getCharset(), - definedSize); + definedSize, baseDir, deleteOnExit); fileUpload.setMaxSize(maxSize); // release old upload @@ -219,7 +232,7 @@ public void setContent(InputStream inputStream) throws IOException { .getName(), fileUpload.getFilename(), fileUpload .getContentType(), fileUpload .getContentTransferEncoding(), fileUpload.getCharset(), - definedSize); + definedSize, baseDir, deleteOnExit); fileUpload.setMaxSize(maxSize); // release old upload diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/package-info.java index bd53ddbb7a8..9575df5309c 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/http/package-info.java index ae65eb7b4bf..305e125fecb 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/BinaryWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/BinaryWebSocketFrame.java index 5fbcd903863..9ea4288b171 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/BinaryWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/BinaryWebSocketFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,7 +19,7 @@ import io.netty.buffer.Unpooled; /** - * Web Socket frame containing binary data + * Web Socket frame containing binary data. */ public class BinaryWebSocketFrame extends WebSocketFrame { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java index 371be6e4abc..1703203902e 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrame.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ import io.netty.util.internal.StringUtil; /** - * Web Socket Frame for closing the connection + * Web Socket Frame for closing the connection. */ public class CloseWebSocketFrame extends WebSocketFrame { @@ -33,25 +33,49 @@ public CloseWebSocketFrame() { } /** - * Creates a new empty close frame with closing getStatus code and reason text + * Creates a new empty close frame with closing status code and reason text + * + * @param status + * Status code as per RFC 6455. 
For + * example, 1000 indicates normal closure. + */ + public CloseWebSocketFrame(WebSocketCloseStatus status) { + this(requireValidStatusCode(status.code()), status.reasonText()); + } + + /** + * Creates a new empty close frame with closing status code and reason text + * + * @param status + * Status code as per RFC 6455. For + * example, 1000 indicates normal closure. + * @param reasonText + * Reason text. Set to null if no text. + */ + public CloseWebSocketFrame(WebSocketCloseStatus status, String reasonText) { + this(requireValidStatusCode(status.code()), reasonText); + } + + /** + * Creates a new empty close frame with closing status code and reason text * * @param statusCode - * Integer status code as per RFC 6455. For + * Integer status code as per RFC 6455. For * example, 1000 indicates normal closure. * @param reasonText * Reason text. Set to null if no text. */ public CloseWebSocketFrame(int statusCode, String reasonText) { - this(true, 0, statusCode, reasonText); + this(true, 0, requireValidStatusCode(statusCode), reasonText); } /** - * Creates a new close frame with no losing getStatus code and no reason text + * Creates a new close frame with no losing status code and no reason text * * @param finalFragment * flag indicating if this frame is the final fragment * @param rsv - * reserved bits used for protocol extensions + * reserved bits used for protocol extensions. */ public CloseWebSocketFrame(boolean finalFragment, int rsv) { this(finalFragment, rsv, Unpooled.buffer(0)); @@ -65,13 +89,13 @@ public CloseWebSocketFrame(boolean finalFragment, int rsv) { * @param rsv * reserved bits used for protocol extensions * @param statusCode - * Integer status code as per RFC 6455. For + * Integer status code as per RFC 6455. For * example, 1000 indicates normal closure. * @param reasonText * Reason text. Set to null if no text. 
*/ public CloseWebSocketFrame(boolean finalFragment, int rsv, int statusCode, String reasonText) { - super(finalFragment, rsv, newBinaryData(statusCode, reasonText)); + super(finalFragment, rsv, newBinaryData(requireValidStatusCode(statusCode), reasonText)); } private static ByteBuf newBinaryData(int statusCode, String reasonText) { @@ -104,8 +128,8 @@ public CloseWebSocketFrame(boolean finalFragment, int rsv, ByteBuf binaryData) { } /** - * Returns the closing status code as per RFC 6455. If - * a getStatus code is set, -1 is returned. + * Returns the closing status code as per RFC 6455. If + * a status code is set, -1 is returned. */ public int statusCode() { ByteBuf binaryData = content(); @@ -114,14 +138,11 @@ public int statusCode() { } binaryData.readerIndex(0); - int statusCode = binaryData.readShort(); - binaryData.readerIndex(0); - - return statusCode; + return binaryData.getShort(0); } /** - * Returns the reason text as per RFC 6455 If a reason + * Returns the reason text as per RFC 6455 If a reason * text is not supplied, an empty string is returned. 
*/ public String reasonText() { @@ -180,4 +201,13 @@ public CloseWebSocketFrame touch(Object hint) { super.touch(hint); return this; } + + static int requireValidStatusCode(int statusCode) { + if (WebSocketCloseStatus.isValidStatusCode(statusCode)) { + return statusCode; + } else { + throw new IllegalArgumentException("WebSocket close status code does NOT comply with RFC-6455: " + + statusCode); + } + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/ContinuationWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/ContinuationWebSocketFrame.java index bd25ea0ea69..166bda00b59 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/ContinuationWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/ContinuationWebSocketFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -43,7 +43,7 @@ public ContinuationWebSocketFrame(ByteBuf binaryData) { } /** - * Creates a new continuation frame with the specified binary data + * Creates a new continuation frame with the specified binary data. * * @param finalFragment * flag indicating if this frame is the final fragment @@ -71,17 +71,17 @@ public ContinuationWebSocketFrame(boolean finalFragment, int rsv, String text) { } /** - * Returns the text data in this frame + * Returns the text data in this frame. */ public String text() { return content().toString(CharsetUtil.UTF_8); } /** - * Sets the string for this frame + * Sets the string for this frame. * * @param text - * text to store + * text to store. 
*/ private static ByteBuf fromText(String text) { if (text == null || text.isEmpty()) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CorruptedWebSocketFrameException.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CorruptedWebSocketFrameException.java new file mode 100644 index 00000000000..92022dfb58a --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/CorruptedWebSocketFrameException.java @@ -0,0 +1,64 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.handler.codec.CorruptedFrameException; +import io.netty.handler.codec.DecoderException; + +/** + * An {@link DecoderException} which is thrown when the received {@link WebSocketFrame} data could not be decoded by + * an inbound handler. + */ +public final class CorruptedWebSocketFrameException extends CorruptedFrameException { + + private static final long serialVersionUID = 3918055132492988338L; + + private final WebSocketCloseStatus closeStatus; + + /** + * Creates a new instance. + */ + public CorruptedWebSocketFrameException() { + this(WebSocketCloseStatus.PROTOCOL_ERROR, null, null); + } + + /** + * Creates a new instance. + */ + public CorruptedWebSocketFrameException(WebSocketCloseStatus status, String message, Throwable cause) { + super(message == null ? 
status.reasonText() : message, cause); + closeStatus = status; + } + + /** + * Creates a new instance. + */ + public CorruptedWebSocketFrameException(WebSocketCloseStatus status, String message) { + this(status, message, null); + } + + /** + * Creates a new instance. + */ + public CorruptedWebSocketFrameException(WebSocketCloseStatus status, Throwable cause) { + this(status, null, cause); + } + + public WebSocketCloseStatus closeStatus() { + return closeStatus; + } + +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PingWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PingWebSocketFrame.java index 08e7025088d..7208bb8b2e1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PingWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PingWebSocketFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,7 +19,7 @@ import io.netty.buffer.Unpooled; /** - * Web Socket frame containing binary data + * Web Socket frame containing binary data. */ public class PingWebSocketFrame extends WebSocketFrame { @@ -41,7 +41,7 @@ public PingWebSocketFrame(ByteBuf binaryData) { } /** - * Creates a new ping frame with the specified binary data + * Creates a new ping frame with the specified binary data. 
* * @param finalFragment * flag indicating if this frame is the final fragment diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PongWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PongWebSocketFrame.java index 29c0b0f7ddb..79cb9a7fab9 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PongWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/PongWebSocketFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,7 +19,7 @@ import io.netty.buffer.Unpooled; /** - * Web Socket frame containing binary data + * Web Socket frame containing binary data. */ public class PongWebSocketFrame extends WebSocketFrame { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/TextWebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/TextWebSocketFrame.java index 9b124710bbd..f520cf6a642 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/TextWebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/TextWebSocketFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,7 +20,7 @@ import io.netty.util.CharsetUtil; /** - * Web Socket text frame + * Web Socket text frame. */ public class TextWebSocketFrame extends WebSocketFrame { @@ -35,7 +35,7 @@ public TextWebSocketFrame() { * Creates a new text frame with the specified text string. The final fragment flag is set to true. * * @param text - * String to put in the frame + * String to put in the frame. */ public TextWebSocketFrame(String text) { super(fromText(text)); @@ -59,7 +59,7 @@ public TextWebSocketFrame(ByteBuf binaryData) { * @param rsv * reserved bits used for protocol extensions * @param text - * String to put in the frame + * String to put in the frame. */ public TextWebSocketFrame(boolean finalFragment, int rsv, String text) { super(finalFragment, rsv, fromText(text)); @@ -74,7 +74,7 @@ private static ByteBuf fromText(String text) { } /** - * Creates a new text frame with the specified binary data. The final fragment flag is set to true. + * Creates a new text frame with the specified binary data and the final fragment flag. * * @param finalFragment * flag indicating if this frame is the final fragment @@ -88,7 +88,7 @@ public TextWebSocketFrame(boolean finalFragment, int rsv, ByteBuf binaryData) { } /** - * Returns the text data in this frame + * Returns the text data in this frame. 
*/ public String text() { return content().toString(CharsetUtil.UTF_8); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8FrameValidator.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8FrameValidator.java index 55f07057ec0..3087dea9d45 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8FrameValidator.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8FrameValidator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,15 +17,15 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.codec.CorruptedFrameException; /** * */ -public class Utf8FrameValidator extends ChannelInboundHandlerAdapter { +public class Utf8FrameValidator implements ChannelHandler { private int fragmentedFramesCount; private Utf8Validator utf8Validator; @@ -35,59 +35,65 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception if (msg instanceof WebSocketFrame) { WebSocketFrame frame = (WebSocketFrame) msg; - // Processing for possible fragmented messages for text and binary - // frames - if (((WebSocketFrame) msg).isFinalFragment()) { - // Final frame of the sequence. 
Apparently ping frames are - // allowed in the middle of a fragmented message - if (!(frame instanceof PingWebSocketFrame)) { - fragmentedFramesCount = 0; + try { + // Processing for possible fragmented messages for text and binary + // frames + if (((WebSocketFrame) msg).isFinalFragment()) { + // Final frame of the sequence. Apparently ping frames are + // allowed in the middle of a fragmented message + if (!(frame instanceof PingWebSocketFrame)) { + fragmentedFramesCount = 0; - // Check text for UTF8 correctness - if ((frame instanceof TextWebSocketFrame) || - (utf8Validator != null && utf8Validator.isChecking())) { - // Check UTF-8 correctness for this payload - checkUTF8String(ctx, frame.content()); + // Check text for UTF8 correctness + if ((frame instanceof TextWebSocketFrame) || + (utf8Validator != null && utf8Validator.isChecking())) { + // Check UTF-8 correctness for this payload + checkUTF8String(frame.content()); - // This does a second check to make sure UTF-8 - // correctness for entire text message - utf8Validator.finish(); - } - } - } else { - // Not final frame so we can expect more frames in the - // fragmented sequence - if (fragmentedFramesCount == 0) { - // First text or binary frame for a fragmented set - if (frame instanceof TextWebSocketFrame) { - checkUTF8String(ctx, frame.content()); + // This does a second check to make sure UTF-8 + // correctness for entire text message + utf8Validator.finish(); + } } } else { - // Subsequent frames - only check if init frame is text - if (utf8Validator != null && utf8Validator.isChecking()) { - checkUTF8String(ctx, frame.content()); + // Not final frame so we can expect more frames in the + // fragmented sequence + if (fragmentedFramesCount == 0) { + // First text or binary frame for a fragmented set + if (frame instanceof TextWebSocketFrame) { + checkUTF8String(frame.content()); + } + } else { + // Subsequent frames - only check if init frame is text + if (utf8Validator != null && 
utf8Validator.isChecking()) { + checkUTF8String(frame.content()); + } } - } - // Increment counter - fragmentedFramesCount++; + // Increment counter + fragmentedFramesCount++; + } + } catch (CorruptedWebSocketFrameException e) { + frame.release(); + throw e; } } - super.channelRead(ctx, msg); + ctx.fireChannelRead(msg); } - private void checkUTF8String(ChannelHandlerContext ctx, ByteBuf buffer) { - try { - if (utf8Validator == null) { - utf8Validator = new Utf8Validator(); - } - utf8Validator.check(buffer); - } catch (CorruptedFrameException ex) { - if (ctx.channel().isActive()) { - ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); - } + private void checkUTF8String(ByteBuf buffer) { + if (utf8Validator == null) { + utf8Validator = new Utf8Validator(); } + utf8Validator.check(buffer); } + @Override + public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { + if (cause instanceof CorruptedFrameException && ctx.channel().isOpen()) { + ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ctx.channel(), ChannelFutureListeners.CLOSE); + } + ctx.fireExceptionCaught(cause); + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8Validator.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8Validator.java index 3a377e7c5bf..0928698aff7 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8Validator.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/Utf8Validator.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,7 +14,7 @@ * under the License. */ /* - * Adaptation of http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + * Adaptation of https://bjoern.hoehrmann.de/utf-8/decoder/dfa/ * * Copyright (c) 2008-2009 Bjoern Hoehrmann * @@ -36,7 +36,6 @@ package io.netty.handler.codec.http.websocketx; import io.netty.buffer.ByteBuf; -import io.netty.handler.codec.CorruptedFrameException; import io.netty.util.ByteProcessor; /** @@ -79,12 +78,13 @@ public void finish() { codep = 0; if (state != UTF8_ACCEPT) { state = UTF8_ACCEPT; - throw new CorruptedFrameException("bytes are not UTF-8"); + throw new CorruptedWebSocketFrameException( + WebSocketCloseStatus.INVALID_PAYLOAD_DATA, "bytes are not UTF-8"); } } @Override - public boolean process(byte b) throws Exception { + public boolean process(byte b) { byte type = TYPES[b & 0xFF]; codep = state != UTF8_ACCEPT ? 
b & 0x3f | codep << 6 : 0xff >> type & b; @@ -93,7 +93,8 @@ public boolean process(byte b) throws Exception { if (state == UTF8_REJECT) { checking = false; - throw new CorruptedFrameException("bytes are not UTF-8"); + throw new CorruptedWebSocketFrameException( + WebSocketCloseStatus.INVALID_PAYLOAD_DATA, "bytes are not UTF-8"); } return true; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameDecoder.java index 1f6bad5e5a8..b1ac6556c49 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameDecoder.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,7 +20,7 @@ import io.netty.handler.codec.ReplayingDecoder; import io.netty.handler.codec.TooLongFrameException; -import java.util.List; +import java.util.Objects; import static io.netty.buffer.ByteBufUtil.readBytes; @@ -52,8 +52,19 @@ public WebSocket00FrameDecoder(int maxFrameSize) { this.maxFrameSize = maxFrameSize; } + /** + * Creates a new instance of {@code WebSocketFrameDecoder} with the specified {@code maxFrameSize}. If the client + * sends a frame size larger than {@code maxFrameSize}, the channel will be closed. + * + * @param decoderConfig + * Frames decoder configuration. 
+ */ + public WebSocket00FrameDecoder(WebSocketDecoderConfig decoderConfig) { + this.maxFrameSize = Objects.requireNonNull(decoderConfig, "decoderConfig").maxFramePayloadLength(); + } + @Override - protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { // Discard all data received if closing handshake was received before. if (receivedClosingHandshake) { in.skipBytes(actualReadableBytes()); @@ -72,7 +83,7 @@ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) t } if (frame != null) { - out.add(frame); + ctx.fireChannelRead(frame); } } @@ -96,7 +107,7 @@ private WebSocketFrame decodeBinaryFrame(ChannelHandlerContext ctx, byte type, B if (type == (byte) 0xFF && frameSize == 0) { receivedClosingHandshake = true; - return new CloseWebSocketFrame(); + return new CloseWebSocketFrame(true, 0, ctx.alloc().buffer(0)); } ByteBuf payload = readBytes(ctx.alloc(), buffer, (int) frameSize); return new BinaryWebSocketFrame(payload); diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoder.java index a5c580100fa..fbea9988049 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -69,12 +69,10 @@ protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, List out) throws Exception { + /** + * Constructor + * + * @param decoderConfig + * Frames decoder configuration. + */ + public WebSocket08FrameDecoder(WebSocketDecoderConfig decoderConfig) { + this.config = Objects.requireNonNull(decoderConfig, "decoderConfig"); + } + @Override + protected void decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { // Discard all data received if closing handshake was received before. if (receivedClosingHandshake) { in.skipBytes(actualReadableBytes()); return; } - switch (state) { - case READING_FIRST: - if (!in.isReadable()) { - return; - } - - framePayloadLength = 0; - - // FIN, RSV, OPCODE - byte b = in.readByte(); - frameFinalFlag = (b & 0x80) != 0; - frameRsv = (b & 0x70) >> 4; - frameOpcode = b & 0x0F; - - if (logger.isDebugEnabled()) { - logger.debug("Decoding WebSocket Frame opCode={}", frameOpcode); - } - - state = State.READING_SECOND; - case READING_SECOND: - if (!in.isReadable()) { - return; - } - // MASK, PAYLOAD LEN 1 - b = in.readByte(); - frameMasked = (b & 0x80) != 0; - framePayloadLen1 = b & 0x7F; - - if (frameRsv != 0 && !allowExtensions) { - protocolViolation(ctx, "RSV != 0 and no extension negotiated, RSV:" + frameRsv); - return; - } - - if (!allowMaskMismatch && expectMaskedFrames != frameMasked) { - protocolViolation(ctx, "received a frame that is not masked as expected"); - return; - } - - if (frameOpcode > 7) { // control frame (have MSB in opcode set) - - // control frames MUST NOT be fragmented - if (!frameFinalFlag) { - protocolViolation(ctx, "fragmented control frame"); - return; - } - - // control frames MUST have payload 
125 octets or less - if (framePayloadLen1 > 125) { - protocolViolation(ctx, "control frame with payload length > 125 octets"); - return; - } - - // check for reserved control frame opcodes - if (!(frameOpcode == OPCODE_CLOSE || frameOpcode == OPCODE_PING - || frameOpcode == OPCODE_PONG)) { - protocolViolation(ctx, "control frame using reserved opcode " + frameOpcode); - return; - } - - // close frame : if there is a body, the first two bytes of the - // body MUST be a 2-byte unsigned integer (in network byte - // order) representing a getStatus code - if (frameOpcode == 8 && framePayloadLen1 == 1) { - protocolViolation(ctx, "received close control frame with payload len 1"); - return; - } - } else { // data frame - // check for reserved data frame opcodes - if (!(frameOpcode == OPCODE_CONT || frameOpcode == OPCODE_TEXT - || frameOpcode == OPCODE_BINARY)) { - protocolViolation(ctx, "data frame using reserved opcode " + frameOpcode); - return; - } - - // check opcode vs message fragmentation state 1/2 - if (fragmentedFramesCount == 0 && frameOpcode == OPCODE_CONT) { - protocolViolation(ctx, "received continuation data frame outside fragmented message"); - return; - } - - // check opcode vs message fragmentation state 2/2 - if (fragmentedFramesCount != 0 && frameOpcode != OPCODE_CONT && frameOpcode != OPCODE_PING) { - protocolViolation(ctx, - "received non-continuation data frame while inside fragmented message"); - return; - } - } - - state = State.READING_SIZE; - case READING_SIZE: - - // Read frame payload length - if (framePayloadLen1 == 126) { - if (in.readableBytes() < 2) { - return; - } - framePayloadLength = in.readUnsignedShort(); - if (framePayloadLength < 126) { - protocolViolation(ctx, "invalid data frame length (not using minimal length encoding)"); - return; - } - } else if (framePayloadLen1 == 127) { - if (in.readableBytes() < 8) { - return; - } - framePayloadLength = in.readLong(); - // TODO: check if it's bigger than 0x7FFFFFFFFFFFFFFF, Maybe - // 
just check if it's negative? - - if (framePayloadLength < 65536) { - protocolViolation(ctx, "invalid data frame length (not using minimal length encoding)"); - return; - } - } else { - framePayloadLength = framePayloadLen1; - } - - if (framePayloadLength > maxFramePayloadLength) { - protocolViolation(ctx, "Max frame length of " + maxFramePayloadLength + " has been exceeded."); - return; - } - - if (logger.isDebugEnabled()) { - logger.debug("Decoding WebSocket Frame length={}", framePayloadLength); - } - - state = State.MASKING_KEY; - case MASKING_KEY: - if (frameMasked) { - if (in.readableBytes() < 4) { - return; - } - if (maskingKey == null) { - maskingKey = new byte[4]; - } - in.readBytes(maskingKey); - } - state = State.PAYLOAD; - case PAYLOAD: - if (in.readableBytes() < framePayloadLength) { - return; - } - - ByteBuf payloadBuffer = null; - try { - payloadBuffer = readBytes(ctx.alloc(), in, toFrameLength(framePayloadLength)); - - // Now we have all the data, the next checkpoint must be the next - // frame - state = State.READING_FIRST; - - // Unmask data if needed - if (frameMasked) { - unmask(payloadBuffer); - } - - // Processing ping/pong/close frames because they cannot be - // fragmented - if (frameOpcode == OPCODE_PING) { - out.add(new PingWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer)); - payloadBuffer = null; - return; - } - if (frameOpcode == OPCODE_PONG) { - out.add(new PongWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer)); - payloadBuffer = null; - return; - } - if (frameOpcode == OPCODE_CLOSE) { - receivedClosingHandshake = true; - checkCloseFrameBody(ctx, payloadBuffer); - out.add(new CloseWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer)); - payloadBuffer = null; - return; - } - - // Processing for possible fragmented messages for text and binary - // frames - if (frameFinalFlag) { - // Final frame of the sequence. 
Apparently ping frames are - // allowed in the middle of a fragmented message - if (frameOpcode != OPCODE_PING) { - fragmentedFramesCount = 0; - } - } else { - // Increment counter - fragmentedFramesCount++; - } - - // Return the frame - if (frameOpcode == OPCODE_TEXT) { - out.add(new TextWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer)); - payloadBuffer = null; - return; - } else if (frameOpcode == OPCODE_BINARY) { - out.add(new BinaryWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer)); - payloadBuffer = null; - return; - } else if (frameOpcode == OPCODE_CONT) { - out.add(new ContinuationWebSocketFrame(frameFinalFlag, frameRsv, - payloadBuffer)); - payloadBuffer = null; - return; - } else { - throw new UnsupportedOperationException("Cannot decode web socket frame with opcode: " - + frameOpcode); - } - } finally { - if (payloadBuffer != null) { - payloadBuffer.release(); - } - } - case CORRUPT: - if (in.isReadable()) { - // If we don't keep reading Netty will throw an exception saying - // we can't return null if no bytes read and state not changed. 
- in.readByte(); - } + + switch (state) { + case READING_FIRST: + if (!in.isReadable()) { + return; + } + + framePayloadLength = 0; + + // FIN, RSV, OPCODE + byte b = in.readByte(); + frameFinalFlag = (b & 0x80) != 0; + frameRsv = (b & 0x70) >> 4; + frameOpcode = b & 0x0F; + + if (logger.isTraceEnabled()) { + logger.trace("Decoding WebSocket Frame opCode={}", frameOpcode); + } + + state = State.READING_SECOND; + case READING_SECOND: + if (!in.isReadable()) { + return; + } + // MASK, PAYLOAD LEN 1 + b = in.readByte(); + frameMasked = (b & 0x80) != 0; + framePayloadLen1 = b & 0x7F; + + if (frameRsv != 0 && !config.allowExtensions()) { + protocolViolation(ctx, in, "RSV != 0 and no extension negotiated, RSV:" + frameRsv); + return; + } + + if (!config.allowMaskMismatch() && config.expectMaskedFrames() != frameMasked) { + protocolViolation(ctx, in, "received a frame that is not masked as expected"); + return; + } + + if (frameOpcode > 7) { // control frame (have MSB in opcode set) + + // control frames MUST NOT be fragmented + if (!frameFinalFlag) { + protocolViolation(ctx, in, "fragmented control frame"); + return; + } + + // control frames MUST have payload 125 octets or less + if (framePayloadLen1 > 125) { + protocolViolation(ctx, in, "control frame with payload length > 125 octets"); + return; + } + + // check for reserved control frame opcodes + if (!(frameOpcode == OPCODE_CLOSE || frameOpcode == OPCODE_PING + || frameOpcode == OPCODE_PONG)) { + protocolViolation(ctx, in, "control frame using reserved opcode " + frameOpcode); return; - default: - throw new Error("Shouldn't reach here."); + } + + // close frame : if there is a body, the first two bytes of the + // body MUST be a 2-byte unsigned integer (in network byte + // order) representing a getStatus code + if (frameOpcode == 8 && framePayloadLen1 == 1) { + protocolViolation(ctx, in, "received close control frame with payload len 1"); + return; + } + } else { // data frame + // check for reserved data frame 
opcodes + if (!(frameOpcode == OPCODE_CONT || frameOpcode == OPCODE_TEXT + || frameOpcode == OPCODE_BINARY)) { + protocolViolation(ctx, in, "data frame using reserved opcode " + frameOpcode); + return; + } + + // check opcode vs message fragmentation state 1/2 + if (fragmentedFramesCount == 0 && frameOpcode == OPCODE_CONT) { + protocolViolation(ctx, in, "received continuation data frame outside fragmented message"); + return; + } + + // check opcode vs message fragmentation state 2/2 + if (fragmentedFramesCount != 0 && frameOpcode != OPCODE_CONT) { + protocolViolation(ctx, in, + "received non-continuation data frame while inside fragmented message"); + return; + } } + + state = State.READING_SIZE; + case READING_SIZE: + + // Read frame payload length + if (framePayloadLen1 == 126) { + if (in.readableBytes() < 2) { + return; + } + framePayloadLength = in.readUnsignedShort(); + if (framePayloadLength < 126) { + protocolViolation(ctx, in, "invalid data frame length (not using minimal length encoding)"); + return; + } + } else if (framePayloadLen1 == 127) { + if (in.readableBytes() < 8) { + return; + } + framePayloadLength = in.readLong(); + // TODO: check if it's bigger than 0x7FFFFFFFFFFFFFFF, Maybe + // just check if it's negative? 
+ + if (framePayloadLength < 65536) { + protocolViolation(ctx, in, "invalid data frame length (not using minimal length encoding)"); + return; + } + } else { + framePayloadLength = framePayloadLen1; + } + + if (framePayloadLength > config.maxFramePayloadLength()) { + protocolViolation(ctx, in, WebSocketCloseStatus.MESSAGE_TOO_BIG, + "Max frame length of " + config.maxFramePayloadLength() + " has been exceeded."); + return; + } + + if (logger.isTraceEnabled()) { + logger.trace("Decoding WebSocket Frame length={}", framePayloadLength); + } + + state = State.MASKING_KEY; + case MASKING_KEY: + if (frameMasked) { + if (in.readableBytes() < 4) { + return; + } + if (maskingKey == null) { + maskingKey = new byte[4]; + } + in.readBytes(maskingKey); + } + state = State.PAYLOAD; + case PAYLOAD: + if (in.readableBytes() < framePayloadLength) { + return; + } + + ByteBuf payloadBuffer = null; + try { + payloadBuffer = readBytes(ctx.alloc(), in, toFrameLength(framePayloadLength)); + + // Now we have all the data, the next checkpoint must be the next + // frame + state = State.READING_FIRST; + + // Unmask data if needed + if (frameMasked) { + unmask(payloadBuffer); + } + + // Processing ping/pong/close frames because they cannot be + // fragmented + if (frameOpcode == OPCODE_PING) { + WebSocketFrame frame = new PingWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer); + payloadBuffer = null; + ctx.fireChannelRead(frame); + return; + } + if (frameOpcode == OPCODE_PONG) { + WebSocketFrame frame = new PongWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer); + payloadBuffer = null; + ctx.fireChannelRead(frame); + return; + } + if (frameOpcode == OPCODE_CLOSE) { + receivedClosingHandshake = true; + checkCloseFrameBody(ctx, payloadBuffer); + WebSocketFrame frame = new CloseWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer); + payloadBuffer = null; + ctx.fireChannelRead(frame); + return; + } + + // Processing for possible fragmented messages for text and binary + // frames + 
if (frameFinalFlag) { + // Final frame of the sequence. Apparently ping frames are + // allowed in the middle of a fragmented message + fragmentedFramesCount = 0; + } else { + // Increment counter + fragmentedFramesCount++; + } + + // Return the frame + if (frameOpcode == OPCODE_TEXT) { + WebSocketFrame frame = new TextWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer); + payloadBuffer = null; + ctx.fireChannelRead(frame); + return; + } else if (frameOpcode == OPCODE_BINARY) { + WebSocketFrame frame = new BinaryWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer); + payloadBuffer = null; + ctx.fireChannelRead(frame); + return; + } else if (frameOpcode == OPCODE_CONT) { + WebSocketFrame frame = new ContinuationWebSocketFrame(frameFinalFlag, frameRsv, payloadBuffer); + payloadBuffer = null; + ctx.fireChannelRead(frame); + return; + } else { + throw new UnsupportedOperationException("Cannot decode web socket frame with opcode: " + + frameOpcode); + } + } finally { + if (payloadBuffer != null) { + payloadBuffer.release(); + } + } + case CORRUPT: + if (in.isReadable()) { + // If we don't keep reading Netty will throw an exception saying + // we can't return null if no bytes read and state not changed. 
+ in.readByte(); + } + return; + default: + throw new Error("Shouldn't reach here."); + } } private void unmask(ByteBuf frame) { @@ -408,20 +420,35 @@ private void unmask(ByteBuf frame) { } } - private void protocolViolation(ChannelHandlerContext ctx, String reason) { - protocolViolation(ctx, new CorruptedFrameException(reason)); + private void protocolViolation(ChannelHandlerContext ctx, ByteBuf in, String reason) { + protocolViolation(ctx, in, WebSocketCloseStatus.PROTOCOL_ERROR, reason); } - private void protocolViolation(ChannelHandlerContext ctx, CorruptedFrameException ex) { + private void protocolViolation(ChannelHandlerContext ctx, ByteBuf in, WebSocketCloseStatus status, String reason) { + protocolViolation(ctx, in, new CorruptedWebSocketFrameException(status, reason)); + } + + private void protocolViolation(ChannelHandlerContext ctx, ByteBuf in, CorruptedWebSocketFrameException ex) { state = State.CORRUPT; - if (ctx.channel().isActive()) { + int readableBytes = in.readableBytes(); + if (readableBytes > 0) { + // Fix for memory leak, caused by ByteToMessageDecoder#channelRead: + // buffer 'cumulation' is released ONLY when no more readable bytes available. 
+ in.skipBytes(readableBytes); + } + if (ctx.channel().isActive() && config.closeOnProtocolViolation()) { Object closeMessage; if (receivedClosingHandshake) { closeMessage = Unpooled.EMPTY_BUFFER; } else { - closeMessage = new CloseWebSocketFrame(1002, null); + WebSocketCloseStatus closeStatus = ex.closeStatus(); + String reasonText = ex.getMessage(); + if (reasonText == null) { + reasonText = closeStatus.reasonText(); + } + closeMessage = new CloseWebSocketFrame(closeStatus, reasonText); } - ctx.writeAndFlush(closeMessage).addListener(ChannelFutureListener.CLOSE); + ctx.writeAndFlush(closeMessage).addListener(ctx.channel(), ChannelFutureListeners.CLOSE); } throw ex; } @@ -441,7 +468,7 @@ protected void checkCloseFrameBody( return; } if (buffer.readableBytes() == 1) { - protocolViolation(ctx, "Invalid close frame body"); + protocolViolation(ctx, buffer, WebSocketCloseStatus.INVALID_PAYLOAD_DATA, "Invalid close frame body"); } // Save reader index @@ -450,17 +477,16 @@ protected void checkCloseFrameBody( // Must have 2 byte integer within the valid range int statusCode = buffer.readShort(); - if (statusCode >= 0 && statusCode <= 999 || statusCode >= 1004 && statusCode <= 1006 - || statusCode >= 1012 && statusCode <= 2999) { - protocolViolation(ctx, "Invalid close frame getStatus code: " + statusCode); + if (!WebSocketCloseStatus.isValidStatusCode(statusCode)) { + protocolViolation(ctx, buffer, "Invalid close frame getStatus code: " + statusCode); } // May have UTF-8 message if (buffer.isReadable()) { try { new Utf8Validator().check(buffer); - } catch (CorruptedFrameException ex) { - protocolViolation(ctx, ex); + } catch (CorruptedWebSocketFrameException ex) { + protocolViolation(ctx, buffer, ex); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameEncoder.java index cb16953fcf0..af1fdace05d 100644 --- 
a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -13,7 +13,7 @@ * License for the specific language governing permissions and limitations * under the License. */ -// (BSD License: http://www.opensource.org/licenses/bsd-license) +// (BSD License: https://www.opensource.org/licenses/bsd-license) // // Copyright (c) 2011, Joe Walnes and contributors // All rights reserved. @@ -63,6 +63,7 @@ import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.util.List; +import java.util.concurrent.ThreadLocalRandom; /** *

    @@ -126,8 +127,8 @@ protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, List input) { * @throws NullPointerException if {@code input} is null */ public WebSocketChunkedInput(ChunkedInput input, int rsv) { - this.input = ObjectUtil.checkNotNull(input, "input"); + this.input = requireNonNull(input, "input"); this.rsv = rsv; } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakeException.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakeException.java new file mode 100644 index 00000000000..69f1839dc29 --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakeException.java @@ -0,0 +1,55 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.util.ReferenceCounted; + +/** + * Client exception during handshaking process. + * + *

    IMPORTANT: This exception does not contain any {@link ReferenceCounted} fields + * e.g. {@link FullHttpResponse}, so no special treatment is needed. + */ +public final class WebSocketClientHandshakeException extends WebSocketHandshakeException { + + private static final long serialVersionUID = 1L; + + private final HttpResponse response; + + public WebSocketClientHandshakeException(String message) { + this(message, null); + } + + public WebSocketClientHandshakeException(String message, HttpResponse httpResponse) { + super(message); + if (httpResponse != null) { + response = new DefaultHttpResponse(httpResponse.protocolVersion(), + httpResponse.status(), httpResponse.headers()); + } else { + response = null; + } + } + + /** + * Returns a {@link HttpResponse response} if exception occurs during response validation otherwise {@code null}. + */ + public HttpResponse response() { + return response; + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker.java index 44c91445a2a..aa219e26588 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,11 +16,10 @@ package io.netty.handler.codec.http.websocketx; import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundInvoker; import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -35,21 +34,25 @@ import io.netty.handler.codec.http.HttpScheme; import io.netty.util.NetUtil; import io.netty.util.ReferenceCountUtil; -import io.netty.util.internal.ThrowableUtil; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; import java.net.URI; import java.nio.channels.ClosedChannelException; import java.util.Locale; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; + +import static java.util.Objects.requireNonNull; /** * Base class for web socket client handshake implementations */ public abstract class WebSocketClientHandshaker { - private static final ClosedChannelException CLOSED_CHANNEL_EXCEPTION = ThrowableUtil.unknownStackTrace( - new ClosedChannelException(), WebSocketClientHandshaker.class, "processHandshake(...)"); private static final String HTTP_SCHEME_PREFIX = HttpScheme.HTTP + "://"; private static final String HTTPS_SCHEME_PREFIX = HttpScheme.HTTPS + "://"; + protected static final int DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS = 10000; private final URI uri; @@ -57,6 +60,15 @@ public abstract class WebSocketClientHandshaker { 
private volatile boolean handshakeComplete; + private volatile long forceCloseTimeoutMillis = DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS; + + private volatile int forceCloseInit; + + private static final AtomicIntegerFieldUpdater FORCE_CLOSE_INIT_UPDATER = + AtomicIntegerFieldUpdater.newUpdater(WebSocketClientHandshaker.class, "forceCloseInit"); + + private volatile boolean forceCloseComplete; + private final String expectedSubprotocol; private volatile String actualSubprotocol; @@ -65,6 +77,8 @@ public abstract class WebSocketClientHandshaker { private final int maxFramePayloadLength; + private final boolean absoluteUpgradeUrl; + /** * Base constructor * @@ -82,11 +96,62 @@ public abstract class WebSocketClientHandshaker { */ protected WebSocketClientHandshaker(URI uri, WebSocketVersion version, String subprotocol, HttpHeaders customHeaders, int maxFramePayloadLength) { + this(uri, version, subprotocol, customHeaders, maxFramePayloadLength, DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS); + } + + /** + * Base constructor + * + * @param uri + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified + */ + protected WebSocketClientHandshaker(URI uri, WebSocketVersion version, String subprotocol, + HttpHeaders customHeaders, int maxFramePayloadLength, + long forceCloseTimeoutMillis) { + this(uri, version, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis, false); + } + + /** + * Base constructor + * + * @param uri + * URL for web socket communications. 
e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified + * @param absoluteUpgradeUrl + * Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over + * clear HTTP + */ + protected WebSocketClientHandshaker(URI uri, WebSocketVersion version, String subprotocol, + HttpHeaders customHeaders, int maxFramePayloadLength, + long forceCloseTimeoutMillis, boolean absoluteUpgradeUrl) { this.uri = uri; this.version = version; expectedSubprotocol = subprotocol; this.customHeaders = customHeaders; this.maxFramePayloadLength = maxFramePayloadLength; + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + this.absoluteUpgradeUrl = absoluteUpgradeUrl; } /** @@ -140,17 +205,27 @@ private void setActualSubprotocol(String actualSubprotocol) { this.actualSubprotocol = actualSubprotocol; } + public long forceCloseTimeoutMillis() { + return forceCloseTimeoutMillis; + } + /** - * Begins the opening handshake + * Flag to indicate if the closing handshake was initiated because of timeout. + * For testing only. + */ + protected boolean isForceCloseComplete() { + return forceCloseComplete; + } + + /** + * Sets timeout to close the connection if it was not closed by the server. 
* - * @param channel - * Channel + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified */ - public ChannelFuture handshake(Channel channel) { - if (channel == null) { - throw new NullPointerException("channel"); - } - return handshake(channel, channel.newPromise()); + public WebSocketClientHandshaker setForceCloseTimeoutMillis(long forceCloseTimeoutMillis) { + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + return this; } /** @@ -158,42 +233,39 @@ public ChannelFuture handshake(Channel channel) { * * @param channel * Channel - * @param promise - * the {@link ChannelPromise} to be notified when the opening handshake is sent */ - public final ChannelFuture handshake(Channel channel, final ChannelPromise promise) { - FullHttpRequest request = newHandshakeRequest(); - - HttpResponseDecoder decoder = channel.pipeline().get(HttpResponseDecoder.class); + public Future handshake(Channel channel) { + requireNonNull(channel, "channel"); + ChannelPipeline pipeline = channel.pipeline(); + HttpResponseDecoder decoder = pipeline.get(HttpResponseDecoder.class); if (decoder == null) { - HttpClientCodec codec = channel.pipeline().get(HttpClientCodec.class); + HttpClientCodec codec = pipeline.get(HttpClientCodec.class); if (codec == null) { - promise.setFailure(new IllegalStateException("ChannelPipeline does not contain " + - "a HttpResponseDecoder or HttpClientCodec")); - return promise; + return channel.newFailedFuture(new IllegalStateException("ChannelPipeline does not contain " + + "an HttpResponseDecoder or HttpClientCodec")); } } - channel.writeAndFlush(request).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) { - if (future.isSuccess()) { - ChannelPipeline p = future.channel().pipeline(); - ChannelHandlerContext ctx = p.context(HttpRequestEncoder.class); - if (ctx == null) { - ctx = p.context(HttpClientCodec.class); - } - if (ctx == null) { - 
promise.setFailure(new IllegalStateException("ChannelPipeline does not contain " + - "a HttpRequestEncoder or HttpClientCodec")); - return; - } - p.addAfter(ctx.name(), "ws-encoder", newWebSocketEncoder()); + FullHttpRequest request = newHandshakeRequest(); - promise.setSuccess(); - } else { - promise.setFailure(future.cause()); + Promise promise = channel.newPromise(); + channel.writeAndFlush(request).addListener(channel, (ch, future) -> { + if (future.isSuccess()) { + ChannelPipeline p = ch.pipeline(); + ChannelHandlerContext ctx = p.context(HttpRequestEncoder.class); + if (ctx == null) { + ctx = p.context(HttpClientCodec.class); + } + if (ctx == null) { + promise.setFailure(new IllegalStateException("ChannelPipeline does not contain " + + "an HttpRequestEncoder or HttpClientCodec")); + return; } + p.addAfter(ctx.name(), "ws-encoder", newWebSocketEncoder()); + + promise.setSuccess(null); + } else { + promise.setFailure(future.cause()); } }); return promise; @@ -238,9 +310,9 @@ public final void finishHandshake(Channel channel, FullHttpResponse response) { } // else mixed cases - which are all errors if (!protocolValid) { - throw new WebSocketHandshakeException(String.format( + throw new WebSocketClientHandshakeException(String.format( "Invalid subprotocol. Actual: %s. Expected one of: %s", - receivedProtocol, expectedSubprotocol)); + receivedProtocol, expectedSubprotocol), response); } setHandshakeComplete(); @@ -263,7 +335,7 @@ public final void finishHandshake(Channel channel, FullHttpResponse response) { ctx = p.context(HttpClientCodec.class); if (ctx == null) { throw new IllegalStateException("ChannelPipeline does not contain " + - "a HttpRequestEncoder or HttpClientCodec"); + "an HttpRequestEncoder or HttpClientCodec"); } final HttpClientCodec codec = (HttpClientCodec) ctx.handler(); // Remove the encoder part of the codec as the user may start writing frames after this method returns. 
@@ -274,12 +346,7 @@ public final void finishHandshake(Channel channel, FullHttpResponse response) { // Delay the removal of the decoder so the user can setup the pipeline if needed to handle // WebSocketFrame messages. // See https://github.com/netty/netty/issues/4533 - channel.eventLoop().execute(new Runnable() { - @Override - public void run() { - p.remove(codec); - } - }); + channel.executor().execute(() -> p.remove(codec)); } else { if (p.get(HttpRequestEncoder.class) != null) { // Remove the encoder part of the codec as the user may start writing frames after this method returns. @@ -291,12 +358,7 @@ public void run() { // Delay the removal of the decoder so the user can setup the pipeline if needed to handle // WebSocketFrame messages. // See https://github.com/netty/netty/issues/4533 - channel.eventLoop().execute(new Runnable() { - @Override - public void run() { - p.remove(context.handler()); - } - }); + channel.executor().execute(() -> p.remove(context.handler())); } } @@ -308,32 +370,15 @@ public void run() { * @param response * HTTP response containing the closing handshake details * @return future - * the {@link ChannelFuture} which is notified once the handshake completes. - */ - public final ChannelFuture processHandshake(final Channel channel, HttpResponse response) { - return processHandshake(channel, response, channel.newPromise()); - } - - /** - * Process the opening handshake initiated by {@link #handshake}}. - * - * @param channel - * Channel - * @param response - * HTTP response containing the closing handshake details - * @param promise - * the {@link ChannelPromise} to notify once the handshake completes. - * @return future - * the {@link ChannelFuture} which is notified once the handshake completes. + * the {@link Future} which is notified once the handshake completes. 
*/ - public final ChannelFuture processHandshake(final Channel channel, HttpResponse response, - final ChannelPromise promise) { + public final Future processHandshake(final Channel channel, HttpResponse response) { if (response instanceof FullHttpResponse) { try { finishHandshake(channel, (FullHttpResponse) response); - promise.setSuccess(); + return channel.newSucceededFuture(); } catch (Throwable cause) { - promise.setFailure(cause); + return channel.newFailedFuture(cause); } } else { ChannelPipeline p = channel.pipeline(); @@ -341,10 +386,12 @@ public final ChannelFuture processHandshake(final Channel channel, HttpResponse if (ctx == null) { ctx = p.context(HttpClientCodec.class); if (ctx == null) { - return promise.setFailure(new IllegalStateException("ChannelPipeline does not contain " + - "a HttpResponseDecoder or HttpClientCodec")); + return channel.newFailedFuture(new IllegalStateException("ChannelPipeline does not contain " + + "an HttpResponseDecoder or HttpClientCodec")); } } + + Promise promise = channel.newPromise(); // Add aggregator and ensure we feed the HttpResponse so it is aggregated. A limit of 8192 should be more // then enough for the websockets handshake payload. 
// @@ -353,12 +400,12 @@ public final ChannelFuture processHandshake(final Channel channel, HttpResponse p.addAfter(ctx.name(), aggregatorName, new HttpObjectAggregator(8192)); p.addAfter(aggregatorName, "handshaker", new SimpleChannelInboundHandler() { @Override - protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) throws Exception { + protected void messageReceived(ChannelHandlerContext ctx, FullHttpResponse msg) throws Exception { // Remove ourself and do the actual handshake ctx.pipeline().remove(this); try { finishHandshake(channel, msg); - promise.setSuccess(); + promise.setSuccess(null); } catch (Throwable cause) { promise.setFailure(cause); } @@ -374,7 +421,9 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { // Fail promise if Channel was closed - promise.tryFailure(CLOSED_CHANNEL_EXCEPTION); + if (!promise.isDone()) { + promise.tryFailure(new ClosedChannelException()); + } ctx.fireChannelInactive(); } }); @@ -383,8 +432,8 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { } catch (Throwable cause) { promise.setFailure(cause); } + return promise; } - return promise; } /** @@ -403,48 +452,77 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { protected abstract WebSocketFrameEncoder newWebSocketEncoder(); /** - * Performs the closing handshake + * Performs the closing handshake. + * + * When called from within a {@link ChannelHandler} you most likely want to use + * {@link #close(ChannelHandlerContext, CloseWebSocketFrame)}. 
* * @param channel * Channel * @param frame * Closing Frame that was received */ - public ChannelFuture close(Channel channel, CloseWebSocketFrame frame) { - if (channel == null) { - throw new NullPointerException("channel"); - } - return close(channel, frame, channel.newPromise()); + public Future close(Channel channel, CloseWebSocketFrame frame) { + requireNonNull(channel, "channel"); + return close0(channel, channel, frame); } /** * Performs the closing handshake * - * @param channel - * Channel + * @param ctx + * the {@link ChannelHandlerContext} to use. * @param frame * Closing Frame that was received - * @param promise - * the {@link ChannelPromise} to be notified when the closing handshake is done */ - public ChannelFuture close(Channel channel, CloseWebSocketFrame frame, ChannelPromise promise) { - if (channel == null) { - throw new NullPointerException("channel"); + public Future close(ChannelHandlerContext ctx, CloseWebSocketFrame frame) { + requireNonNull(ctx, "ctx"); + return close0(ctx, ctx.channel(), frame); + } + + private Future close0(final ChannelOutboundInvoker invoker, final Channel channel, + CloseWebSocketFrame frame) { + Future f = invoker.writeAndFlush(frame); + final long forceCloseTimeoutMillis = this.forceCloseTimeoutMillis; + final WebSocketClientHandshaker handshaker = this; + if (forceCloseTimeoutMillis <= 0 || !channel.isActive() || forceCloseInit != 0) { + return f; } - return channel.writeAndFlush(frame, promise); + + f.addListener(future -> { + // If flush operation failed, there is no reason to expect + // a server to receive CloseFrame. Thus this should be handled + // by the application separately. + // Also, close might be called twice from different threads. 
+ if (future.isSuccess() && channel.isActive() && + FORCE_CLOSE_INIT_UPDATER.compareAndSet(handshaker, 0, 1)) { + final Future forceCloseFuture = channel.executor().schedule(() -> { + if (channel.isActive()) { + channel.close(); + forceCloseComplete = true; + } + }, forceCloseTimeoutMillis, TimeUnit.MILLISECONDS); + + channel.closeFuture().addListener(ignore -> { + forceCloseFuture.cancel(false); + }); + } + }); + return f; } /** * Return the constructed raw path for the give {@link URI}. */ - static String rawPath(URI wsURL) { - String path = wsURL.getRawPath(); - String query = wsURL.getRawQuery(); - if (query != null && !query.isEmpty()) { - path = path + '?' + query; + protected String upgradeUrl(URI wsURL) { + if (absoluteUpgradeUrl) { + return wsURL.toString(); } - return path == null || path.isEmpty() ? "/" : path; + String path = wsURL.getRawPath(); + path = path == null || path.isEmpty() ? "/" : path; + String query = wsURL.getRawQuery(); + return query != null && !query.isEmpty() ? path + '?' + query : path; } static CharSequence websocketHostValue(URI wsURL) { @@ -453,19 +531,20 @@ static CharSequence websocketHostValue(URI wsURL) { return wsURL.getHost(); } String host = wsURL.getHost(); + String scheme = wsURL.getScheme(); if (port == HttpScheme.HTTP.port()) { - return HttpScheme.HTTP.name().contentEquals(wsURL.getScheme()) - || WebSocketScheme.WS.name().contentEquals(wsURL.getScheme()) ? + return HttpScheme.HTTP.name().contentEquals(scheme) + || WebSocketScheme.WS.name().contentEquals(scheme) ? host : NetUtil.toSocketAddressString(host, port); } if (port == HttpScheme.HTTPS.port()) { - return HttpScheme.HTTPS.name().contentEquals(wsURL.getScheme()) - || WebSocketScheme.WSS.name().contentEquals(wsURL.getScheme()) ? + return HttpScheme.HTTPS.name().contentEquals(scheme) + || WebSocketScheme.WSS.name().contentEquals(scheme) ? 
host : NetUtil.toSocketAddressString(host, port); } // if the port is not standard (80/443) its needed to add the port to the header. - // See http://tools.ietf.org/html/rfc6454#section-6.2 + // See https://tools.ietf.org/html/rfc6454#section-6.2 return NetUtil.toSocketAddressString(host, port); } @@ -490,7 +569,7 @@ static CharSequence websocketOriginValue(URI wsURL) { if (port != defaultPort && port != -1) { // if the port is not standard (80/443) its needed to add the port to the header. - // See http://tools.ietf.org/html/rfc6454#section-6.2 + // See https://tools.ietf.org/html/rfc6454#section-6.2 return schemePrefix + NetUtil.toSocketAddressString(host, port); } return schemePrefix + host; diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00.java index 05070f74b76..71860369bcf 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -26,15 +26,15 @@ import io.netty.handler.codec.http.HttpMethod; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.handler.codec.http.HttpVersion; -import io.netty.util.AsciiString; import java.net.URI; import java.nio.ByteBuffer; +import java.util.concurrent.ThreadLocalRandom; /** *

    * Performs client side opening and closing handshakes for web socket specification version draft-ietf-hybi-thewebsocketprotocol- + * href="https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00" >draft-ietf-hybi-thewebsocketprotocol- * 00 *

    *

    @@ -43,12 +43,10 @@ */ public class WebSocketClientHandshaker00 extends WebSocketClientHandshaker { - private static final AsciiString WEBSOCKET = AsciiString.cached("WebSocket"); - private ByteBuf expectedChallengeResponseBytes; /** - * Constructor specifying the destination web socket location and version to initiate + * Creates a new instance with the specified destination WebSocket location and version to initiate. * * @param webSocketURL * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be @@ -64,7 +62,58 @@ public class WebSocketClientHandshaker00 extends WebSocketClientHandshaker { */ public WebSocketClientHandshaker00(URI webSocketURL, WebSocketVersion version, String subprotocol, HttpHeaders customHeaders, int maxFramePayloadLength) { - super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength); + this(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength, + DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS); + } + + /** + * Creates a new instance with the specified destination WebSocket location and version to initiate. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. 
+ * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified + */ + public WebSocketClientHandshaker00(URI webSocketURL, WebSocketVersion version, String subprotocol, + HttpHeaders customHeaders, int maxFramePayloadLength, + long forceCloseTimeoutMillis) { + this(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis, false); + } + + /** + * Creates a new instance with the specified destination WebSocket location and version to initiate. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. 
+ * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified + * @param absoluteUpgradeUrl + * Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over + * clear HTTP + */ + WebSocketClientHandshaker00(URI webSocketURL, WebSocketVersion version, String subprotocol, + HttpHeaders customHeaders, int maxFramePayloadLength, + long forceCloseTimeoutMillis, boolean absoluteUpgradeUrl) { + super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis, + absoluteUpgradeUrl); } /** @@ -88,14 +137,14 @@ public WebSocketClientHandshaker00(URI webSocketURL, WebSocketVersion version, S @Override protected FullHttpRequest newHandshakeRequest() { // Make keys - int spaces1 = WebSocketUtil.randomNumber(1, 12); - int spaces2 = WebSocketUtil.randomNumber(1, 12); + int spaces1 = ThreadLocalRandom.current().nextInt(1, 13); + int spaces2 = ThreadLocalRandom.current().nextInt(1, 13); int max1 = Integer.MAX_VALUE / spaces1; int max2 = Integer.MAX_VALUE / spaces2; - int number1 = WebSocketUtil.randomNumber(0, max1); - int number2 = WebSocketUtil.randomNumber(0, max2); + int number1 = ThreadLocalRandom.current().nextInt(0, max1); + int number2 = ThreadLocalRandom.current().nextInt(0, max2); int product1 = number1 * spaces1; int product2 = number2 * spaces2; @@ -124,33 +173,35 @@ protected FullHttpRequest newHandshakeRequest() { System.arraycopy(key3, 0, challenge, 8, 8); expectedChallengeResponseBytes = Unpooled.wrappedBuffer(WebSocketUtil.md5(challenge)); - // Get path URI wsURL = uri(); - String path = rawPath(wsURL); // Format request - FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, path); + FullHttpRequest request = new 
DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, upgradeUrl(wsURL), + Unpooled.wrappedBuffer(key3)); HttpHeaders headers = request.headers(); - headers.add(HttpHeaderNames.UPGRADE, WEBSOCKET) - .add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) - .add(HttpHeaderNames.HOST, websocketHostValue(wsURL)) - .add(HttpHeaderNames.ORIGIN, websocketOriginValue(wsURL)) - .add(HttpHeaderNames.SEC_WEBSOCKET_KEY1, key1) - .add(HttpHeaderNames.SEC_WEBSOCKET_KEY2, key2); - - String expectedSubprotocol = expectedSubprotocol(); - if (expectedSubprotocol != null && !expectedSubprotocol.isEmpty()) { - headers.add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); - } if (customHeaders != null) { headers.add(customHeaders); } + headers.set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.HOST, websocketHostValue(wsURL)) + .set(HttpHeaderNames.SEC_WEBSOCKET_KEY1, key1) + .set(HttpHeaderNames.SEC_WEBSOCKET_KEY2, key2); + + if (!headers.contains(HttpHeaderNames.ORIGIN)) { + headers.set(HttpHeaderNames.ORIGIN, websocketOriginValue(wsURL)); + } + + String expectedSubprotocol = expectedSubprotocol(); + if (expectedSubprotocol != null && !expectedSubprotocol.isEmpty()) { + headers.set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); + } + // Set Content-Length to workaround some known defect. - // See also: http://www.ietf.org/mail-archive/web/hybi/current/msg02149.html + // See also: https://www.ietf.org/mail-archive/web/hybi/current/msg02149.html headers.set(HttpHeaderNames.CONTENT_LENGTH, key3.length); - request.content().writeBytes(key3); return request; } @@ -172,48 +223,47 @@ protected FullHttpRequest newHandshakeRequest() { * * @param response * HTTP response returned from the server for the request sent by beginOpeningHandshake00(). - * @throws WebSocketHandshakeException + * @throws WebSocketHandshakeException If the handshake or challenge is invalid. 
*/ @Override protected void verify(FullHttpResponse response) { - if (!response.status().equals(HttpResponseStatus.SWITCHING_PROTOCOLS)) { - throw new WebSocketHandshakeException("Invalid handshake response getStatus: " + response.status()); + HttpResponseStatus status = response.status(); + if (!HttpResponseStatus.SWITCHING_PROTOCOLS.equals(status)) { + throw new WebSocketClientHandshakeException("Invalid handshake response getStatus: " + status, response); } HttpHeaders headers = response.headers(); - CharSequence upgrade = headers.get(HttpHeaderNames.UPGRADE); - if (!WEBSOCKET.contentEqualsIgnoreCase(upgrade)) { - throw new WebSocketHandshakeException("Invalid handshake response upgrade: " - + upgrade); + if (!HttpHeaderValues.WEBSOCKET.contentEqualsIgnoreCase(upgrade)) { + throw new WebSocketClientHandshakeException("Invalid handshake response upgrade: " + upgrade, response); } if (!headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true)) { - throw new WebSocketHandshakeException("Invalid handshake response connection: " - + headers.get(HttpHeaderNames.CONNECTION)); + throw new WebSocketClientHandshakeException("Invalid handshake response connection: " + + headers.get(HttpHeaderNames.CONNECTION), response); } ByteBuf challenge = response.content(); if (!challenge.equals(expectedChallengeResponseBytes)) { - throw new WebSocketHandshakeException("Invalid challenge"); + throw new WebSocketClientHandshakeException("Invalid challenge", response); } } private static String insertRandomCharacters(String key) { - int count = WebSocketUtil.randomNumber(1, 12); + int count = ThreadLocalRandom.current().nextInt(1, 13); char[] randomChars = new char[count]; int randCount = 0; while (randCount < count) { - int rand = (int) (Math.random() * 0x7e + 0x21); - if (0x21 < rand && rand < 0x2f || 0x3a < rand && rand < 0x7e) { + int rand = ThreadLocalRandom.current().nextInt(0x22, 0x7e); + if (rand < 0x2f || 0x3a < rand) { randomChars[randCount] = (char) 
rand; randCount += 1; } } for (int i = 0; i < count; i++) { - int split = WebSocketUtil.randomNumber(0, key.length()); + int split = ThreadLocalRandom.current().nextInt(0, key.length() + 1); String part1 = key.substring(0, split); String part2 = key.substring(split); key = part1 + randomChars[i] + part2; @@ -224,7 +274,7 @@ private static String insertRandomCharacters(String key) { private static String insertSpaces(String key, int spaces) { for (int i = 0; i < spaces; i++) { - int split = WebSocketUtil.randomNumber(1, key.length() - 1); + int split = ThreadLocalRandom.current().nextInt(1, key.length()); String part1 = key.substring(0, split); String part2 = key.substring(split); key = part1 + ' ' + part2; @@ -242,4 +292,11 @@ protected WebSocketFrameDecoder newWebsocketDecoder() { protected WebSocketFrameEncoder newWebSocketEncoder() { return new WebSocket00FrameEncoder(); } + + @Override + public WebSocketClientHandshaker00 setForceCloseTimeoutMillis(long forceCloseTimeoutMillis) { + super.setForceCloseTimeoutMillis(forceCloseTimeoutMillis); + return this; + } + } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07.java index f85d086b2f4..6b7587ebf43 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,7 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -33,7 +34,7 @@ /** *

    * Performs client side opening and closing handshakes for web socket specification version draft-ietf-hybi-thewebsocketprotocol- + * href="https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-07" >draft-ietf-hybi-thewebsocketprotocol- * 10 *

    */ @@ -94,10 +95,81 @@ public WebSocketClientHandshaker07(URI webSocketURL, WebSocketVersion version, S * When set to true, frames which are not masked properly according to the standard will still be * accepted. */ + public WebSocketClientHandshaker07(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking, + allowMaskMismatch, DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS); + } + + /** + * Creates a new instance. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified. 
+ */ public WebSocketClientHandshaker07(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, - boolean performMasking, boolean allowMaskMismatch) { - super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength); + boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking, + allowMaskMismatch, forceCloseTimeoutMillis, false); + } + + /** + * Creates a new instance. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified. 
+ * @param absoluteUpgradeUrl + * Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over + * clear HTTP + */ + WebSocketClientHandshaker07(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis, + boolean absoluteUpgradeUrl) { + super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis, + absoluteUpgradeUrl); this.allowExtensions = allowExtensions; this.performMasking = performMasking; this.allowMaskMismatch = allowMaskMismatch; @@ -123,9 +195,7 @@ public WebSocketClientHandshaker07(URI webSocketURL, WebSocketVersion version, S */ @Override protected FullHttpRequest newHandshakeRequest() { - // Get path URI wsURL = uri(); - String path = rawPath(wsURL); // Get 16 bit nonce and base 64 encode it byte[] nonce = WebSocketUtil.randomBytes(16); @@ -142,25 +212,36 @@ protected FullHttpRequest newHandshakeRequest() { } // Format request - FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, path); + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, upgradeUrl(wsURL), + Unpooled.EMPTY_BUFFER); HttpHeaders headers = request.headers(); - headers.add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) - .add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) - .add(HttpHeaderNames.SEC_WEBSOCKET_KEY, key) - .add(HttpHeaderNames.HOST, websocketHostValue(wsURL)) - .add(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, websocketOriginValue(wsURL)); + if (customHeaders != null) { + headers.add(customHeaders); + if (!headers.contains(HttpHeaderNames.HOST)) { + // Only add HOST header if customHeaders did not contain it. 
+ // + // See https://github.com/netty/netty/issues/10101 + headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL)); + } + } else { + headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL)); + } + + headers.set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.SEC_WEBSOCKET_KEY, key); + + if (!headers.contains(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN)) { + headers.set(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, websocketOriginValue(wsURL)); + } String expectedSubprotocol = expectedSubprotocol(); if (expectedSubprotocol != null && !expectedSubprotocol.isEmpty()) { - headers.add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); + headers.set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); } - headers.add(HttpHeaderNames.SEC_WEBSOCKET_VERSION, "7"); - - if (customHeaders != null) { - headers.add(customHeaders); - } + headers.set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, version().toAsciiString()); return request; } @@ -179,31 +260,30 @@ protected FullHttpRequest newHandshakeRequest() { * * @param response * HTTP response returned from the server for the request sent by beginOpeningHandshake00(). - * @throws WebSocketHandshakeException + * @throws WebSocketHandshakeException If the handshake or challenge is invalid. 
*/ @Override protected void verify(FullHttpResponse response) { - final HttpResponseStatus status = HttpResponseStatus.SWITCHING_PROTOCOLS; - final HttpHeaders headers = response.headers(); - - if (!response.status().equals(status)) { - throw new WebSocketHandshakeException("Invalid handshake response getStatus: " + response.status()); + HttpResponseStatus status = response.status(); + if (!HttpResponseStatus.SWITCHING_PROTOCOLS.equals(status)) { + throw new WebSocketClientHandshakeException("Invalid handshake response getStatus: " + status, response); } + HttpHeaders headers = response.headers(); CharSequence upgrade = headers.get(HttpHeaderNames.UPGRADE); if (!HttpHeaderValues.WEBSOCKET.contentEqualsIgnoreCase(upgrade)) { - throw new WebSocketHandshakeException("Invalid handshake response upgrade: " + upgrade); + throw new WebSocketClientHandshakeException("Invalid handshake response upgrade: " + upgrade, response); } if (!headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true)) { - throw new WebSocketHandshakeException("Invalid handshake response connection: " - + headers.get(HttpHeaderNames.CONNECTION)); + throw new WebSocketClientHandshakeException("Invalid handshake response connection: " + + headers.get(HttpHeaderNames.CONNECTION), response); } CharSequence accept = headers.get(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT); if (accept == null || !accept.equals(expectedChallengeResponseString)) { - throw new WebSocketHandshakeException(String.format( - "Invalid challenge. Actual: %s. Expected: %s", accept, expectedChallengeResponseString)); + throw new WebSocketClientHandshakeException(String.format( + "Invalid challenge. Actual: %s. 
Expected: %s", accept, expectedChallengeResponseString), response); } } @@ -216,4 +296,11 @@ protected WebSocketFrameDecoder newWebsocketDecoder() { protected WebSocketFrameEncoder newWebSocketEncoder() { return new WebSocket07FrameEncoder(performMasking); } + + @Override + public WebSocketClientHandshaker07 setForceCloseTimeoutMillis(long forceCloseTimeoutMillis) { + super.setForceCloseTimeoutMillis(forceCloseTimeoutMillis); + return this; + } + } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08.java index 5bfef6449c1..50dd4599fc7 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,7 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -33,7 +34,7 @@ /** *

    * Performs client side opening and closing handshakes for web socket specification version draft-ietf-hybi-thewebsocketprotocol- + * href="https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10" >draft-ietf-hybi-thewebsocketprotocol- * 10 *

    */ @@ -68,7 +69,8 @@ public class WebSocketClientHandshaker08 extends WebSocketClientHandshaker { */ public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength) { - this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, true, false); + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, true, + false, DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS); } /** @@ -93,12 +95,83 @@ public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, S * which doesn't require masking might set this to false to achieve a higher performance. * @param allowMaskMismatch * When set to true, frames which are not masked properly according to the standard will still be - * accepted. + * accepted + */ + public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking, + allowMaskMismatch, DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS); + } + + /** + * Creates a new instance. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param performMasking + * Whether to mask all written websocket frames. 
This must be set to true in order to be fully compatible + * with the websocket specifications. Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified. */ public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, - boolean performMasking, boolean allowMaskMismatch) { - super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength); + boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking, + allowMaskMismatch, forceCloseTimeoutMillis, false); + } + + /** + * Creates a new instance. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. 
Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified. + * @param absoluteUpgradeUrl + * Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over + * clear HTTP + */ + WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis, + boolean absoluteUpgradeUrl) { + super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis, + absoluteUpgradeUrl); this.allowExtensions = allowExtensions; this.performMasking = performMasking; this.allowMaskMismatch = allowMaskMismatch; @@ -124,9 +197,7 @@ public WebSocketClientHandshaker08(URI webSocketURL, WebSocketVersion version, S */ @Override protected FullHttpRequest newHandshakeRequest() { - // Get path URI wsURL = uri(); - String path = rawPath(wsURL); // Get 16 bit nonce and base 64 encode it byte[] nonce = WebSocketUtil.randomBytes(16); @@ -143,25 +214,36 @@ protected FullHttpRequest newHandshakeRequest() { } // Format request - FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, path); + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, upgradeUrl(wsURL), + Unpooled.EMPTY_BUFFER); HttpHeaders headers = request.headers(); - headers.add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) - .add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) - .add(HttpHeaderNames.SEC_WEBSOCKET_KEY, key) - 
.add(HttpHeaderNames.HOST, websocketHostValue(wsURL)) - .add(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, websocketOriginValue(wsURL)); + if (customHeaders != null) { + headers.add(customHeaders); + if (!headers.contains(HttpHeaderNames.HOST)) { + // Only add HOST header if customHeaders did not contain it. + // + // See https://github.com/netty/netty/issues/10101 + headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL)); + } + } else { + headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL)); + } + + headers.set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.SEC_WEBSOCKET_KEY, key); + + if (!headers.contains(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN)) { + headers.set(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, websocketOriginValue(wsURL)); + } String expectedSubprotocol = expectedSubprotocol(); if (expectedSubprotocol != null && !expectedSubprotocol.isEmpty()) { - headers.add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); + headers.set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); } - headers.add(HttpHeaderNames.SEC_WEBSOCKET_VERSION, "8"); - - if (customHeaders != null) { - headers.add(customHeaders); - } + headers.set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, version().toAsciiString()); return request; } @@ -180,31 +262,30 @@ protected FullHttpRequest newHandshakeRequest() { * * @param response * HTTP response returned from the server for the request sent by beginOpeningHandshake00(). - * @throws WebSocketHandshakeException + * @throws WebSocketHandshakeException If the handshake or challenge is invalid. 
*/ @Override protected void verify(FullHttpResponse response) { - final HttpResponseStatus status = HttpResponseStatus.SWITCHING_PROTOCOLS; - final HttpHeaders headers = response.headers(); - - if (!response.status().equals(status)) { - throw new WebSocketHandshakeException("Invalid handshake response getStatus: " + response.status()); + HttpResponseStatus status = response.status(); + if (!HttpResponseStatus.SWITCHING_PROTOCOLS.equals(status)) { + throw new WebSocketClientHandshakeException("Invalid handshake response getStatus: " + status, response); } + HttpHeaders headers = response.headers(); CharSequence upgrade = headers.get(HttpHeaderNames.UPGRADE); if (!HttpHeaderValues.WEBSOCKET.contentEqualsIgnoreCase(upgrade)) { - throw new WebSocketHandshakeException("Invalid handshake response upgrade: " + upgrade); + throw new WebSocketClientHandshakeException("Invalid handshake response upgrade: " + upgrade, response); } if (!headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true)) { - throw new WebSocketHandshakeException("Invalid handshake response connection: " - + headers.get(HttpHeaderNames.CONNECTION)); + throw new WebSocketClientHandshakeException("Invalid handshake response connection: " + + headers.get(HttpHeaderNames.CONNECTION), response); } CharSequence accept = headers.get(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT); if (accept == null || !accept.equals(expectedChallengeResponseString)) { - throw new WebSocketHandshakeException(String.format( - "Invalid challenge. Actual: %s. Expected: %s", accept, expectedChallengeResponseString)); + throw new WebSocketClientHandshakeException(String.format( + "Invalid challenge. Actual: %s. 
Expected: %s", accept, expectedChallengeResponseString), response); } } @@ -217,4 +298,11 @@ protected WebSocketFrameDecoder newWebsocketDecoder() { protected WebSocketFrameEncoder newWebSocketEncoder() { return new WebSocket08FrameEncoder(performMasking); } + + @Override + public WebSocketClientHandshaker08 setForceCloseTimeoutMillis(long forceCloseTimeoutMillis) { + super.setForceCloseTimeoutMillis(forceCloseTimeoutMillis); + return this; + } + } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13.java index 9490e3bcef5..70805611fa5 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,7 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -33,7 +34,7 @@ /** *

    * Performs client side opening and closing handshakes for web socket specification version draft-ietf-hybi-thewebsocketprotocol- + * href="https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17" >draft-ietf-hybi-thewebsocketprotocol- * 17 *

    */ @@ -68,7 +69,8 @@ public class WebSocketClientHandshaker13 extends WebSocketClientHandshaker { */ public WebSocketClientHandshaker13(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength) { - this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, true, false); + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, + true, false); } /** @@ -98,7 +100,79 @@ public WebSocketClientHandshaker13(URI webSocketURL, WebSocketVersion version, S public WebSocketClientHandshaker13(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, boolean performMasking, boolean allowMaskMismatch) { - super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength); + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, + performMasking, allowMaskMismatch, DEFAULT_FORCE_CLOSE_TIMEOUT_MILLIS); + } + + /** + * Creates a new instance. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. 
Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified. + */ + public WebSocketClientHandshaker13(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch, + long forceCloseTimeoutMillis) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, performMasking, + allowMaskMismatch, forceCloseTimeoutMillis, false); + } + + /** + * Creates a new instance. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. 
+ * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified. + * @param absoluteUpgradeUrl + * Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over + * clear HTTP + */ + WebSocketClientHandshaker13(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch, + long forceCloseTimeoutMillis, boolean absoluteUpgradeUrl) { + super(webSocketURL, version, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis, + absoluteUpgradeUrl); this.allowExtensions = allowExtensions; this.performMasking = performMasking; this.allowMaskMismatch = allowMaskMismatch; @@ -116,7 +190,7 @@ public WebSocketClientHandshaker13(URI webSocketURL, WebSocketVersion version, S * Upgrade: websocket * Connection: Upgrade * Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== - * Sec-WebSocket-Origin: http://example.com + * Origin: http://example.com * Sec-WebSocket-Protocol: chat, superchat * Sec-WebSocket-Version: 13 * @@ -124,9 +198,7 @@ public WebSocketClientHandshaker13(URI webSocketURL, WebSocketVersion version, S */ @Override protected FullHttpRequest newHandshakeRequest() { - // Get path URI wsURL = uri(); - String path = rawPath(wsURL); // Get 16 bit nonce and base 64 encode it byte[] nonce = WebSocketUtil.randomBytes(16); @@ -143,25 +215,36 @@ protected FullHttpRequest newHandshakeRequest() { } // Format request - FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, path); + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, upgradeUrl(wsURL), + Unpooled.EMPTY_BUFFER); HttpHeaders headers = request.headers(); - 
headers.add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) - .add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) - .add(HttpHeaderNames.SEC_WEBSOCKET_KEY, key) - .add(HttpHeaderNames.HOST, websocketHostValue(wsURL)) - .add(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, websocketOriginValue(wsURL)); + if (customHeaders != null) { + headers.add(customHeaders); + if (!headers.contains(HttpHeaderNames.HOST)) { + // Only add HOST header if customHeaders did not contain it. + // + // See https://github.com/netty/netty/issues/10101 + headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL)); + } + } else { + headers.set(HttpHeaderNames.HOST, websocketHostValue(wsURL)); + } + + headers.set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.SEC_WEBSOCKET_KEY, key); + + if (!headers.contains(HttpHeaderNames.ORIGIN)) { + headers.set(HttpHeaderNames.ORIGIN, websocketOriginValue(wsURL)); + } String expectedSubprotocol = expectedSubprotocol(); if (expectedSubprotocol != null && !expectedSubprotocol.isEmpty()) { - headers.add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); + headers.set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, expectedSubprotocol); } - headers.add(HttpHeaderNames.SEC_WEBSOCKET_VERSION, "13"); - - if (customHeaders != null) { - headers.add(customHeaders); - } + headers.set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, version().toAsciiString()); return request; } @@ -180,31 +263,30 @@ protected FullHttpRequest newHandshakeRequest() { * * @param response * HTTP response returned from the server for the request sent by beginOpeningHandshake00(). - * @throws WebSocketHandshakeException + * @throws WebSocketHandshakeException if handshake response is invalid. 
*/ @Override protected void verify(FullHttpResponse response) { - final HttpResponseStatus status = HttpResponseStatus.SWITCHING_PROTOCOLS; - final HttpHeaders headers = response.headers(); - - if (!response.status().equals(status)) { - throw new WebSocketHandshakeException("Invalid handshake response getStatus: " + response.status()); + HttpResponseStatus status = response.status(); + if (!HttpResponseStatus.SWITCHING_PROTOCOLS.equals(status)) { + throw new WebSocketClientHandshakeException("Invalid handshake response getStatus: " + status, response); } + HttpHeaders headers = response.headers(); CharSequence upgrade = headers.get(HttpHeaderNames.UPGRADE); if (!HttpHeaderValues.WEBSOCKET.contentEqualsIgnoreCase(upgrade)) { - throw new WebSocketHandshakeException("Invalid handshake response upgrade: " + upgrade); + throw new WebSocketClientHandshakeException("Invalid handshake response upgrade: " + upgrade, response); } if (!headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true)) { - throw new WebSocketHandshakeException("Invalid handshake response connection: " - + headers.get(HttpHeaderNames.CONNECTION)); + throw new WebSocketClientHandshakeException("Invalid handshake response connection: " + + headers.get(HttpHeaderNames.CONNECTION), response); } CharSequence accept = headers.get(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT); if (accept == null || !accept.equals(expectedChallengeResponseString)) { - throw new WebSocketHandshakeException(String.format( - "Invalid challenge. Actual: %s. Expected: %s", accept, expectedChallengeResponseString)); + throw new WebSocketClientHandshakeException(String.format( + "Invalid challenge. Actual: %s. 
Expected: %s", accept, expectedChallengeResponseString), response); } } @@ -217,4 +299,11 @@ protected WebSocketFrameDecoder newWebsocketDecoder() { protected WebSocketFrameEncoder newWebSocketEncoder() { return new WebSocket13FrameEncoder(performMasking); } + + @Override + public WebSocketClientHandshaker13 setForceCloseTimeoutMillis(long forceCloseTimeoutMillis) { + super.setForceCloseTimeoutMillis(forceCloseTimeoutMillis); + return this; + } + } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerFactory.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerFactory.java index b07825f4a17..a5dabdcbd55 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerFactory.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerFactory.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -107,26 +107,119 @@ public static WebSocketClientHandshaker newHandshaker( URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, boolean performMasking, boolean allowMaskMismatch) { + return newHandshaker(webSocketURL, version, subprotocol, allowExtensions, customHeaders, + maxFramePayloadLength, performMasking, allowMaskMismatch, -1); + } + + /** + * Creates a new handshaker. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". + * Subsequent web socket frames will be sent to this URL. 
+ * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. Null if no sub-protocol support is required. + * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Custom HTTP headers to send during the handshake + * @param maxFramePayloadLength + * Maximum allowable frame payload length. Setting this value to your application's + * requirement may reduce denial of service attacks using long data frames. + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted. 
+ * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified + */ + public static WebSocketClientHandshaker newHandshaker( + URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis) { if (version == V13) { return new WebSocketClientHandshaker13( webSocketURL, V13, subprotocol, allowExtensions, customHeaders, - maxFramePayloadLength, performMasking, allowMaskMismatch); + maxFramePayloadLength, performMasking, allowMaskMismatch, forceCloseTimeoutMillis); } if (version == V08) { return new WebSocketClientHandshaker08( webSocketURL, V08, subprotocol, allowExtensions, customHeaders, - maxFramePayloadLength, performMasking, allowMaskMismatch); + maxFramePayloadLength, performMasking, allowMaskMismatch, forceCloseTimeoutMillis); } if (version == V07) { return new WebSocketClientHandshaker07( webSocketURL, V07, subprotocol, allowExtensions, customHeaders, - maxFramePayloadLength, performMasking, allowMaskMismatch); + maxFramePayloadLength, performMasking, allowMaskMismatch, forceCloseTimeoutMillis); + } + if (version == V00) { + return new WebSocketClientHandshaker00( + webSocketURL, V00, subprotocol, customHeaders, maxFramePayloadLength, forceCloseTimeoutMillis); + } + + throw new WebSocketClientHandshakeException("Protocol version " + version + " not supported."); + } + + /** + * Creates a new handshaker. + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". + * Subsequent web socket frames will be sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. Null if no sub-protocol support is required. 
+ * @param allowExtensions + * Allow extensions to be used in the reserved bits of the web socket frame + * @param customHeaders + * Custom HTTP headers to send during the handshake + * @param maxFramePayloadLength + * Maximum allowable frame payload length. Setting this value to your application's + * requirement may reduce denial of service attacks using long data frames. + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted. + * @param forceCloseTimeoutMillis + * Close the connection if it was not closed by the server after timeout specified + * @param absoluteUpgradeUrl + * Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over + * clear HTTP + */ + public static WebSocketClientHandshaker newHandshaker( + URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean performMasking, boolean allowMaskMismatch, long forceCloseTimeoutMillis, boolean absoluteUpgradeUrl) { + if (version == V13) { + return new WebSocketClientHandshaker13( + webSocketURL, V13, subprotocol, allowExtensions, customHeaders, + maxFramePayloadLength, performMasking, allowMaskMismatch, forceCloseTimeoutMillis, absoluteUpgradeUrl); + } + if (version == V08) { + return new WebSocketClientHandshaker08( + webSocketURL, V08, subprotocol, allowExtensions, customHeaders, + maxFramePayloadLength, performMasking, allowMaskMismatch, forceCloseTimeoutMillis, absoluteUpgradeUrl); + } + if (version == V07) { + return new WebSocketClientHandshaker07( + webSocketURL, 
V07, subprotocol, allowExtensions, customHeaders, + maxFramePayloadLength, performMasking, allowMaskMismatch, forceCloseTimeoutMillis, absoluteUpgradeUrl); } if (version == V00) { return new WebSocketClientHandshaker00( - webSocketURL, V00, subprotocol, customHeaders, maxFramePayloadLength); + webSocketURL, V00, subprotocol, customHeaders, + maxFramePayloadLength, forceCloseTimeoutMillis, absoluteUpgradeUrl); } - throw new WebSocketHandshakeException("Protocol version " + version + " not supported."); + throw new WebSocketClientHandshakeException("Protocol version " + version + " not supported."); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolConfig.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolConfig.java new file mode 100644 index 00000000000..461e4687f76 --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolConfig.java @@ -0,0 +1,392 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.websocketx.WebSocketClientProtocolHandler.ClientHandshakeStateEvent; + +import java.net.URI; +import java.util.Objects; + +import static io.netty.handler.codec.http.websocketx.WebSocketServerProtocolConfig.DEFAULT_HANDSHAKE_TIMEOUT_MILLIS; +import static io.netty.util.internal.ObjectUtil.checkPositive; + +/** + * WebSocket client configuration. + */ +public final class WebSocketClientProtocolConfig { + + static final boolean DEFAULT_PERFORM_MASKING = true; + static final boolean DEFAULT_ALLOW_MASK_MISMATCH = false; + static final boolean DEFAULT_HANDLE_CLOSE_FRAMES = true; + static final boolean DEFAULT_DROP_PONG_FRAMES = true; + + private final URI webSocketUri; + private final String subprotocol; + private final WebSocketVersion version; + private final boolean allowExtensions; + private final HttpHeaders customHeaders; + private final int maxFramePayloadLength; + private final boolean performMasking; + private final boolean allowMaskMismatch; + private final boolean handleCloseFrames; + private final WebSocketCloseStatus sendCloseFrame; + private final boolean dropPongFrames; + private final long handshakeTimeoutMillis; + private final long forceCloseTimeoutMillis; + private final boolean absoluteUpgradeUrl; + + private WebSocketClientProtocolConfig( + URI webSocketUri, + String subprotocol, + WebSocketVersion version, + boolean allowExtensions, + HttpHeaders customHeaders, + int maxFramePayloadLength, + boolean performMasking, + boolean allowMaskMismatch, + boolean handleCloseFrames, + WebSocketCloseStatus sendCloseFrame, + boolean dropPongFrames, + long handshakeTimeoutMillis, + long forceCloseTimeoutMillis, + boolean absoluteUpgradeUrl + ) { + this.webSocketUri = webSocketUri; + this.subprotocol = subprotocol; + this.version = version; + this.allowExtensions = 
allowExtensions; + this.customHeaders = customHeaders; + this.maxFramePayloadLength = maxFramePayloadLength; + this.performMasking = performMasking; + this.allowMaskMismatch = allowMaskMismatch; + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + this.handleCloseFrames = handleCloseFrames; + this.sendCloseFrame = sendCloseFrame; + this.dropPongFrames = dropPongFrames; + this.handshakeTimeoutMillis = checkPositive(handshakeTimeoutMillis, "handshakeTimeoutMillis"); + this.absoluteUpgradeUrl = absoluteUpgradeUrl; + } + + public URI webSocketUri() { + return webSocketUri; + } + + public String subprotocol() { + return subprotocol; + } + + public WebSocketVersion version() { + return version; + } + + public boolean allowExtensions() { + return allowExtensions; + } + + public HttpHeaders customHeaders() { + return customHeaders; + } + + public int maxFramePayloadLength() { + return maxFramePayloadLength; + } + + public boolean performMasking() { + return performMasking; + } + + public boolean allowMaskMismatch() { + return allowMaskMismatch; + } + + public boolean handleCloseFrames() { + return handleCloseFrames; + } + + public WebSocketCloseStatus sendCloseFrame() { + return sendCloseFrame; + } + + public boolean dropPongFrames() { + return dropPongFrames; + } + + public long handshakeTimeoutMillis() { + return handshakeTimeoutMillis; + } + + public long forceCloseTimeoutMillis() { + return forceCloseTimeoutMillis; + } + + public boolean absoluteUpgradeUrl() { + return absoluteUpgradeUrl; + } + + @Override + public String toString() { + return "WebSocketClientProtocolConfig" + + " {webSocketUri=" + webSocketUri + + ", subprotocol=" + subprotocol + + ", version=" + version + + ", allowExtensions=" + allowExtensions + + ", customHeaders=" + customHeaders + + ", maxFramePayloadLength=" + maxFramePayloadLength + + ", performMasking=" + performMasking + + ", allowMaskMismatch=" + allowMaskMismatch + + ", handleCloseFrames=" + handleCloseFrames + + ", sendCloseFrame=" 
+ sendCloseFrame + + ", dropPongFrames=" + dropPongFrames + + ", handshakeTimeoutMillis=" + handshakeTimeoutMillis + + ", forceCloseTimeoutMillis=" + forceCloseTimeoutMillis + + ", absoluteUpgradeUrl=" + absoluteUpgradeUrl + + "}"; + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static Builder newBuilder() { + return new Builder( + URI.create("https://localhost/"), + null, + WebSocketVersion.V13, + false, + EmptyHttpHeaders.INSTANCE, + 65536, + DEFAULT_PERFORM_MASKING, + DEFAULT_ALLOW_MASK_MISMATCH, + DEFAULT_HANDLE_CLOSE_FRAMES, + WebSocketCloseStatus.NORMAL_CLOSURE, + DEFAULT_DROP_PONG_FRAMES, + DEFAULT_HANDSHAKE_TIMEOUT_MILLIS, + -1, + false); + } + + public static final class Builder { + private URI webSocketUri; + private String subprotocol; + private WebSocketVersion version; + private boolean allowExtensions; + private HttpHeaders customHeaders; + private int maxFramePayloadLength; + private boolean performMasking; + private boolean allowMaskMismatch; + private boolean handleCloseFrames; + private WebSocketCloseStatus sendCloseFrame; + private boolean dropPongFrames; + private long handshakeTimeoutMillis; + private long forceCloseTimeoutMillis; + private boolean absoluteUpgradeUrl; + + private Builder(WebSocketClientProtocolConfig clientConfig) { + this(Objects.requireNonNull(clientConfig, "clientConfig").webSocketUri(), + clientConfig.subprotocol(), + clientConfig.version(), + clientConfig.allowExtensions(), + clientConfig.customHeaders(), + clientConfig.maxFramePayloadLength(), + clientConfig.performMasking(), + clientConfig.allowMaskMismatch(), + clientConfig.handleCloseFrames(), + clientConfig.sendCloseFrame(), + clientConfig.dropPongFrames(), + clientConfig.handshakeTimeoutMillis(), + clientConfig.forceCloseTimeoutMillis(), + clientConfig.absoluteUpgradeUrl()); + } + + private Builder(URI webSocketUri, + String subprotocol, + WebSocketVersion version, + boolean allowExtensions, + HttpHeaders customHeaders, + int 
maxFramePayloadLength, + boolean performMasking, + boolean allowMaskMismatch, + boolean handleCloseFrames, + WebSocketCloseStatus sendCloseFrame, + boolean dropPongFrames, + long handshakeTimeoutMillis, + long forceCloseTimeoutMillis, + boolean absoluteUpgradeUrl) { + this.webSocketUri = webSocketUri; + this.subprotocol = subprotocol; + this.version = version; + this.allowExtensions = allowExtensions; + this.customHeaders = customHeaders; + this.maxFramePayloadLength = maxFramePayloadLength; + this.performMasking = performMasking; + this.allowMaskMismatch = allowMaskMismatch; + this.handleCloseFrames = handleCloseFrames; + this.sendCloseFrame = sendCloseFrame; + this.dropPongFrames = dropPongFrames; + this.handshakeTimeoutMillis = handshakeTimeoutMillis; + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + this.absoluteUpgradeUrl = absoluteUpgradeUrl; + } + + /** + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + */ + public Builder webSocketUri(String webSocketUri) { + return webSocketUri(URI.create(webSocketUri)); + } + + /** + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + */ + public Builder webSocketUri(URI webSocketUri) { + this.webSocketUri = webSocketUri; + return this; + } + + /** + * Sub protocol request sent to the server. 
+ */ + public Builder subprotocol(String subprotocol) { + this.subprotocol = subprotocol; + return this; + } + + /** + * Version of web socket specification to use to connect to the server + */ + public Builder version(WebSocketVersion version) { + this.version = version; + return this; + } + + /** + * Allow extensions to be used in the reserved bits of the web socket frame + */ + public Builder allowExtensions(boolean allowExtensions) { + this.allowExtensions = allowExtensions; + return this; + } + + /** + * Map of custom headers to add to the client request + */ + public Builder customHeaders(HttpHeaders customHeaders) { + this.customHeaders = customHeaders; + return this; + } + + /** + * Maximum length of a frame's payload + */ + public Builder maxFramePayloadLength(int maxFramePayloadLength) { + this.maxFramePayloadLength = maxFramePayloadLength; + return this; + } + + /** + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + */ + public Builder performMasking(boolean performMasking) { + this.performMasking = performMasking; + return this; + } + + /** + * When set to true, frames which are not masked properly according to the standard will still be accepted. + */ + public Builder allowMaskMismatch(boolean allowMaskMismatch) { + this.allowMaskMismatch = allowMaskMismatch; + return this; + } + + /** + * {@code true} if close frames should not be forwarded and just close the channel + */ + public Builder handleCloseFrames(boolean handleCloseFrames) { + this.handleCloseFrames = handleCloseFrames; + return this; + } + + /** + * Close frame to send, when close frame was not send manually. Or {@code null} to disable proper close. 
+ */ + public Builder sendCloseFrame(WebSocketCloseStatus sendCloseFrame) { + this.sendCloseFrame = sendCloseFrame; + return this; + } + + /** + * {@code true} if pong frames should not be forwarded + */ + public Builder dropPongFrames(boolean dropPongFrames) { + this.dropPongFrames = dropPongFrames; + return this; + } + + /** + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public Builder handshakeTimeoutMillis(long handshakeTimeoutMillis) { + this.handshakeTimeoutMillis = handshakeTimeoutMillis; + return this; + } + + /** + * Close the connection if it was not closed by the server after timeout specified + */ + public Builder forceCloseTimeoutMillis(long forceCloseTimeoutMillis) { + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + return this; + } + + /** + * Use an absolute url for the Upgrade request, typically when connecting through an HTTP proxy over clear HTTP + */ + public Builder absoluteUpgradeUrl(boolean absoluteUpgradeUrl) { + this.absoluteUpgradeUrl = absoluteUpgradeUrl; + return this; + } + + /** + * Build unmodifiable client protocol configuration. 
+ */ + public WebSocketClientProtocolConfig build() { + return new WebSocketClientProtocolConfig( + webSocketUri, + subprotocol, + version, + allowExtensions, + customHeaders, + maxFramePayloadLength, + performMasking, + allowMaskMismatch, + handleCloseFrames, + sendCloseFrame, + dropPongFrames, + handshakeTimeoutMillis, + forceCloseTimeoutMillis, + absoluteUpgradeUrl + ); + } + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandler.java index 2ace7e8aa09..ceb2a4e87d2 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,12 +16,18 @@ package io.netty.handler.codec.http.websocketx; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelPipeline; import io.netty.handler.codec.http.HttpHeaders; import java.net.URI; -import java.util.List; +import java.util.Objects; + +import static io.netty.handler.codec.http.websocketx.WebSocketClientProtocolConfig.DEFAULT_ALLOW_MASK_MISMATCH; +import static io.netty.handler.codec.http.websocketx.WebSocketClientProtocolConfig.DEFAULT_DROP_PONG_FRAMES; +import static io.netty.handler.codec.http.websocketx.WebSocketClientProtocolConfig.DEFAULT_HANDLE_CLOSE_FRAMES; +import static 
io.netty.handler.codec.http.websocketx.WebSocketClientProtocolConfig.DEFAULT_PERFORM_MASKING; +import static io.netty.handler.codec.http.websocketx.WebSocketServerProtocolConfig.DEFAULT_HANDSHAKE_TIMEOUT_MILLIS; /** * This handler does all the heavy lifting for you to run a websocket client. @@ -34,23 +40,29 @@ * This implementation will establish the websocket connection once the connection to the remote server was complete. * * To know once a handshake was done you can intercept the - * {@link ChannelInboundHandler#userEventTriggered(ChannelHandlerContext, Object)} and check if the event was of type + * {@link ChannelHandler#userEventTriggered(ChannelHandlerContext, Object)} and check if the event was of type * {@link ClientHandshakeStateEvent#HANDSHAKE_ISSUED} or {@link ClientHandshakeStateEvent#HANDSHAKE_COMPLETE}. */ public class WebSocketClientProtocolHandler extends WebSocketProtocolHandler { - private final WebSocketClientHandshaker handshaker; - private final boolean handleCloseFrames; + private final WebSocketClientProtocolConfig clientConfig; /** * Returns the used handshaker */ - public WebSocketClientHandshaker handshaker() { return handshaker; } + public WebSocketClientHandshaker handshaker() { + return handshaker; + } /** * Events that are fired to notify about handshake status */ public enum ClientHandshakeStateEvent { + /** + * The Handshake was timed out + */ + HANDSHAKE_TIMEOUT, + /** * The Handshake was started but the server did not response yet to the request */ @@ -62,6 +74,30 @@ public enum ClientHandshakeStateEvent { HANDSHAKE_COMPLETE } + /** + * Base constructor + * + * @param clientConfig + * Client protocol configuration. 
+ */ + public WebSocketClientProtocolHandler(WebSocketClientProtocolConfig clientConfig) { + super(Objects.requireNonNull(clientConfig, "clientConfig").dropPongFrames(), + clientConfig.sendCloseFrame(), clientConfig.forceCloseTimeoutMillis()); + this.handshaker = WebSocketClientHandshakerFactory.newHandshaker( + clientConfig.webSocketUri(), + clientConfig.version(), + clientConfig.subprotocol(), + clientConfig.allowExtensions(), + clientConfig.customHeaders(), + clientConfig.maxFramePayloadLength(), + clientConfig.performMasking(), + clientConfig.allowMaskMismatch(), + clientConfig.forceCloseTimeoutMillis(), + clientConfig.absoluteUpgradeUrl() + ); + this.clientConfig = clientConfig; + } + /** * Base constructor * @@ -90,9 +126,45 @@ public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, boolean handleCloseFrames, boolean performMasking, boolean allowMaskMismatch) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, + handleCloseFrames, performMasking, allowMaskMismatch, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + /** + * Base constructor + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. + * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param handleCloseFrames + * {@code true} if close frames should not be forwarded and just close the channel + * @param performMasking + * Whether to mask all written websocket frames. This must be set to true in order to be fully compatible + * with the websocket specifications. 
Client applications that communicate with a non-standard server + * which doesn't require masking might set this to false to achieve a higher performance. + * @param allowMaskMismatch + * When set to true, frames which are not masked properly according to the standard will still be + * accepted. + * @param handshakeTimeoutMillis + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, + int maxFramePayloadLength, boolean handleCloseFrames, boolean performMasking, + boolean allowMaskMismatch, long handshakeTimeoutMillis) { this(WebSocketClientHandshakerFactory.newHandshaker(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, - performMasking, allowMaskMismatch), handleCloseFrames); + performMasking, allowMaskMismatch), + handleCloseFrames, handshakeTimeoutMillis); } /** @@ -116,7 +188,34 @@ public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, boolean handleCloseFrames) { this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, - handleCloseFrames, true, false); + handleCloseFrames, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + /** + * Base constructor + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. 
+ * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param handleCloseFrames + * {@code true} if close frames should not be forwarded and just close the channel + * @param handshakeTimeoutMillis + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength, + boolean handleCloseFrames, long handshakeTimeoutMillis) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, maxFramePayloadLength, + handleCloseFrames, DEFAULT_PERFORM_MASKING, DEFAULT_ALLOW_MASK_MISMATCH, handshakeTimeoutMillis); } /** @@ -137,8 +236,33 @@ public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version, String subprotocol, boolean allowExtensions, HttpHeaders customHeaders, int maxFramePayloadLength) { - this(webSocketURL, version, subprotocol, - allowExtensions, customHeaders, maxFramePayloadLength, true); + this(webSocketURL, version, subprotocol, allowExtensions, + customHeaders, maxFramePayloadLength, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + /** + * Base constructor + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param version + * Version of web socket specification to use to connect to the server + * @param subprotocol + * Sub protocol request sent to the server. 
+ * @param customHeaders + * Map of custom headers to add to the client request + * @param maxFramePayloadLength + * Maximum length of a frame's payload + * @param handshakeTimeoutMillis + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version, String subprotocol, + boolean allowExtensions, HttpHeaders customHeaders, + int maxFramePayloadLength, long handshakeTimeoutMillis) { + this(webSocketURL, version, subprotocol, allowExtensions, customHeaders, + maxFramePayloadLength, DEFAULT_HANDLE_CLOSE_FRAMES, handshakeTimeoutMillis); } /** @@ -151,8 +275,64 @@ public WebSocketClientProtocolHandler(URI webSocketURL, WebSocketVersion version * {@code true} if close frames should not be forwarded and just close the channel */ public WebSocketClientProtocolHandler(WebSocketClientHandshaker handshaker, boolean handleCloseFrames) { + this(handshaker, handleCloseFrames, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + /** + * Base constructor + * + * @param handshaker + * The {@link WebSocketClientHandshaker} which will be used to issue the handshake once the connection + * was established to the remote peer. + * @param handleCloseFrames + * {@code true} if close frames should not be forwarded and just close the channel + * @param handshakeTimeoutMillis + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public WebSocketClientProtocolHandler(WebSocketClientHandshaker handshaker, boolean handleCloseFrames, + long handshakeTimeoutMillis) { + this(handshaker, handleCloseFrames, DEFAULT_DROP_PONG_FRAMES, handshakeTimeoutMillis); + } + + /** + * Base constructor + * + * @param handshaker + * The {@link WebSocketClientHandshaker} which will be used to issue the handshake once the connection + * was established to the remote peer. 
+ * @param handleCloseFrames + * {@code true} if close frames should not be forwarded and just close the channel + * @param dropPongFrames + * {@code true} if pong frames should not be forwarded + */ + public WebSocketClientProtocolHandler(WebSocketClientHandshaker handshaker, boolean handleCloseFrames, + boolean dropPongFrames) { + this(handshaker, handleCloseFrames, dropPongFrames, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + /** + * Base constructor + * + * @param handshaker + * The {@link WebSocketClientHandshaker} which will be used to issue the handshake once the connection + * was established to the remote peer. + * @param handleCloseFrames + * {@code true} if close frames should not be forwarded and just close the channel + * @param dropPongFrames + * {@code true} if pong frames should not be forwarded + * @param handshakeTimeoutMillis + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public WebSocketClientProtocolHandler(WebSocketClientHandshaker handshaker, boolean handleCloseFrames, + boolean dropPongFrames, long handshakeTimeoutMillis) { + super(dropPongFrames); this.handshaker = handshaker; - this.handleCloseFrames = handleCloseFrames; + this.clientConfig = WebSocketClientProtocolConfig.newBuilder() + .handleCloseFrames(handleCloseFrames) + .handshakeTimeoutMillis(handshakeTimeoutMillis) + .build(); } /** @@ -163,16 +343,35 @@ public WebSocketClientProtocolHandler(WebSocketClientHandshaker handshaker, bool * was established to the remote peer. */ public WebSocketClientProtocolHandler(WebSocketClientHandshaker handshaker) { - this(handshaker, true); + this(handshaker, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + /** + * Base constructor + * + * @param handshaker + * The {@link WebSocketClientHandshaker} which will be used to issue the handshake once the connection + * was established to the remote peer. 
+ * @param handshakeTimeoutMillis + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public WebSocketClientProtocolHandler(WebSocketClientHandshaker handshaker, long handshakeTimeoutMillis) { + this(handshaker, DEFAULT_HANDLE_CLOSE_FRAMES, handshakeTimeoutMillis); } @Override - protected void decode(ChannelHandlerContext ctx, WebSocketFrame frame, List out) throws Exception { - if (handleCloseFrames && frame instanceof CloseWebSocketFrame) { + protected void decode(ChannelHandlerContext ctx, WebSocketFrame frame) throws Exception { + if (clientConfig.handleCloseFrames() && frame instanceof CloseWebSocketFrame) { ctx.close(); return; } - super.decode(ctx, frame, out); + super.decode(ctx, frame); + } + + @Override + protected WebSocketClientHandshakeException buildHandshakeException(String message) { + return new WebSocketClientHandshakeException(message); } @Override @@ -181,7 +380,7 @@ public void handlerAdded(ChannelHandlerContext ctx) { if (cp.get(WebSocketClientProtocolHandshakeHandler.class) == null) { // Add the WebSocketClientProtocolHandshakeHandler before this one. ctx.pipeline().addBefore(ctx.name(), WebSocketClientProtocolHandshakeHandler.class.getName(), - new WebSocketClientProtocolHandshakeHandler(handshaker)); + new WebSocketClientProtocolHandshakeHandler(handshaker, clientConfig.handshakeTimeoutMillis())); } if (cp.get(Utf8FrameValidator.class) == null) { // Add the UFT8 checking before this one. 
diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandshakeHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandshakeHandler.java index 3d130c63d62..6aa4e492588 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandshakeHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketClientProtocolHandshakeHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,33 +15,64 @@ */ package io.netty.handler.codec.http.websocketx; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.websocketx.WebSocketClientProtocolHandler.ClientHandshakeStateEvent; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; + +import java.util.concurrent.TimeUnit; + +import static io.netty.util.internal.ObjectUtil.checkPositive; + +class WebSocketClientProtocolHandshakeHandler implements ChannelHandler { + + private static final long DEFAULT_HANDSHAKE_TIMEOUT_MS = 10000L; -class WebSocketClientProtocolHandshakeHandler extends ChannelInboundHandlerAdapter { private final WebSocketClientHandshaker handshaker; + private final long handshakeTimeoutMillis; + private ChannelHandlerContext ctx; + private Promise handshakePromise; 
WebSocketClientProtocolHandshakeHandler(WebSocketClientHandshaker handshaker) { + this(handshaker, DEFAULT_HANDSHAKE_TIMEOUT_MS); + } + + WebSocketClientProtocolHandshakeHandler(WebSocketClientHandshaker handshaker, long handshakeTimeoutMillis) { this.handshaker = handshaker; + this.handshakeTimeoutMillis = checkPositive(handshakeTimeoutMillis, "handshakeTimeoutMillis"); + } + + @Override + public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + this.ctx = ctx; + handshakePromise = ctx.newPromise(); } @Override public void channelActive(final ChannelHandlerContext ctx) throws Exception { - super.channelActive(ctx); - handshaker.handshake(ctx.channel()).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - ctx.fireExceptionCaught(future.cause()); - } else { - ctx.fireUserEventTriggered( - WebSocketClientProtocolHandler.ClientHandshakeStateEvent.HANDSHAKE_ISSUED); - } + ctx.fireChannelActive(); + handshaker.handshake(ctx.channel()).addListener(future -> { + if (future.isFailed()) { + handshakePromise.tryFailure(future.cause()); + ctx.fireExceptionCaught(future.cause()); + } else { + ctx.fireUserEventTriggered( + WebSocketClientProtocolHandler.ClientHandshakeStateEvent.HANDSHAKE_ISSUED); } }); + applyHandshakeTimeout(); + } + + @Override + public void channelInactive(ChannelHandlerContext ctx) throws Exception { + if (!handshakePromise.isDone()) { + handshakePromise.tryFailure(new WebSocketClientHandshakeException("channel closed with handshake " + + "in progress")); + } + + ctx.fireChannelInactive(); } @Override @@ -55,6 +86,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception try { if (!handshaker.isHandshakeComplete()) { handshaker.finishHandshake(ctx.channel(), response); + handshakePromise.trySuccess(null); ctx.fireUserEventTriggered( WebSocketClientProtocolHandler.ClientHandshakeStateEvent.HANDSHAKE_COMPLETE); 
ctx.pipeline().remove(this); @@ -65,4 +97,35 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception response.release(); } } + + private void applyHandshakeTimeout() { + final Promise localHandshakePromise = handshakePromise; + if (handshakeTimeoutMillis <= 0 || localHandshakePromise.isDone()) { + return; + } + + final Future timeoutFuture = ctx.executor().schedule(() -> { + if (localHandshakePromise.isDone()) { + return; + } + + if (localHandshakePromise.tryFailure(new WebSocketClientHandshakeException("handshake timed out"))) { + ctx.flush() + .fireUserEventTriggered(ClientHandshakeStateEvent.HANDSHAKE_TIMEOUT) + .close(); + } + }, handshakeTimeoutMillis, TimeUnit.MILLISECONDS); + + // Cancel the handshake timeout when handshake is finished. + localHandshakePromise.addListener(f -> timeoutFuture.cancel(false)); + } + + /** + * This method is visible for testing. + * + * @return current handshake future + */ + Future getHandshakeFuture() { + return handshakePromise; + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java new file mode 100644 index 00000000000..a15a2cddadb --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatus.java @@ -0,0 +1,330 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import java.util.Objects; + +/** + * WebSocket status codes specified in RFC-6455. + *
    + *
    + * RFC-6455 The WebSocket Protocol, December 2011:
    + * https://tools.ietf.org/html/rfc6455#section-7.4.1
    + *
    + * WebSocket Protocol Registries, April 2019:
    + * https://www.iana.org/assignments/websocket/websocket.xhtml
    + *
    + * 7.4.1.  Defined Status Codes
    + *
    + * Endpoints MAY use the following pre-defined status codes when sending
    + * a Close frame.
    + *
    + * 1000
    + *
    + *    1000 indicates a normal closure, meaning that the purpose for
    + *    which the connection was established has been fulfilled.
    + *
    + * 1001
    + *
    + *    1001 indicates that an endpoint is "going away", such as a server
    + *    going down or a browser having navigated away from a page.
    + *
    + * 1002
    + *
    + *    1002 indicates that an endpoint is terminating the connection due
    + *    to a protocol error.
    + *
    + * 1003
    + *
    + *    1003 indicates that an endpoint is terminating the connection
    + *    because it has received a type of data it cannot accept (e.g., an
    + *    endpoint that understands only text data MAY send this if it
    + *    receives a binary message).
    + *
    + * 1004
    + *
    + *    Reserved. The specific meaning might be defined in the future.
    + *
    + * 1005
    + *
    + *    1005 is a reserved value and MUST NOT be set as a status code in a
    + *    Close control frame by an endpoint. It is designated for use in
    + *    applications expecting a status code to indicate that no status
    + *    code was actually present.
    + *
    + * 1006
    + *
    + *    1006 is a reserved value and MUST NOT be set as a status code in a
    + *    Close control frame by an endpoint. It is designated for use in
    + *    applications expecting a status code to indicate that the
    + *    connection was closed abnormally, e.g., without sending or
    + *    receiving a Close control frame.
    + *
    + * 1007
    + *
    + *    1007 indicates that an endpoint is terminating the connection
    + *    because it has received data within a message that was not
    + *    consistent with the type of the message (e.g., non-UTF-8 [RFC3629]
    + *    data within a text message).
    + *
    + * 1008
    + *
    + *    1008 indicates that an endpoint is terminating the connection
    + *    because it has received a message that violates its policy. This
    + *    is a generic status code that can be returned when there is no
    + *    other more suitable status code (e.g., 1003 or 1009) or if there
    + *    is a need to hide specific details about the policy.
    + *
    + * 1009
    + *
    + *    1009 indicates that an endpoint is terminating the connection
    + *    because it has received a message that is too big for it to
    + *    process.
    + *
    + * 1010
    + *
    + *    1010 indicates that an endpoint (client) is terminating the
    + *    connection because it has expected the server to negotiate one or
    + *    more extension, but the server didn't return them in the response
    + *    message of the WebSocket handshake. The list of extensions that
    + *    are needed SHOULD appear in the /reason/ part of the Close frame.
    + *    Note that this status code is not used by the server, because it
    + *    can fail the WebSocket handshake instead.
    + *
    + * 1011
    + *
    + *    1011 indicates that a server is terminating the connection because
    + *    it encountered an unexpected condition that prevented it from
    + *    fulfilling the request.
    + *
    + * 1012 (IANA Registry, Non RFC-6455)
    + *
    + *    1012 indicates that the service is restarted. A client may reconnect,
    + *    and if it chooses to do so, should reconnect using a randomized delay
    + *    of 5 - 30 seconds.
    + *
    + * 1013 (IANA Registry, Non RFC-6455)
    + *
    + *    1013 indicates that the service is experiencing overload. A client
    + *    should only connect to a different IP (when there are multiple for the
    + *    target) or reconnect to the same IP upon user action.
    + *
    + * 1014 (IANA Registry, Non RFC-6455)
    + *
    + *    The server was acting as a gateway or proxy and received an invalid
    + *    response from the upstream server. This is similar to 502 HTTP Status Code.
    + *
    + * 1015
    + *
    + *    1015 is a reserved value and MUST NOT be set as a status code in a
    + *    Close control frame by an endpoint. It is designated for use in
    + *    applications expecting a status code to indicate that the
    + *    connection was closed due to a failure to perform a TLS handshake
    + *    (e.g., the server certificate can't be verified).
    + *
    + *
    + * 7.4.2. Reserved Status Code Ranges
    + *
    + * 0-999
    + *
    + *    Status codes in the range 0-999 are not used.
    + *
    + * 1000-2999
    + *
    + *    Status codes in the range 1000-2999 are reserved for definition by
    + *    this protocol, its future revisions, and extensions specified in a
    + *    permanent and readily available public specification.
    + *
    + * 3000-3999
    + *
    + *    Status codes in the range 3000-3999 are reserved for use by
    + *    libraries, frameworks, and applications. These status codes are
    + *    registered directly with IANA. The interpretation of these codes
    + *    is undefined by this protocol.
    + *
    + * 4000-4999
    + *
    + *    Status codes in the range 4000-4999 are reserved for private use
    + *    and thus can't be registered. Such codes can be used by prior
    + *    agreements between WebSocket applications. The interpretation of
    + *    these codes is undefined by this protocol.
    + * 
    + *

    + * While {@link WebSocketCloseStatus} is enum-like structure, its instances should NOT be compared by reference. + * Instead, either {@link #equals(Object)} should be used or direct comparison of {@link #code()} value. + */ +public final class WebSocketCloseStatus implements Comparable { + + public static final WebSocketCloseStatus NORMAL_CLOSURE = + new WebSocketCloseStatus(1000, "Bye"); + + public static final WebSocketCloseStatus ENDPOINT_UNAVAILABLE = + new WebSocketCloseStatus(1001, "Endpoint unavailable"); + + public static final WebSocketCloseStatus PROTOCOL_ERROR = + new WebSocketCloseStatus(1002, "Protocol error"); + + public static final WebSocketCloseStatus INVALID_MESSAGE_TYPE = + new WebSocketCloseStatus(1003, "Invalid message type"); + + public static final WebSocketCloseStatus INVALID_PAYLOAD_DATA = + new WebSocketCloseStatus(1007, "Invalid payload data"); + + public static final WebSocketCloseStatus POLICY_VIOLATION = + new WebSocketCloseStatus(1008, "Policy violation"); + + public static final WebSocketCloseStatus MESSAGE_TOO_BIG = + new WebSocketCloseStatus(1009, "Message too big"); + + public static final WebSocketCloseStatus MANDATORY_EXTENSION = + new WebSocketCloseStatus(1010, "Mandatory extension"); + + public static final WebSocketCloseStatus INTERNAL_SERVER_ERROR = + new WebSocketCloseStatus(1011, "Internal server error"); + + public static final WebSocketCloseStatus SERVICE_RESTART = + new WebSocketCloseStatus(1012, "Service Restart"); + + public static final WebSocketCloseStatus TRY_AGAIN_LATER = + new WebSocketCloseStatus(1013, "Try Again Later"); + + public static final WebSocketCloseStatus BAD_GATEWAY = + new WebSocketCloseStatus(1014, "Bad Gateway"); + + // 1004, 1005, 1006, 1015 are reserved and should never be used by user + //public static final WebSocketCloseStatus SPECIFIC_MEANING = register(1004, "..."); + + public static final WebSocketCloseStatus EMPTY = + new WebSocketCloseStatus(1005, "Empty", false); + + public static 
final WebSocketCloseStatus ABNORMAL_CLOSURE = + new WebSocketCloseStatus(1006, "Abnormal closure", false); + + public static final WebSocketCloseStatus TLS_HANDSHAKE_FAILED = + new WebSocketCloseStatus(1015, "TLS handshake failed", false); + + private final int statusCode; + private final String reasonText; + private String text; + + public WebSocketCloseStatus(int statusCode, String reasonText) { + this(statusCode, reasonText, true); + } + + public WebSocketCloseStatus(int statusCode, String reasonText, boolean validate) { + if (validate && !isValidStatusCode(statusCode)) { + throw new IllegalArgumentException( + "WebSocket close status code does NOT comply with RFC-6455: " + statusCode); + } + this.statusCode = statusCode; + this.reasonText = Objects.requireNonNull(reasonText, "reasonText"); + } + + public int code() { + return statusCode; + } + + public String reasonText() { + return reasonText; + } + + /** + * Order of {@link WebSocketCloseStatus} only depends on {@link #code()}. + */ + @Override + public int compareTo(WebSocketCloseStatus o) { + return code() - o.code(); + } + + /** + * Equality of {@link WebSocketCloseStatus} only depends on {@link #code()}. 
+ */ + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (null == o || getClass() != o.getClass()) { + return false; + } + + WebSocketCloseStatus that = (WebSocketCloseStatus) o; + + return statusCode == that.statusCode; + } + + @Override + public int hashCode() { + return statusCode; + } + + @Override + public String toString() { + String text = this.text; + if (text == null) { + // E.g.: "1000 Bye", "1009 Message too big" + this.text = text = code() + " " + reasonText(); + } + return text; + } + + public static boolean isValidStatusCode(int code) { + return code < 0 || + 1000 <= code && code <= 1003 || + 1007 <= code && code <= 1014 || + 3000 <= code; + } + + public static WebSocketCloseStatus valueOf(int code) { + switch (code) { + case 1000: + return NORMAL_CLOSURE; + case 1001: + return ENDPOINT_UNAVAILABLE; + case 1002: + return PROTOCOL_ERROR; + case 1003: + return INVALID_MESSAGE_TYPE; + case 1005: + return EMPTY; + case 1006: + return ABNORMAL_CLOSURE; + case 1007: + return INVALID_PAYLOAD_DATA; + case 1008: + return POLICY_VIOLATION; + case 1009: + return MESSAGE_TOO_BIG; + case 1010: + return MANDATORY_EXTENSION; + case 1011: + return INTERNAL_SERVER_ERROR; + case 1012: + return SERVICE_RESTART; + case 1013: + return TRY_AGAIN_LATER; + case 1014: + return BAD_GATEWAY; + case 1015: + return TLS_HANDSHAKE_FAILED; + default: + return new WebSocketCloseStatus(code, "Close status #" + code); + } + } + +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketDecoderConfig.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketDecoderConfig.java new file mode 100644 index 00000000000..93a2b31eee3 --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketDecoderConfig.java @@ -0,0 +1,165 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the 
"License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import java.util.Objects; + +/** + * Frames decoder configuration. + */ +public final class WebSocketDecoderConfig { + + static final WebSocketDecoderConfig DEFAULT = + new WebSocketDecoderConfig(65536, true, false, false, true, true); + + private final int maxFramePayloadLength; + private final boolean expectMaskedFrames; + private final boolean allowMaskMismatch; + private final boolean allowExtensions; + private final boolean closeOnProtocolViolation; + private final boolean withUTF8Validator; + + /** + * Constructor + * + * @param maxFramePayloadLength + * Maximum length of a frame's payload. Setting this to an appropriate value for you application + * helps check for denial of services attacks. + * @param expectMaskedFrames + * Web socket servers must set this to true processed incoming masked payload. Client implementations + * must set this to false. + * @param allowMaskMismatch + * Allows to loosen the masking requirement on received frames. When this is set to false then also + * frames which are not masked properly according to the standard will still be accepted. + * @param allowExtensions + * Flag to allow reserved extension bits to be used or not + * @param closeOnProtocolViolation + * Flag to send close frame immediately on any protocol violation.ion. + * @param withUTF8Validator + * Allows you to avoid adding of Utf8FrameValidator to the pipeline on the + * WebSocketServerProtocolHandler creation. 
This is useful (less overhead) + * when you use only BinaryWebSocketFrame within your web socket connection. + */ + private WebSocketDecoderConfig(int maxFramePayloadLength, boolean expectMaskedFrames, boolean allowMaskMismatch, + boolean allowExtensions, boolean closeOnProtocolViolation, + boolean withUTF8Validator) { + this.maxFramePayloadLength = maxFramePayloadLength; + this.expectMaskedFrames = expectMaskedFrames; + this.allowMaskMismatch = allowMaskMismatch; + this.allowExtensions = allowExtensions; + this.closeOnProtocolViolation = closeOnProtocolViolation; + this.withUTF8Validator = withUTF8Validator; + } + + public int maxFramePayloadLength() { + return maxFramePayloadLength; + } + + public boolean expectMaskedFrames() { + return expectMaskedFrames; + } + + public boolean allowMaskMismatch() { + return allowMaskMismatch; + } + + public boolean allowExtensions() { + return allowExtensions; + } + + public boolean closeOnProtocolViolation() { + return closeOnProtocolViolation; + } + + public boolean withUTF8Validator() { + return withUTF8Validator; + } + + @Override + public String toString() { + return "WebSocketDecoderConfig" + + " [maxFramePayloadLength=" + maxFramePayloadLength + + ", expectMaskedFrames=" + expectMaskedFrames + + ", allowMaskMismatch=" + allowMaskMismatch + + ", allowExtensions=" + allowExtensions + + ", closeOnProtocolViolation=" + closeOnProtocolViolation + + ", withUTF8Validator=" + withUTF8Validator + + "]"; + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static Builder newBuilder() { + return new Builder(DEFAULT); + } + + public static final class Builder { + private int maxFramePayloadLength; + private boolean expectMaskedFrames; + private boolean allowMaskMismatch; + private boolean allowExtensions; + private boolean closeOnProtocolViolation; + private boolean withUTF8Validator; + + private Builder(WebSocketDecoderConfig decoderConfig) { + Objects.requireNonNull(decoderConfig, "decoderConfig"); + 
maxFramePayloadLength = decoderConfig.maxFramePayloadLength(); + expectMaskedFrames = decoderConfig.expectMaskedFrames(); + allowMaskMismatch = decoderConfig.allowMaskMismatch(); + allowExtensions = decoderConfig.allowExtensions(); + closeOnProtocolViolation = decoderConfig.closeOnProtocolViolation(); + withUTF8Validator = decoderConfig.withUTF8Validator(); + } + + public Builder maxFramePayloadLength(int maxFramePayloadLength) { + this.maxFramePayloadLength = maxFramePayloadLength; + return this; + } + + public Builder expectMaskedFrames(boolean expectMaskedFrames) { + this.expectMaskedFrames = expectMaskedFrames; + return this; + } + + public Builder allowMaskMismatch(boolean allowMaskMismatch) { + this.allowMaskMismatch = allowMaskMismatch; + return this; + } + + public Builder allowExtensions(boolean allowExtensions) { + this.allowExtensions = allowExtensions; + return this; + } + + public Builder closeOnProtocolViolation(boolean closeOnProtocolViolation) { + this.closeOnProtocolViolation = closeOnProtocolViolation; + return this; + } + + public Builder withUTF8Validator(boolean withUTF8Validator) { + this.withUTF8Validator = withUTF8Validator; + return this; + } + + public WebSocketDecoderConfig build() { + return new WebSocketDecoderConfig( + maxFramePayloadLength, expectMaskedFrames, allowMaskMismatch, + allowExtensions, closeOnProtocolViolation, withUTF8Validator); + } + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrame.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrame.java index 677f0473604..3cd0d1844f9 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrame.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,7 +20,7 @@ import io.netty.util.internal.StringUtil; /** - * Base class for web socket frames + * Base class for web socket frames. */ public abstract class WebSocketFrame extends DefaultByteBufHolder { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregator.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregator.java index ef84f96d368..ef500c8dbda 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregator.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregator.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameDecoder.java index 8c8142ec4ea..6964d882ca1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.handler.codec.http.websocketx; -import io.netty.channel.ChannelInboundHandler; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelPipeline; /** @@ -23,5 +23,5 @@ * * This makes it easier to access the added encoder later in the {@link ChannelPipeline}. */ -public interface WebSocketFrameDecoder extends ChannelInboundHandler { +public interface WebSocketFrameDecoder extends ChannelHandler { } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameEncoder.java index da1df6b394f..08ab94855a8 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketFrameEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ package io.netty.handler.codec.http.websocketx; -import io.netty.channel.ChannelOutboundHandler; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelPipeline; /** @@ -23,5 +23,5 @@ * * This makes it easier to access the added encoder later in the {@link ChannelPipeline}. 
*/ -public interface WebSocketFrameEncoder extends ChannelOutboundHandler { +public interface WebSocketFrameEncoder extends ChannelHandler { } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeException.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeException.java index 4ee509c011d..dd0bf61194b 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeException.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeException.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketProtocolHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketProtocolHandler.java index 7037ae34686..7f4df07b159 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketProtocolHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketProtocolHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,23 +18,122 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.MessageToMessageDecoder; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import io.netty.util.concurrent.ScheduledFuture; -import java.util.List; +import java.nio.channels.ClosedChannelException; +import java.util.concurrent.TimeUnit; abstract class WebSocketProtocolHandler extends MessageToMessageDecoder { + + private final boolean dropPongFrames; + private final WebSocketCloseStatus closeStatus; + private final long forceCloseTimeoutMillis; + private Promise closeSent; + + /** + * Creates a new {@link WebSocketProtocolHandler} that will drop {@link PongWebSocketFrame}s. + */ + WebSocketProtocolHandler() { + this(true); + } + + /** + * Creates a new {@link WebSocketProtocolHandler}, given a parameter that determines whether or not to drop {@link + * PongWebSocketFrame}s. 
+ * + * @param dropPongFrames + * {@code true} if {@link PongWebSocketFrame}s should be dropped + */ + WebSocketProtocolHandler(boolean dropPongFrames) { + this(dropPongFrames, null, 0L); + } + + WebSocketProtocolHandler(boolean dropPongFrames, + WebSocketCloseStatus closeStatus, + long forceCloseTimeoutMillis) { + this.dropPongFrames = dropPongFrames; + this.closeStatus = closeStatus; + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + } + @Override - protected void decode(ChannelHandlerContext ctx, WebSocketFrame frame, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, WebSocketFrame frame) throws Exception { if (frame instanceof PingWebSocketFrame) { frame.content().retain(); ctx.channel().writeAndFlush(new PongWebSocketFrame(frame.content())); + readIfNeeded(ctx); return; } - if (frame instanceof PongWebSocketFrame) { - // Pong frames need to get ignored + if (frame instanceof PongWebSocketFrame && dropPongFrames) { + readIfNeeded(ctx); return; } - out.add(frame.retain()); + ctx.fireChannelRead(frame.retain()); + } + + private static void readIfNeeded(ChannelHandlerContext ctx) { + if (!ctx.channel().config().isAutoRead()) { + ctx.read(); + } + } + + @Override + public Future close(final ChannelHandlerContext ctx) { + if (closeStatus == null || !ctx.channel().isActive()) { + return ctx.close(); + } + final Future future = closeSent == null ? 
write(ctx, new CloseWebSocketFrame(closeStatus)) : closeSent; + + flush(ctx); + applyCloseSentTimeout(ctx); + Promise promise = ctx.newPromise(); + future.addListener(f -> ctx.close().cascadeTo(promise)); + return promise; + } + + @Override + public Future write(final ChannelHandlerContext ctx, Object msg) { + if (closeSent != null) { + ReferenceCountUtil.release(msg); + return ctx.newFailedFuture(new ClosedChannelException()); + } + if (msg instanceof CloseWebSocketFrame) { + Promise promise = ctx.newPromise(); + closeSent(promise); + ctx.write(msg).cascadeTo(closeSent); + return promise; + } + return ctx.write(msg); + } + + void closeSent(Promise promise) { + closeSent = promise; + } + + private void applyCloseSentTimeout(ChannelHandlerContext ctx) { + if (closeSent.isDone() || forceCloseTimeoutMillis < 0) { + return; + } + + final ScheduledFuture timeoutTask = ctx.executor().schedule(() -> { + if (!closeSent.isDone()) { + closeSent.tryFailure(buildHandshakeException("send close frame timed out")); + } + }, forceCloseTimeoutMillis, TimeUnit.MILLISECONDS); + + closeSent.addListener(future -> timeoutTask.cancel(false)); + } + + /** + * Returns a {@link WebSocketHandshakeException} that depends on which client or server pipeline + * this handler belongs. Should be overridden in implementation otherwise a default exception is used. 
+ */ + protected WebSocketHandshakeException buildHandshakeException(String message) { + return new WebSocketHandshakeException(message); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketScheme.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketScheme.java index 24908307907..056d6b057b1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketScheme.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketScheme.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakeException.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakeException.java new file mode 100644 index 00000000000..456cf19f07c --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakeException.java @@ -0,0 +1,55 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.util.ReferenceCounted; + +/** + * Server exception during handshaking process. + * + *

    IMPORTANT: This exception does not contain any {@link ReferenceCounted} fields + * e.g. {@link FullHttpRequest}, so no special treatment is needed. + */ +public final class WebSocketServerHandshakeException extends WebSocketHandshakeException { + + private static final long serialVersionUID = 1L; + + private final HttpRequest request; + + public WebSocketServerHandshakeException(String message) { + this(message, null); + } + + public WebSocketServerHandshakeException(String message, HttpRequest httpRequest) { + super(message); + if (httpRequest != null) { + request = new DefaultHttpRequest(httpRequest.protocolVersion(), httpRequest.method(), + httpRequest.uri(), httpRequest.headers()); + } else { + request = null; + } + } + + /** + * Returns a {@link HttpRequest request} if exception occurs during request validation otherwise {@code null}. + */ + public HttpRequest request() { + return request; + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker.java index 4eb43484c43..0e316c310a1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,11 +16,11 @@ package io.netty.handler.codec.http.websocketx; import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelOutboundInvoker; import io.netty.channel.ChannelPipeline; -import io.netty.channel.ChannelPromise; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -32,8 +32,9 @@ import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.codec.http.HttpServerCodec; import io.netty.util.ReferenceCountUtil; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; import io.netty.util.internal.EmptyArrays; -import io.netty.util.internal.ThrowableUtil; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; @@ -42,13 +43,13 @@ import java.util.LinkedHashSet; import java.util.Set; +import static java.util.Objects.requireNonNull; + /** * Base class for server side web socket opening and closing handshakes */ public abstract class WebSocketServerHandshaker { protected static final InternalLogger logger = InternalLoggerFactory.getInstance(WebSocketServerHandshaker.class); - private static final ClosedChannelException CLOSED_CHANNEL_EXCEPTION = ThrowableUtil.unknownStackTrace( - new ClosedChannelException(), WebSocketServerHandshaker.class, "handshake(...)"); private final String uri; @@ -56,7 +57,7 @@ public abstract class 
WebSocketServerHandshaker { private final WebSocketVersion version; - private final int maxFramePayloadLength; + private final WebSocketDecoderConfig decoderConfig; private String selectedSubprotocol; @@ -81,6 +82,26 @@ public abstract class WebSocketServerHandshaker { protected WebSocketServerHandshaker( WebSocketVersion version, String uri, String subprotocols, int maxFramePayloadLength) { + this(version, uri, subprotocols, WebSocketDecoderConfig.newBuilder() + .maxFramePayloadLength(maxFramePayloadLength) + .build()); + } + + /** + * Constructor specifying the destination web socket location + * + * @param version + * the protocol version + * @param uri + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param subprotocols + * CSV of supported protocols. Null if sub protocols not supported. + * @param decoderConfig + * Frames decoder configuration. + */ + protected WebSocketServerHandshaker( + WebSocketVersion version, String uri, String subprotocols, WebSocketDecoderConfig decoderConfig) { this.version = version; this.uri = uri; if (subprotocols != null) { @@ -92,7 +113,7 @@ protected WebSocketServerHandshaker( } else { this.subprotocols = EmptyArrays.EMPTY_STRINGS; } - this.maxFramePayloadLength = maxFramePayloadLength; + this.decoderConfig = requireNonNull(decoderConfig, "decoderConfig"); } /** @@ -106,7 +127,7 @@ public String uri() { * Returns the CSV of supported sub protocols */ public Set subprotocols() { - Set ret = new LinkedHashSet(); + Set ret = new LinkedHashSet<>(); Collections.addAll(ret, subprotocols); return ret; } @@ -124,7 +145,16 @@ public WebSocketVersion version() { * @return The maximum length for a frame's payload */ public int maxFramePayloadLength() { - return maxFramePayloadLength; + return decoderConfig.maxFramePayloadLength(); + } + + /** + * Gets this decoder configuration. + * + * @return This decoder configuration. 
+ */ + public WebSocketDecoderConfig decoderConfig() { + return decoderConfig; } /** @@ -136,10 +166,10 @@ public int maxFramePayloadLength() { * @param req * HTTP Request * @return future - * The {@link ChannelFuture} which is notified once the opening handshake completes + * The {@link Future} which is notified once the opening handshake completes */ - public ChannelFuture handshake(Channel channel, FullHttpRequest req) { - return handshake(channel, req, null, channel.newPromise()); + public Future handshake(Channel channel, FullHttpRequest req) { + return handshake(channel, req, null); } /** @@ -153,13 +183,10 @@ public ChannelFuture handshake(Channel channel, FullHttpRequest req) { * HTTP Request * @param responseHeaders * Extra headers to add to the handshake response or {@code null} if no extra headers should be added - * @param promise - * the {@link ChannelPromise} to be notified when the opening handshake is done * @return future - * the {@link ChannelFuture} which is notified when the opening handshake is done + * the {@link Future} which is notified when the opening handshake is done */ - public final ChannelFuture handshake(Channel channel, FullHttpRequest req, - HttpHeaders responseHeaders, final ChannelPromise promise) { + public final Future handshake(Channel channel, FullHttpRequest req, HttpHeaders responseHeaders) { if (logger.isDebugEnabled()) { logger.debug("{} WebSocket version {} server handshake", channel, version()); @@ -175,15 +202,14 @@ public final ChannelFuture handshake(Channel channel, FullHttpRequest req, ChannelHandlerContext ctx = p.context(HttpRequestDecoder.class); final String encoderName; if (ctx == null) { - // this means the user use a HttpServerCodec + // this means the user use an HttpServerCodec ctx = p.context(HttpServerCodec.class); if (ctx == null) { - promise.setFailure( + return channel.newFailedFuture( new IllegalStateException("No HttpDecoder and no HttpServerCodec in the pipeline")); - return promise; } - 
p.addBefore(ctx.name(), "wsdecoder", newWebsocketDecoder()); p.addBefore(ctx.name(), "wsencoder", newWebSocketEncoder()); + p.addBefore(ctx.name(), "wsdecoder", newWebsocketDecoder()); encoderName = ctx.name(); } else { p.replace(ctx.name(), "wsdecoder", newWebsocketDecoder()); @@ -191,19 +217,12 @@ public final ChannelFuture handshake(Channel channel, FullHttpRequest req, encoderName = p.context(HttpResponseEncoder.class).name(); p.addBefore(encoderName, "wsencoder", newWebSocketEncoder()); } - channel.writeAndFlush(response).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (future.isSuccess()) { - ChannelPipeline p = future.channel().pipeline(); - p.remove(encoderName); - promise.setSuccess(); - } else { - promise.setFailure(future.cause()); - } + return channel.writeAndFlush(response).addListener(channel, (ch, future) -> { + if (future.isSuccess()) { + ChannelPipeline p1 = ch.pipeline(); + p1.remove(encoderName); } }); - return promise; } /** @@ -215,10 +234,10 @@ public void operationComplete(ChannelFuture future) throws Exception { * @param req * HTTP Request * @return future - * The {@link ChannelFuture} which is notified once the opening handshake completes + * The {@link Future} which is notified once the opening handshake completes */ - public ChannelFuture handshake(Channel channel, HttpRequest req) { - return handshake(channel, req, null, channel.newPromise()); + public Future handshake(Channel channel, HttpRequest req) { + return handshake(channel, req, null); } /** @@ -232,16 +251,14 @@ public ChannelFuture handshake(Channel channel, HttpRequest req) { * HTTP Request * @param responseHeaders * Extra headers to add to the handshake response or {@code null} if no extra headers should be added - * @param promise - * the {@link ChannelPromise} to be notified when the opening handshake is done * @return future - * the {@link ChannelFuture} which is notified when the opening 
handshake is done + * the {@link Future} which is notified when the opening handshake is done */ - public final ChannelFuture handshake(final Channel channel, HttpRequest req, - final HttpHeaders responseHeaders, final ChannelPromise promise) { + public final Future handshake(final Channel channel, HttpRequest req, + final HttpHeaders responseHeaders) { if (req instanceof FullHttpRequest) { - return handshake(channel, (FullHttpRequest) req, responseHeaders, promise); + return handshake(channel, (FullHttpRequest) req, responseHeaders); } if (logger.isDebugEnabled()) { logger.debug("{} WebSocket version {} server handshake", channel, version()); @@ -249,14 +266,15 @@ public final ChannelFuture handshake(final Channel channel, HttpRequest req, ChannelPipeline p = channel.pipeline(); ChannelHandlerContext ctx = p.context(HttpRequestDecoder.class); if (ctx == null) { - // this means the user use a HttpServerCodec + // this means the user use an HttpServerCodec ctx = p.context(HttpServerCodec.class); if (ctx == null) { - promise.setFailure( + return channel.newFailedFuture( new IllegalStateException("No HttpDecoder and no HttpServerCodec in the pipeline")); - return promise; } } + + Promise promise = channel.newPromise(); // Add aggregator and ensure we feed the HttpRequest so it is aggregated. A limit o 8192 should be more then // enough for the websockets handshake payload. 
// @@ -265,24 +283,26 @@ public final ChannelFuture handshake(final Channel channel, HttpRequest req, p.addAfter(ctx.name(), aggregatorName, new HttpObjectAggregator(8192)); p.addAfter(aggregatorName, "handshaker", new SimpleChannelInboundHandler() { @Override - protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest msg) throws Exception { + protected void messageReceived(ChannelHandlerContext ctx, FullHttpRequest msg) throws Exception { // Remove ourself and do the actual handshake ctx.pipeline().remove(this); - handshake(channel, msg, responseHeaders, promise); + handshake(channel, msg, responseHeaders); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { // Remove ourself and fail the handshake promise. - ctx.pipeline().remove(this); promise.tryFailure(cause); ctx.fireExceptionCaught(cause); + ctx.pipeline().remove(this); } @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { // Fail promise if Channel was closed - promise.tryFailure(CLOSED_CHANNEL_EXCEPTION); + if (!promise.isDone()) { + promise.tryFailure(new ClosedChannelException()); + } ctx.fireChannelInactive(); } }); @@ -300,35 +320,36 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { protected abstract FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders responseHeaders); /** - * Performs the closing handshake + * Performs the closing handshake. + * + * When called from within a {@link ChannelHandler} you most likely want to use + * {@link #close(ChannelHandlerContext, CloseWebSocketFrame)}. * * @param channel - * Channel + * the {@link Channel} to use. * @param frame - * Closing Frame that was received + * Closing Frame that was received. 
*/ - public ChannelFuture close(Channel channel, CloseWebSocketFrame frame) { - if (channel == null) { - throw new NullPointerException("channel"); - } - return close(channel, frame, channel.newPromise()); + public Future close(Channel channel, CloseWebSocketFrame frame) { + requireNonNull(channel, "channel"); + return close0(channel, channel, frame); } /** - * Performs the closing handshake + * Performs the closing handshake. * - * @param channel - * Channel + * @param ctx + * the {@link ChannelHandlerContext} to use. * @param frame - * Closing Frame that was received - * @param promise - * the {@link ChannelPromise} to be notified when the closing handshake is done + * Closing Frame that was received. */ - public ChannelFuture close(Channel channel, CloseWebSocketFrame frame, ChannelPromise promise) { - if (channel == null) { - throw new NullPointerException("channel"); - } - return channel.writeAndFlush(frame, promise).addListener(ChannelFutureListener.CLOSE); + public Future close(ChannelHandlerContext ctx, CloseWebSocketFrame frame) { + requireNonNull(ctx, "ctx"); + return close0(ctx, ctx.channel(), frame); + } + + private static Future close0(ChannelOutboundInvoker invoker, Channel channel, CloseWebSocketFrame frame) { + return invoker.writeAndFlush(frame).addListener(channel, ChannelFutureListeners.CLOSE); } /** diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00.java index ff1797db642..92cd057eea5 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you 
may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,8 +18,7 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; +import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; @@ -27,6 +26,7 @@ import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.concurrent.Future; import java.util.regex.Pattern; @@ -35,7 +35,7 @@ /** *

    * Performs server side opening and closing handshakes for web socket specification version draft-ietf-hybi-thewebsocketprotocol- + * href="https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00" >draft-ietf-hybi-thewebsocketprotocol- * 00 *

    *

    @@ -60,15 +60,32 @@ public class WebSocketServerHandshaker00 extends WebSocketServerHandshaker { * reduce denial of service attacks using long data frames. */ public WebSocketServerHandshaker00(String webSocketURL, String subprotocols, int maxFramePayloadLength) { - super(WebSocketVersion.V00, webSocketURL, subprotocols, maxFramePayloadLength); + this(webSocketURL, subprotocols, WebSocketDecoderConfig.newBuilder() + .maxFramePayloadLength(maxFramePayloadLength) + .build()); + } + + /** + * Constructor specifying the destination web socket location + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web socket frames will be + * sent to this URL. + * @param subprotocols + * CSV of supported protocols + * @param decoderConfig + * Frames decoder configuration. + */ + public WebSocketServerHandshaker00(String webSocketURL, String subprotocols, WebSocketDecoderConfig decoderConfig) { + super(WebSocketVersion.V00, webSocketURL, subprotocols, decoderConfig); } /** *

    * Handle the web socket handshake for the web socket specification HyBi version 0 and lower. This standard - * is really a rehash of hixie-76 and - * hixie-75. + * "https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-00">HyBi version 0 and lower. This standard + * is really a rehash of hixie-76 and + * hixie-75. *

    * *

    @@ -109,27 +126,35 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders // Serve the WebSocket handshake request. if (!req.headers().containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true) || !HttpHeaderValues.WEBSOCKET.contentEqualsIgnoreCase(req.headers().get(HttpHeaderNames.UPGRADE))) { - throw new WebSocketHandshakeException("not a WebSocket handshake request: missing upgrade"); + throw new WebSocketServerHandshakeException("not a WebSocket handshake request: missing upgrade", req); } // Hixie 75 does not contain these headers while Hixie 76 does boolean isHixie76 = req.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_KEY1) && req.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_KEY2); + String origin = req.headers().get(HttpHeaderNames.ORIGIN); + //throw before allocating FullHttpResponse + if (origin == null && !isHixie76) { + throw new WebSocketServerHandshakeException("Missing origin header, got only " + req.headers().names(), + req); + } + // Create the WebSocket handshake response. FullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, new HttpResponseStatus(101, - isHixie76 ? "WebSocket Protocol Handshake" : "Web Socket Protocol Handshake")); + isHixie76 ? "WebSocket Protocol Handshake" : "Web Socket Protocol Handshake"), + req.content().alloc().buffer(0)); if (headers != null) { res.headers().add(headers); } - res.headers().add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); - res.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE); + res.headers().set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE); // Fill in the headers and contents depending on handshake getMethod. 
if (isHixie76) { // New handshake getMethod with a challenge: - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, req.headers().get(HttpHeaderNames.ORIGIN)); + res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, origin); res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_LOCATION, uri()); String subprotocols = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL); @@ -140,7 +165,7 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders logger.debug("Requested subprotocol(s) not supported: {}", subprotocols); } } else { - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); + res.headers().set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); } } @@ -152,19 +177,19 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders int b = (int) (Long.parseLong(BEGINNING_DIGIT.matcher(key2).replaceAll("")) / BEGINNING_SPACE.matcher(key2).replaceAll("").length()); long c = req.content().readLong(); - ByteBuf input = Unpooled.buffer(16); + ByteBuf input = Unpooled.wrappedBuffer(new byte[16]).setIndex(0, 0); input.writeInt(a); input.writeInt(b); input.writeLong(c); res.content().writeBytes(WebSocketUtil.md5(input.array())); } else { // Old Hixie 75 handshake getMethod with no challenge: - res.headers().add(HttpHeaderNames.WEBSOCKET_ORIGIN, req.headers().get(HttpHeaderNames.ORIGIN)); + res.headers().add(HttpHeaderNames.WEBSOCKET_ORIGIN, origin); res.headers().add(HttpHeaderNames.WEBSOCKET_LOCATION, uri()); String protocol = req.headers().get(HttpHeaderNames.WEBSOCKET_PROTOCOL); if (protocol != null) { - res.headers().add(HttpHeaderNames.WEBSOCKET_PROTOCOL, selectSubprotocol(protocol)); + res.headers().set(HttpHeaderNames.WEBSOCKET_PROTOCOL, selectSubprotocol(protocol)); } } return res; @@ -174,18 +199,33 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders * Echo back the closing frame * * @param channel - * Channel + * the {@link Channel} to use. 
+ * @param frame + * Web Socket frame that was received. + * @return the {@link Future} which will be notified once the operations completes. + */ + @Override + public Future close(Channel channel, CloseWebSocketFrame frame) { + return channel.writeAndFlush(frame); + } + + /** + * Echo back the closing frame + * + * @param ctx + * the {@link ChannelHandlerContext} to use. * @param frame - * Web Socket frame that was received + * Closing Frame that was received. + * @return the {@link ChannelFuture} which will be notified once the operations completes. */ @Override - public ChannelFuture close(Channel channel, CloseWebSocketFrame frame, ChannelPromise promise) { - return channel.writeAndFlush(frame, promise); + public Future close(ChannelHandlerContext ctx, CloseWebSocketFrame frame) { + return ctx.writeAndFlush(frame); } @Override protected WebSocketFrameDecoder newWebsocketDecoder() { - return new WebSocket00FrameDecoder(maxFramePayloadLength()); + return new WebSocket00FrameDecoder(decoderConfig()); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker07.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker07.java index 6529109c7ff..3356e7dc5a4 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker07.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker07.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -29,7 +29,7 @@ /** *

    * Performs server side opening and closing handshakes for web socket specification version draft-ietf-hybi-thewebsocketprotocol- + * href="https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10" >draft-ietf-hybi-thewebsocketprotocol- * 10 *

    */ @@ -37,9 +37,6 @@ public class WebSocketServerHandshaker07 extends WebSocketServerHandshaker { public static final String WEBSOCKET_07_ACCEPT_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; - private final boolean allowExtensions; - private final boolean allowMaskMismatch; - /** * Constructor specifying the destination web socket location * @@ -79,15 +76,27 @@ public WebSocketServerHandshaker07( public WebSocketServerHandshaker07( String webSocketURL, String subprotocols, boolean allowExtensions, int maxFramePayloadLength, boolean allowMaskMismatch) { - super(WebSocketVersion.V07, webSocketURL, subprotocols, maxFramePayloadLength); - this.allowExtensions = allowExtensions; - this.allowMaskMismatch = allowMaskMismatch; + this(webSocketURL, subprotocols, WebSocketDecoderConfig.newBuilder() + .allowExtensions(allowExtensions) + .maxFramePayloadLength(maxFramePayloadLength) + .allowMaskMismatch(allowMaskMismatch) + .build()); + } + + /** + * Constructor specifying the destination web socket location + * + * @param decoderConfig + * Frames decoder configuration. + */ + public WebSocketServerHandshaker07(String webSocketURL, String subprotocols, WebSocketDecoderConfig decoderConfig) { + super(WebSocketVersion.V07, webSocketURL, subprotocols, decoderConfig); } /** *

    * Handle the web socket handshake for the web socket specification HyBi version 7. + * "https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-07">HyBi version 7. *

    * *

    @@ -119,18 +128,19 @@ public WebSocketServerHandshaker07( */ @Override protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders headers) { + CharSequence key = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_KEY); + if (key == null) { + throw new WebSocketServerHandshakeException("not a WebSocket request: missing key", req); + } FullHttpResponse res = - new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.SWITCHING_PROTOCOLS); + new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.SWITCHING_PROTOCOLS, + req.content().alloc().buffer(0)); if (headers != null) { res.headers().add(headers); } - CharSequence key = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_KEY); - if (key == null) { - throw new WebSocketHandshakeException("not a WebSocket request: missing key"); - } String acceptSeed = key + WEBSOCKET_07_ACCEPT_GUID; byte[] sha1 = WebSocketUtil.sha1(acceptSeed.getBytes(CharsetUtil.US_ASCII)); String accept = WebSocketUtil.base64(sha1); @@ -139,9 +149,9 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders logger.debug("WebSocket version 07 server handshake key: {}, response: {}.", key, accept); } - res.headers().add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); - res.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE); - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, accept); + res.headers().set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, accept); String subprotocols = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL); if (subprotocols != null) { @@ -151,7 +161,7 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders logger.debug("Requested subprotocol(s) not supported: {}", subprotocols); } } else { - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); + 
res.headers().set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); } } return res; @@ -159,7 +169,7 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders @Override protected WebSocketFrameDecoder newWebsocketDecoder() { - return new WebSocket07FrameDecoder(true, allowExtensions, maxFramePayloadLength(), allowMaskMismatch); + return new WebSocket07FrameDecoder(decoderConfig()); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08.java index f0b58f8bcac..8a13a94e22f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -29,7 +29,7 @@ /** *

    * Performs server side opening and closing handshakes for web socket specification version draft-ietf-hybi-thewebsocketprotocol- + * href="https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-10" >draft-ietf-hybi-thewebsocketprotocol- * 10 *

    */ @@ -37,9 +37,6 @@ public class WebSocketServerHandshaker08 extends WebSocketServerHandshaker { public static final String WEBSOCKET_08_ACCEPT_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; - private final boolean allowExtensions; - private final boolean allowMaskMismatch; - /** * Constructor specifying the destination web socket location * @@ -79,15 +76,33 @@ public WebSocketServerHandshaker08( public WebSocketServerHandshaker08( String webSocketURL, String subprotocols, boolean allowExtensions, int maxFramePayloadLength, boolean allowMaskMismatch) { - super(WebSocketVersion.V08, webSocketURL, subprotocols, maxFramePayloadLength); - this.allowExtensions = allowExtensions; - this.allowMaskMismatch = allowMaskMismatch; + this(webSocketURL, subprotocols, WebSocketDecoderConfig.newBuilder() + .allowExtensions(allowExtensions) + .maxFramePayloadLength(maxFramePayloadLength) + .allowMaskMismatch(allowMaskMismatch) + .build()); + } + + /** + * Constructor specifying the destination web socket location + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". + * Subsequent web socket frames will be sent to this URL. + * @param subprotocols + * CSV of supported protocols + * @param decoderConfig + * Frames decoder configuration. + */ + public WebSocketServerHandshaker08( + String webSocketURL, String subprotocols, WebSocketDecoderConfig decoderConfig) { + super(WebSocketVersion.V08, webSocketURL, subprotocols, decoderConfig); } /** *

    * Handle the web socket handshake for the web socket specification HyBi version 8 to 10. Version 8, 9 and + * "https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-08">HyBi version 8 to 10. Version 8, 9 and * 10 share the same wire protocol. *

    * @@ -120,16 +135,18 @@ public WebSocketServerHandshaker08( */ @Override protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders headers) { - FullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.SWITCHING_PROTOCOLS); + CharSequence key = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_KEY); + if (key == null) { + throw new WebSocketServerHandshakeException("not a WebSocket request: missing key", req); + } + + FullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.SWITCHING_PROTOCOLS, + req.content().alloc().buffer(0)); if (headers != null) { res.headers().add(headers); } - CharSequence key = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_KEY); - if (key == null) { - throw new WebSocketHandshakeException("not a WebSocket request: missing key"); - } String acceptSeed = key + WEBSOCKET_08_ACCEPT_GUID; byte[] sha1 = WebSocketUtil.sha1(acceptSeed.getBytes(CharsetUtil.US_ASCII)); String accept = WebSocketUtil.base64(sha1); @@ -138,9 +155,9 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders logger.debug("WebSocket version 08 server handshake key: {}, response: {}", key, accept); } - res.headers().add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); - res.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE); - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, accept); + res.headers().set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, accept); String subprotocols = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL); if (subprotocols != null) { @@ -150,7 +167,7 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders logger.debug("Requested subprotocol(s) not supported: {}", subprotocols); } } else { - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); + 
res.headers().set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); } } return res; @@ -158,7 +175,7 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders @Override protected WebSocketFrameDecoder newWebsocketDecoder() { - return new WebSocket08FrameDecoder(true, allowExtensions, maxFramePayloadLength(), allowMaskMismatch); + return new WebSocket08FrameDecoder(decoderConfig()); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13.java index f36d06ca990..869c65eb50c 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -28,17 +28,14 @@ /** *

    - * Performs server side opening and closing handshakes for RFC 6455 - * (originally web socket specification draft-ietf-hybi-thewebsocketprotocol-17). + * Performs server side opening and closing handshakes for RFC 6455 + * (originally web socket specification draft-ietf-hybi-thewebsocketprotocol-17). *

    */ public class WebSocketServerHandshaker13 extends WebSocketServerHandshaker { public static final String WEBSOCKET_13_ACCEPT_GUID = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"; - private final boolean allowExtensions; - private final boolean allowMaskMismatch; - /** * Constructor specifying the destination web socket location * @@ -78,15 +75,33 @@ public WebSocketServerHandshaker13( public WebSocketServerHandshaker13( String webSocketURL, String subprotocols, boolean allowExtensions, int maxFramePayloadLength, boolean allowMaskMismatch) { - super(WebSocketVersion.V13, webSocketURL, subprotocols, maxFramePayloadLength); - this.allowExtensions = allowExtensions; - this.allowMaskMismatch = allowMaskMismatch; + this(webSocketURL, subprotocols, WebSocketDecoderConfig.newBuilder() + .allowExtensions(allowExtensions) + .maxFramePayloadLength(maxFramePayloadLength) + .allowMaskMismatch(allowMaskMismatch) + .build()); + } + + /** + * Constructor specifying the destination web socket location + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". Subsequent web + * socket frames will be sent to this URL. + * @param subprotocols + * CSV of supported protocols + * @param decoderConfig + * Frames decoder configuration. + */ + public WebSocketServerHandshaker13( + String webSocketURL, String subprotocols, WebSocketDecoderConfig decoderConfig) { + super(WebSocketVersion.V13, webSocketURL, subprotocols, decoderConfig); } /** *

    * Handle the web socket handshake for the web socket specification HyBi versions 13-17. Versions 13-17 + * "https://tools.ietf.org/html/draft-ietf-hybi-thewebsocketprotocol-17">HyBi versions 13-17. Versions 13-17 * share the same wire protocol. *

    * @@ -100,7 +115,7 @@ public WebSocketServerHandshaker13( * Upgrade: websocket * Connection: Upgrade * Sec-WebSocket-Key: dGhlIHNhbXBsZSBub25jZQ== - * Sec-WebSocket-Origin: http://example.com + * Origin: http://example.com * Sec-WebSocket-Protocol: chat, superchat * Sec-WebSocket-Version: 13 * @@ -119,15 +134,17 @@ public WebSocketServerHandshaker13( */ @Override protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders headers) { - FullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.SWITCHING_PROTOCOLS); + CharSequence key = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_KEY); + if (key == null) { + throw new WebSocketServerHandshakeException("not a WebSocket request: missing key", req); + } + + FullHttpResponse res = new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.SWITCHING_PROTOCOLS, + req.content().alloc().buffer(0)); if (headers != null) { res.headers().add(headers); } - CharSequence key = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_KEY); - if (key == null) { - throw new WebSocketHandshakeException("not a WebSocket request: missing key"); - } String acceptSeed = key + WEBSOCKET_13_ACCEPT_GUID; byte[] sha1 = WebSocketUtil.sha1(acceptSeed.getBytes(CharsetUtil.US_ASCII)); String accept = WebSocketUtil.base64(sha1); @@ -136,9 +153,9 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders logger.debug("WebSocket version 13 server handshake key: {}, response: {}", key, accept); } - res.headers().add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); - res.headers().add(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE); - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, accept); + res.headers().set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, accept); String subprotocols = req.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL); if 
(subprotocols != null) { @@ -148,7 +165,7 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders logger.debug("Requested subprotocol(s) not supported: {}", subprotocols); } } else { - res.headers().add(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); + res.headers().set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, selectedSubprotocol); } } return res; @@ -156,7 +173,7 @@ protected FullHttpResponse newHandshakeResponse(FullHttpRequest req, HttpHeaders @Override protected WebSocketFrameDecoder newWebsocketDecoder() { - return new WebSocket13FrameDecoder(true, allowExtensions, maxFramePayloadLength(), allowMaskMismatch); + return new WebSocket13FrameDecoder(decoderConfig()); } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactory.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactory.java index 27fdfa0259c..387f88fd073 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactory.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactory.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,15 +16,17 @@ package io.netty.handler.codec.http.websocketx; import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelPromise; + import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.concurrent.Future; + +import java.util.Objects; /** * Auto-detects the version of the Web Socket protocol in use and creates a new proper @@ -36,11 +38,7 @@ public class WebSocketServerHandshakerFactory { private final String subprotocols; - private final boolean allowExtensions; - - private final int maxFramePayloadLength; - - private final boolean allowMaskMismatch; + private final WebSocketDecoderConfig decoderConfig; /** * Constructor specifying the destination web socket location @@ -98,11 +96,29 @@ public WebSocketServerHandshakerFactory( public WebSocketServerHandshakerFactory( String webSocketURL, String subprotocols, boolean allowExtensions, int maxFramePayloadLength, boolean allowMaskMismatch) { + this(webSocketURL, subprotocols, WebSocketDecoderConfig.newBuilder() + .allowExtensions(allowExtensions) + .maxFramePayloadLength(maxFramePayloadLength) + .allowMaskMismatch(allowMaskMismatch) + .build()); + } + + /** + * Constructor specifying the destination web socket location + * + * @param webSocketURL + * URL for web socket communications. e.g "ws://myhost.com/mypath". 
+ * Subsequent web socket frames will be sent to this URL. + * @param subprotocols + * CSV of supported protocols. Null if sub protocols not supported. + * @param decoderConfig + * Frames decoder options. + */ + public WebSocketServerHandshakerFactory( + String webSocketURL, String subprotocols, WebSocketDecoderConfig decoderConfig) { this.webSocketURL = webSocketURL; this.subprotocols = subprotocols; - this.allowExtensions = allowExtensions; - this.maxFramePayloadLength = maxFramePayloadLength; - this.allowMaskMismatch = allowMaskMismatch; + this.decoderConfig = Objects.requireNonNull(decoderConfig, "decoderConfig"); } /** @@ -118,21 +134,21 @@ public WebSocketServerHandshaker newHandshaker(HttpRequest req) { if (version.equals(WebSocketVersion.V13.toHttpHeaderValue())) { // Version 13 of the wire protocol - RFC 6455 (version 17 of the draft hybi specification). return new WebSocketServerHandshaker13( - webSocketURL, subprotocols, allowExtensions, maxFramePayloadLength, allowMaskMismatch); + webSocketURL, subprotocols, decoderConfig); } else if (version.equals(WebSocketVersion.V08.toHttpHeaderValue())) { // Version 8 of the wire protocol - version 10 of the draft hybi specification. return new WebSocketServerHandshaker08( - webSocketURL, subprotocols, allowExtensions, maxFramePayloadLength, allowMaskMismatch); + webSocketURL, subprotocols, decoderConfig); } else if (version.equals(WebSocketVersion.V07.toHttpHeaderValue())) { // Version 8 of the wire protocol - version 07 of the draft hybi specification. 
return new WebSocketServerHandshaker07( - webSocketURL, subprotocols, allowExtensions, maxFramePayloadLength, allowMaskMismatch); + webSocketURL, subprotocols, decoderConfig); } else { return null; } } else { // Assume version 00 where version header was not specified - return new WebSocketServerHandshaker00(webSocketURL, subprotocols, maxFramePayloadLength); + return new WebSocketServerHandshaker00(webSocketURL, subprotocols, decoderConfig); } } @@ -147,19 +163,12 @@ public static void sendUnsupportedWebSocketVersionResponse(Channel channel) { /** * Return that we need cannot not support the web socket version */ - public static ChannelFuture sendUnsupportedVersionResponse(Channel channel) { - return sendUnsupportedVersionResponse(channel, channel.newPromise()); - } - - /** - * Return that we need cannot not support the web socket version - */ - public static ChannelFuture sendUnsupportedVersionResponse(Channel channel, ChannelPromise promise) { + public static Future sendUnsupportedVersionResponse(Channel channel) { HttpResponse res = new DefaultFullHttpResponse( HttpVersion.HTTP_1_1, - HttpResponseStatus.UPGRADE_REQUIRED); + HttpResponseStatus.UPGRADE_REQUIRED, channel.alloc().buffer(0)); res.headers().set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, WebSocketVersion.V13.toHttpHeaderValue()); HttpUtil.setContentLength(res, 0); - return channel.writeAndFlush(res, promise); + return channel.writeAndFlush(res); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolConfig.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolConfig.java new file mode 100644 index 00000000000..6f84edd5595 --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolConfig.java @@ -0,0 +1,297 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not 
use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.handler.codec.http.websocketx.WebSocketClientProtocolHandler.ClientHandshakeStateEvent; + +import java.util.Objects; + +import static io.netty.util.internal.ObjectUtil.checkPositive; + +/** + * WebSocket server configuration. + */ +public final class WebSocketServerProtocolConfig { + + static final long DEFAULT_HANDSHAKE_TIMEOUT_MILLIS = 10000L; + + private final String websocketPath; + private final String subprotocols; + private final boolean checkStartsWith; + private final long handshakeTimeoutMillis; + private final long forceCloseTimeoutMillis; + private final boolean handleCloseFrames; + private final WebSocketCloseStatus sendCloseFrame; + private final boolean dropPongFrames; + private final WebSocketDecoderConfig decoderConfig; + + private WebSocketServerProtocolConfig( + String websocketPath, + String subprotocols, + boolean checkStartsWith, + long handshakeTimeoutMillis, + long forceCloseTimeoutMillis, + boolean handleCloseFrames, + WebSocketCloseStatus sendCloseFrame, + boolean dropPongFrames, + WebSocketDecoderConfig decoderConfig + ) { + this.websocketPath = websocketPath; + this.subprotocols = subprotocols; + this.checkStartsWith = checkStartsWith; + this.handshakeTimeoutMillis = checkPositive(handshakeTimeoutMillis, "handshakeTimeoutMillis"); + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + this.handleCloseFrames = handleCloseFrames; + this.sendCloseFrame = sendCloseFrame; + this.dropPongFrames = 
dropPongFrames; + this.decoderConfig = decoderConfig == null ? WebSocketDecoderConfig.DEFAULT : decoderConfig; + } + + public String websocketPath() { + return websocketPath; + } + + public String subprotocols() { + return subprotocols; + } + + public boolean checkStartsWith() { + return checkStartsWith; + } + + public long handshakeTimeoutMillis() { + return handshakeTimeoutMillis; + } + + public long forceCloseTimeoutMillis() { + return forceCloseTimeoutMillis; + } + + public boolean handleCloseFrames() { + return handleCloseFrames; + } + + public WebSocketCloseStatus sendCloseFrame() { + return sendCloseFrame; + } + + public boolean dropPongFrames() { + return dropPongFrames; + } + + public WebSocketDecoderConfig decoderConfig() { + return decoderConfig; + } + + @Override + public String toString() { + return "WebSocketServerProtocolConfig" + + " {websocketPath=" + websocketPath + + ", subprotocols=" + subprotocols + + ", checkStartsWith=" + checkStartsWith + + ", handshakeTimeoutMillis=" + handshakeTimeoutMillis + + ", forceCloseTimeoutMillis=" + forceCloseTimeoutMillis + + ", handleCloseFrames=" + handleCloseFrames + + ", sendCloseFrame=" + sendCloseFrame + + ", dropPongFrames=" + dropPongFrames + + ", decoderConfig=" + decoderConfig + + "}"; + } + + public Builder toBuilder() { + return new Builder(this); + } + + public static Builder newBuilder() { + return new Builder("/", null, false, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS, 0L, + true, WebSocketCloseStatus.NORMAL_CLOSURE, true, WebSocketDecoderConfig.DEFAULT); + } + + public static final class Builder { + private String websocketPath; + private String subprotocols; + private boolean checkStartsWith; + private long handshakeTimeoutMillis; + private long forceCloseTimeoutMillis; + private boolean handleCloseFrames; + private WebSocketCloseStatus sendCloseFrame; + private boolean dropPongFrames; + private WebSocketDecoderConfig decoderConfig; + private WebSocketDecoderConfig.Builder decoderConfigBuilder; + + 
private Builder(WebSocketServerProtocolConfig serverConfig) { + this(Objects.requireNonNull(serverConfig, "serverConfig").websocketPath(), + serverConfig.subprotocols(), + serverConfig.checkStartsWith(), + serverConfig.handshakeTimeoutMillis(), + serverConfig.forceCloseTimeoutMillis(), + serverConfig.handleCloseFrames(), + serverConfig.sendCloseFrame(), + serverConfig.dropPongFrames(), + serverConfig.decoderConfig() + ); + } + + private Builder(String websocketPath, + String subprotocols, + boolean checkStartsWith, + long handshakeTimeoutMillis, + long forceCloseTimeoutMillis, + boolean handleCloseFrames, + WebSocketCloseStatus sendCloseFrame, + boolean dropPongFrames, + WebSocketDecoderConfig decoderConfig) { + this.websocketPath = websocketPath; + this.subprotocols = subprotocols; + this.checkStartsWith = checkStartsWith; + this.handshakeTimeoutMillis = handshakeTimeoutMillis; + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + this.handleCloseFrames = handleCloseFrames; + this.sendCloseFrame = sendCloseFrame; + this.dropPongFrames = dropPongFrames; + this.decoderConfig = decoderConfig; + } + + /** + * URI path component to handle websocket upgrade requests on. + */ + public Builder websocketPath(String websocketPath) { + this.websocketPath = websocketPath; + return this; + } + + /** + * CSV of supported protocols + */ + public Builder subprotocols(String subprotocols) { + this.subprotocols = subprotocols; + return this; + } + + /** + * {@code true} to handle all requests, where URI path component starts from + * {@link WebSocketServerProtocolConfig#websocketPath()}, {@code false} for exact match (default). 
+ */ + public Builder checkStartsWith(boolean checkStartsWith) { + this.checkStartsWith = checkStartsWith; + return this; + } + + /** + * Handshake timeout in mills, when handshake timeout, will trigger user + * event {@link ClientHandshakeStateEvent#HANDSHAKE_TIMEOUT} + */ + public Builder handshakeTimeoutMillis(long handshakeTimeoutMillis) { + this.handshakeTimeoutMillis = handshakeTimeoutMillis; + return this; + } + + /** + * Close the connection if it was not closed by the client after timeout specified + */ + public Builder forceCloseTimeoutMillis(long forceCloseTimeoutMillis) { + this.forceCloseTimeoutMillis = forceCloseTimeoutMillis; + return this; + } + + /** + * {@code true} if close frames should not be forwarded and just close the channel + */ + public Builder handleCloseFrames(boolean handleCloseFrames) { + this.handleCloseFrames = handleCloseFrames; + return this; + } + + /** + * Close frame to send, when close frame was not send manually. Or {@code null} to disable proper close. + */ + public Builder sendCloseFrame(WebSocketCloseStatus sendCloseFrame) { + this.sendCloseFrame = sendCloseFrame; + return this; + } + + /** + * {@code true} if pong frames should not be forwarded + */ + public Builder dropPongFrames(boolean dropPongFrames) { + this.dropPongFrames = dropPongFrames; + return this; + } + + /** + * Frames decoder configuration. + */ + public Builder decoderConfig(WebSocketDecoderConfig decoderConfig) { + this.decoderConfig = decoderConfig == null ? 
WebSocketDecoderConfig.DEFAULT : decoderConfig; + this.decoderConfigBuilder = null; + return this; + } + + private WebSocketDecoderConfig.Builder decoderConfigBuilder() { + if (decoderConfigBuilder == null) { + decoderConfigBuilder = decoderConfig.toBuilder(); + } + return decoderConfigBuilder; + } + + public Builder maxFramePayloadLength(int maxFramePayloadLength) { + decoderConfigBuilder().maxFramePayloadLength(maxFramePayloadLength); + return this; + } + + public Builder expectMaskedFrames(boolean expectMaskedFrames) { + decoderConfigBuilder().expectMaskedFrames(expectMaskedFrames); + return this; + } + + public Builder allowMaskMismatch(boolean allowMaskMismatch) { + decoderConfigBuilder().allowMaskMismatch(allowMaskMismatch); + return this; + } + + public Builder allowExtensions(boolean allowExtensions) { + decoderConfigBuilder().allowExtensions(allowExtensions); + return this; + } + + public Builder closeOnProtocolViolation(boolean closeOnProtocolViolation) { + decoderConfigBuilder().closeOnProtocolViolation(closeOnProtocolViolation); + return this; + } + + public Builder withUTF8Validator(boolean withUTF8Validator) { + decoderConfigBuilder().withUTF8Validator(withUTF8Validator); + return this; + } + + /** + * Build unmodifiable server protocol configuration. + */ + public WebSocketServerProtocolConfig build() { + return new WebSocketServerProtocolConfig( + websocketPath, + subprotocols, + checkStartsWith, + handshakeTimeoutMillis, + forceCloseTimeoutMillis, + handleCloseFrames, + sendCloseFrame, + dropPongFrames, + decoderConfigBuilder == null ? 
decoderConfig : decoderConfigBuilder.build() + ); + } + } +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandler.java index 6b5ef13af53..d6ba5e19bef 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandler.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,22 +17,21 @@ import io.netty.buffer.Unpooled; import io.netty.channel.Channel; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandler; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelPipeline; import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.util.AttributeKey; +import io.netty.util.concurrent.Promise; -import java.util.List; +import java.util.Objects; -import static io.netty.handler.codec.http.HttpVersion.*; +import static 
io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static io.netty.handler.codec.http.websocketx.WebSocketServerProtocolConfig.DEFAULT_HANDSHAKE_TIMEOUT_MILLIS; /** * This handler does all the heavy lifting for you to run a websocket server. @@ -47,7 +46,7 @@ * to the io.netty.example.http.websocketx.server.WebSocketServer example. * * To know once a handshake was done you can intercept the - * {@link ChannelInboundHandler#userEventTriggered(ChannelHandlerContext, Object)} and check if the event was instance + * {@link ChannelHandler#userEventTriggered(ChannelHandlerContext, Object)} and check if the event was instance * of {@link HandshakeComplete}, the event will contain extra information about the handshake such as the request and * selected subprotocol. */ @@ -64,7 +63,12 @@ public enum ServerHandshakeStateEvent { * it provides extra information about the handshake */ @Deprecated - HANDSHAKE_COMPLETE + HANDSHAKE_COMPLETE, + + /** + * The Handshake was timed out + */ + HANDSHAKE_TIMEOUT } /** @@ -97,47 +101,119 @@ public String selectedSubprotocol() { private static final AttributeKey HANDSHAKER_ATTR_KEY = AttributeKey.valueOf(WebSocketServerHandshaker.class, "HANDSHAKER"); - private final String websocketPath; - private final String subprotocols; - private final boolean allowExtensions; - private final int maxFramePayloadLength; - private final boolean allowMaskMismatch; - private final boolean checkStartsWith; + private final WebSocketServerProtocolConfig serverConfig; + + /** + * Base constructor + * + * @param serverConfig + * Server protocol configuration. 
+ */ + public WebSocketServerProtocolHandler(WebSocketServerProtocolConfig serverConfig) { + super(Objects.requireNonNull(serverConfig, "serverConfig").dropPongFrames(), + serverConfig.sendCloseFrame(), + serverConfig.forceCloseTimeoutMillis() + ); + this.serverConfig = serverConfig; + } public WebSocketServerProtocolHandler(String websocketPath) { - this(websocketPath, null, false); + this(websocketPath, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, long handshakeTimeoutMillis) { + this(websocketPath, false, handshakeTimeoutMillis); } public WebSocketServerProtocolHandler(String websocketPath, boolean checkStartsWith) { - this(websocketPath, null, false, 65536, false, checkStartsWith); + this(websocketPath, checkStartsWith, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, boolean checkStartsWith, long handshakeTimeoutMillis) { + this(websocketPath, null, false, 65536, false, checkStartsWith, handshakeTimeoutMillis); } public WebSocketServerProtocolHandler(String websocketPath, String subprotocols) { - this(websocketPath, subprotocols, false); + this(websocketPath, subprotocols, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, long handshakeTimeoutMillis) { + this(websocketPath, subprotocols, false, handshakeTimeoutMillis); } public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, boolean allowExtensions) { - this(websocketPath, subprotocols, allowExtensions, 65536); + this(websocketPath, subprotocols, allowExtensions, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, boolean allowExtensions, + long handshakeTimeoutMillis) { + this(websocketPath, subprotocols, allowExtensions, 65536, handshakeTimeoutMillis); } public WebSocketServerProtocolHandler(String websocketPath, String 
subprotocols, boolean allowExtensions, int maxFrameSize) { - this(websocketPath, subprotocols, allowExtensions, maxFrameSize, false); + this(websocketPath, subprotocols, allowExtensions, maxFrameSize, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, + boolean allowExtensions, int maxFrameSize, long handshakeTimeoutMillis) { + this(websocketPath, subprotocols, allowExtensions, maxFrameSize, false, handshakeTimeoutMillis); } public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, boolean allowExtensions, int maxFrameSize, boolean allowMaskMismatch) { - this(websocketPath, subprotocols, allowExtensions, maxFrameSize, allowMaskMismatch, false); + this(websocketPath, subprotocols, allowExtensions, maxFrameSize, allowMaskMismatch, + DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, boolean allowExtensions, + int maxFrameSize, boolean allowMaskMismatch, long handshakeTimeoutMillis) { + this(websocketPath, subprotocols, allowExtensions, maxFrameSize, allowMaskMismatch, false, + handshakeTimeoutMillis); } public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, boolean allowExtensions, int maxFrameSize, boolean allowMaskMismatch, boolean checkStartsWith) { - this.websocketPath = websocketPath; - this.subprotocols = subprotocols; - this.allowExtensions = allowExtensions; - maxFramePayloadLength = maxFrameSize; - this.allowMaskMismatch = allowMaskMismatch; - this.checkStartsWith = checkStartsWith; + this(websocketPath, subprotocols, allowExtensions, maxFrameSize, allowMaskMismatch, checkStartsWith, + DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, + boolean allowExtensions, int maxFrameSize, boolean allowMaskMismatch, + boolean checkStartsWith, long handshakeTimeoutMillis) { + this(websocketPath, 
subprotocols, allowExtensions, maxFrameSize, allowMaskMismatch, checkStartsWith, true, + handshakeTimeoutMillis); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, + boolean allowExtensions, int maxFrameSize, boolean allowMaskMismatch, + boolean checkStartsWith, boolean dropPongFrames) { + this(websocketPath, subprotocols, allowExtensions, maxFrameSize, allowMaskMismatch, checkStartsWith, + dropPongFrames, DEFAULT_HANDSHAKE_TIMEOUT_MILLIS); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, boolean allowExtensions, + int maxFrameSize, boolean allowMaskMismatch, boolean checkStartsWith, + boolean dropPongFrames, long handshakeTimeoutMillis) { + this(websocketPath, subprotocols, checkStartsWith, dropPongFrames, handshakeTimeoutMillis, + WebSocketDecoderConfig.newBuilder() + .maxFramePayloadLength(maxFrameSize) + .allowMaskMismatch(allowMaskMismatch) + .allowExtensions(allowExtensions) + .build()); + } + + public WebSocketServerProtocolHandler(String websocketPath, String subprotocols, boolean checkStartsWith, + boolean dropPongFrames, long handshakeTimeoutMillis, + WebSocketDecoderConfig decoderConfig) { + this(WebSocketServerProtocolConfig.newBuilder() + .websocketPath(websocketPath) + .subprotocols(subprotocols) + .checkStartsWith(checkStartsWith) + .handshakeTimeoutMillis(handshakeTimeoutMillis) + .dropPongFrames(dropPongFrames) + .decoderConfig(decoderConfig) + .build()); } @Override @@ -145,30 +221,36 @@ public void handlerAdded(ChannelHandlerContext ctx) { ChannelPipeline cp = ctx.pipeline(); if (cp.get(WebSocketServerProtocolHandshakeHandler.class) == null) { // Add the WebSocketHandshakeHandler before this one. 
- ctx.pipeline().addBefore(ctx.name(), WebSocketServerProtocolHandshakeHandler.class.getName(), - new WebSocketServerProtocolHandshakeHandler(websocketPath, subprotocols, - allowExtensions, maxFramePayloadLength, allowMaskMismatch, checkStartsWith)); + cp.addBefore(ctx.name(), WebSocketServerProtocolHandshakeHandler.class.getName(), + new WebSocketServerProtocolHandshakeHandler(serverConfig)); } - if (cp.get(Utf8FrameValidator.class) == null) { + if (serverConfig.decoderConfig().withUTF8Validator() && cp.get(Utf8FrameValidator.class) == null) { // Add the UFT8 checking before this one. - ctx.pipeline().addBefore(ctx.name(), Utf8FrameValidator.class.getName(), + cp.addBefore(ctx.name(), Utf8FrameValidator.class.getName(), new Utf8FrameValidator()); } } @Override - protected void decode(ChannelHandlerContext ctx, WebSocketFrame frame, List out) throws Exception { - if (frame instanceof CloseWebSocketFrame) { + protected void decode(ChannelHandlerContext ctx, WebSocketFrame frame) throws Exception { + if (serverConfig.handleCloseFrames() && frame instanceof CloseWebSocketFrame) { WebSocketServerHandshaker handshaker = getHandshaker(ctx.channel()); if (handshaker != null) { frame.retain(); - handshaker.close(ctx.channel(), (CloseWebSocketFrame) frame); + Promise promise = ctx.newPromise(); + closeSent(promise); + handshaker.close(ctx, (CloseWebSocketFrame) frame).cascadeTo(promise); } else { - ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ChannelFutureListener.CLOSE); + ctx.writeAndFlush(Unpooled.EMPTY_BUFFER).addListener(ctx.channel(), ChannelFutureListeners.CLOSE); } return; } - super.decode(ctx, frame, out); + super.decode(ctx, frame); + } + + @Override + protected WebSocketServerHandshakeException buildHandshakeException(String message) { + return new WebSocketServerHandshakeException(message); } @Override @@ -176,7 +258,7 @@ public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws E if (cause instanceof WebSocketHandshakeException) { 
FullHttpResponse response = new DefaultFullHttpResponse( HTTP_1_1, HttpResponseStatus.BAD_REQUEST, Unpooled.wrappedBuffer(cause.getMessage().getBytes())); - ctx.channel().writeAndFlush(response).addListener(ChannelFutureListener.CLOSE); + ctx.channel().writeAndFlush(response).addListener(ctx.channel(), ChannelFutureListeners.CLOSE); } else { ctx.fireExceptionCaught(cause); ctx.close(); @@ -190,20 +272,4 @@ static WebSocketServerHandshaker getHandshaker(Channel channel) { static void setHandshaker(Channel channel, WebSocketServerHandshaker handshaker) { channel.attr(HANDSHAKER_ATTR_KEY).set(handshaker); } - - static ChannelHandler forbiddenHttpRequestResponder() { - return new ChannelInboundHandlerAdapter() { - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - if (msg instanceof FullHttpRequest) { - ((FullHttpRequest) msg).release(); - FullHttpResponse response = - new DefaultFullHttpResponse(HTTP_1_1, HttpResponseStatus.FORBIDDEN); - ctx.channel().writeAndFlush(response); - } else { - ctx.fireChannelRead(msg); - } - } - }; - } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java index 26e01f1860f..87868f8e3ba 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandshakeHandler.java @@ -1,11 +1,11 @@ /* - * Copyright 2012 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,104 +15,119 @@ */ package io.netty.handler.codec.http.websocketx; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelFutureListeners; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.ChannelPipeline; import io.netty.handler.codec.http.DefaultFullHttpResponse; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler.ServerHandshakeStateEvent; import io.netty.handler.ssl.SslHandler; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.FutureListener; +import io.netty.util.concurrent.Promise; -import static io.netty.handler.codec.http.HttpUtil.*; -import static io.netty.handler.codec.http.HttpMethod.*; -import static io.netty.handler.codec.http.HttpResponseStatus.*; -import static io.netty.handler.codec.http.HttpVersion.*; +import java.util.Objects; +import java.util.concurrent.TimeUnit; + +import static io.netty.handler.codec.http.HttpMethod.GET; +import static io.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN; +import static io.netty.handler.codec.http.HttpUtil.isKeepAlive; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; /** * Handles the HTTP handshake (the HTTP Upgrade request) for {@link WebSocketServerProtocolHandler}. 
*/ -class WebSocketServerProtocolHandshakeHandler extends ChannelInboundHandlerAdapter { - - private final String websocketPath; - private final String subprotocols; - private final boolean allowExtensions; - private final int maxFramePayloadSize; - private final boolean allowMaskMismatch; - private final boolean checkStartsWith; - - WebSocketServerProtocolHandshakeHandler(String websocketPath, String subprotocols, - boolean allowExtensions, int maxFrameSize, boolean allowMaskMismatch) { - this(websocketPath, subprotocols, allowExtensions, maxFrameSize, allowMaskMismatch, false); +class WebSocketServerProtocolHandshakeHandler implements ChannelHandler { + + private final WebSocketServerProtocolConfig serverConfig; + private Promise handshakePromise; + + WebSocketServerProtocolHandshakeHandler(WebSocketServerProtocolConfig serverConfig) { + this.serverConfig = Objects.requireNonNull(serverConfig, "serverConfig"); } - WebSocketServerProtocolHandshakeHandler(String websocketPath, String subprotocols, - boolean allowExtensions, int maxFrameSize, boolean allowMaskMismatch, boolean checkStartsWith) { - this.websocketPath = websocketPath; - this.subprotocols = subprotocols; - this.allowExtensions = allowExtensions; - maxFramePayloadSize = maxFrameSize; - this.allowMaskMismatch = allowMaskMismatch; - this.checkStartsWith = checkStartsWith; + @Override + public void handlerAdded(ChannelHandlerContext ctx) { + handshakePromise = ctx.newPromise(); } @Override public void channelRead(final ChannelHandlerContext ctx, Object msg) throws Exception { final FullHttpRequest req = (FullHttpRequest) msg; - if (isNotWebSocketPath(req)) { + if (!isWebSocketPath(req)) { ctx.fireChannelRead(msg); return; } try { - if (req.method() != GET) { - sendHttpResponse(ctx, req, new DefaultFullHttpResponse(HTTP_1_1, FORBIDDEN)); + if (!GET.equals(req.method())) { + sendHttpResponse(ctx, req, new DefaultFullHttpResponse(HTTP_1_1, FORBIDDEN, ctx.alloc().buffer(0))); return; } final 
WebSocketServerHandshakerFactory wsFactory = new WebSocketServerHandshakerFactory( - getWebSocketLocation(ctx.pipeline(), req, websocketPath), subprotocols, - allowExtensions, maxFramePayloadSize, allowMaskMismatch); + getWebSocketLocation(ctx.pipeline(), req, serverConfig.websocketPath()), + serverConfig.subprotocols(), serverConfig.decoderConfig()); final WebSocketServerHandshaker handshaker = wsFactory.newHandshaker(req); + Promise localHandshakePromise = handshakePromise; if (handshaker == null) { WebSocketServerHandshakerFactory.sendUnsupportedVersionResponse(ctx.channel()); } else { - final ChannelFuture handshakeFuture = handshaker.handshake(ctx.channel(), req); - handshakeFuture.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - ctx.fireExceptionCaught(future.cause()); - } else { - // Kept for compatibility - ctx.fireUserEventTriggered( - WebSocketServerProtocolHandler.ServerHandshakeStateEvent.HANDSHAKE_COMPLETE); - ctx.fireUserEventTriggered( - new WebSocketServerProtocolHandler.HandshakeComplete( - req.uri(), req.headers(), handshaker.selectedSubprotocol())); - } + // Ensure we set the handshaker and replace this handler before we + // trigger the actual handshake. Otherwise we may receive websocket bytes in this handler + // before we had a chance to replace it. + // + // See https://github.com/netty/netty/issues/9471. 
+ WebSocketServerProtocolHandler.setHandshaker(ctx.channel(), handshaker); + + Future handshakeFuture = handshaker.handshake(ctx.channel(), req); + handshakeFuture.addListener(future -> { + if (future.isFailed()) { + localHandshakePromise.tryFailure(future.cause()); + ctx.fireExceptionCaught(future.cause()); + } else { + localHandshakePromise.trySuccess(null); + // Kept for compatibility + ctx.fireUserEventTriggered( + WebSocketServerProtocolHandler.ServerHandshakeStateEvent.HANDSHAKE_COMPLETE); + ctx.fireUserEventTriggered( + new WebSocketServerProtocolHandler.HandshakeComplete( + req.uri(), req.headers(), handshaker.selectedSubprotocol())); } + ctx.pipeline().remove(this); }); - WebSocketServerProtocolHandler.setHandshaker(ctx.channel(), handshaker); - ctx.pipeline().replace(this, "WS403Responder", - WebSocketServerProtocolHandler.forbiddenHttpRequestResponder()); + applyHandshakeTimeout(ctx); } } finally { req.release(); } } - private boolean isNotWebSocketPath(FullHttpRequest req) { - return checkStartsWith ? !req.uri().startsWith(websocketPath) : !req.uri().equals(websocketPath); + private boolean isWebSocketPath(FullHttpRequest req) { + String websocketPath = serverConfig.websocketPath(); + String uri = req.uri(); + boolean checkStartUri = uri.startsWith(websocketPath); + boolean checkNextUri = "/".equals(websocketPath) || checkNextUri(uri, websocketPath); + return serverConfig.checkStartsWith() ? 
(checkStartUri && checkNextUri) : uri.equals(websocketPath); + } + + private boolean checkNextUri(String uri, String websocketPath) { + int len = websocketPath.length(); + if (uri.length() > len) { + char nextUri = uri.charAt(len); + return nextUri == '/' || nextUri == '?'; + } + return true; } private static void sendHttpResponse(ChannelHandlerContext ctx, HttpRequest req, HttpResponse res) { - ChannelFuture f = ctx.channel().writeAndFlush(res); + Future f = ctx.channel().writeAndFlush(res); if (!isKeepAlive(req) || res.status().code() != 200) { - f.addListener(ChannelFutureListener.CLOSE); + f.addListener(ctx.channel(), ChannelFutureListeners.CLOSE); } } @@ -125,4 +140,24 @@ private static String getWebSocketLocation(ChannelPipeline cp, HttpRequest req, String host = req.headers().get(HttpHeaderNames.HOST); return protocol + "://" + host + path; } + + private void applyHandshakeTimeout(ChannelHandlerContext ctx) { + Promise localHandshakePromise = handshakePromise; + final long handshakeTimeoutMillis = serverConfig.handshakeTimeoutMillis(); + if (handshakeTimeoutMillis <= 0 || localHandshakePromise.isDone()) { + return; + } + + final Future timeoutFuture = ctx.executor().schedule(() -> { + if (!localHandshakePromise.isDone() && + localHandshakePromise.tryFailure(new WebSocketServerHandshakeException("handshake timed out"))) { + ctx.flush() + .fireUserEventTriggered(ServerHandshakeStateEvent.HANDSHAKE_TIMEOUT) + .close(); + } + }, handshakeTimeoutMillis, TimeUnit.MILLISECONDS); + + // Cancel the handshake timeout when handshake is finished. 
+ localHandshakePromise.addListener(f -> timeoutFuture.cancel(false)); + } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketUtil.java index defdd41f3c0..09cd09d0823 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,14 +15,12 @@ */ package io.netty.handler.codec.http.websocketx; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.handler.codec.base64.Base64; -import io.netty.util.CharsetUtil; import io.netty.util.concurrent.FastThreadLocal; import java.security.MessageDigest; import java.security.NoSuchAlgorithmException; +import java.util.Base64; +import java.util.concurrent.ThreadLocalRandom; /** * A utility class mainly for use by web sockets @@ -34,7 +32,9 @@ final class WebSocketUtil { protected MessageDigest initialValue() throws Exception { try { //Try to get a MessageDigest that uses MD5 - return MessageDigest.getInstance("MD5"); + //Suppress a warning about weak hash algorithm + //since it's defined in draft-ietf-hybi-thewebsocketprotocol-00 + return MessageDigest.getInstance("MD5"); // lgtm [java/weak-cryptographic-algorithm] } catch (NoSuchAlgorithmException e) { //This shouldn't happen! How old is the computer? 
throw new InternalError("MD5 not supported on this platform - Outdated?"); @@ -47,7 +47,9 @@ protected MessageDigest initialValue() throws Exception { protected MessageDigest initialValue() throws Exception { try { //Try to get a MessageDigest that uses SHA1 - return MessageDigest.getInstance("SHA1"); + //Suppress a warning about weak hash algorithm + //since it's defined in draft-ietf-hybi-thewebsocketprotocol-00 + return MessageDigest.getInstance("SHA1"); // lgtm [java/weak-cryptographic-algorithm] } catch (NoSuchAlgorithmException e) { //This shouldn't happen! How old is the computer? throw new InternalError("SHA-1 not supported on this platform - Outdated?"); @@ -90,13 +92,8 @@ private static byte[] digest(FastThreadLocal digestFastThreadLoca * @return An encoded string containing the data */ static String base64(byte[] data) { - ByteBuf encodedData = Unpooled.wrappedBuffer(data); - ByteBuf encoded = Base64.encode(encodedData); - String encodedString = encoded.toString(CharsetUtil.UTF_8); - encoded.release(); - return encodedString; + return Base64.getEncoder().encodeToString(data); } - /** * Creates an arbitrary number of random bytes * @@ -105,25 +102,10 @@ static String base64(byte[] data) { */ static byte[] randomBytes(int size) { byte[] bytes = new byte[size]; - - for (int index = 0; index < size; index++) { - bytes[index] = (byte) randomNumber(0, 255); - } - + ThreadLocalRandom.current().nextBytes(bytes); return bytes; } - /** - * Generates a pseudo-random number - * - * @param minimum The minimum allowable value - * @param maximum The maximum allowable value - * @return A pseudo-random number - */ - static int randomNumber(int minimum, int maximum) { - return (int) (Math.random() * maximum + minimum); - } - /** * A private constructor to ensure that instances of this class cannot be made */ diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketVersion.java 
b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketVersion.java index 2cb1c195777..0901ceb656a 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketVersion.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/WebSocketVersion.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.util.AsciiString; + /** *

    * Versions of the web socket specification. @@ -25,49 +27,47 @@ *

    */ public enum WebSocketVersion { - UNKNOWN, + + UNKNOWN(AsciiString.cached("unknown")), /** - * draft-ietf-hybi-thewebsocketprotocol- 00. */ - V00, + V00(AsciiString.cached("0")), /** - * draft-ietf-hybi-thewebsocketprotocol- 07 */ - V07, + V07(AsciiString.cached("7")), /** - * draft-ietf-hybi-thewebsocketprotocol- 10 */ - V08, + V08(AsciiString.cached("8")), /** - * RFC 6455. This was originally draft-ietf-hybi-thewebsocketprotocol- + * RFC 6455. This was originally draft-ietf-hybi-thewebsocketprotocol- * 17 */ - V13; + V13(AsciiString.cached("13")); + + private final AsciiString headerValue; + WebSocketVersion(AsciiString headerValue) { + this.headerValue = headerValue; + } /** * @return Value for HTTP Header 'Sec-WebSocket-Version' */ public String toHttpHeaderValue() { - if (this == V00) { - return "0"; - } - if (this == V07) { - return "7"; - } - if (this == V08) { - return "8"; - } - if (this == V13) { - return "13"; - } - throw new IllegalStateException("Unknown web socket version: " + this); + return toAsciiString().toString(); + } + + AsciiString toAsciiString() { + return headerValue; } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtension.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtension.java index 6917b1fda32..1d4a089ef2f 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtension.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtension.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java index 92ee9cc35d6..3c496ca08b8 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,19 +15,21 @@ */ package io.netty.handler.codec.http.websocketx.extensions; -import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import io.netty.handler.codec.CodecException; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; +import io.netty.util.concurrent.Future; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; +import static io.netty.util.internal.ObjectUtil.checkNonEmpty; + /** * This handler negotiates and initializes the WebSocket Extensions. 
* @@ -38,7 +40,7 @@ * Find a basic implementation for compression extensions at * io.netty.handler.codec.http.websocketx.extensions.compression.WebSocketClientCompressionHandler. */ -public class WebSocketClientExtensionHandler extends ChannelDuplexHandler { +public class WebSocketClientExtensionHandler implements ChannelHandler { private final List extensionHandshakers; @@ -50,31 +52,26 @@ public class WebSocketClientExtensionHandler extends ChannelDuplexHandler { * with fallback configuration. */ public WebSocketClientExtensionHandler(WebSocketClientExtensionHandshaker... extensionHandshakers) { - if (extensionHandshakers == null) { - throw new NullPointerException("extensionHandshakers"); - } - if (extensionHandshakers.length == 0) { - throw new IllegalArgumentException("extensionHandshakers must contains at least one handshaker"); - } - this.extensionHandshakers = Arrays.asList(extensionHandshakers); + this.extensionHandshakers = Arrays.asList(checkNonEmpty(extensionHandshakers, "extensionHandshakers")); } @Override - public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + public Future write(final ChannelHandlerContext ctx, Object msg) { if (msg instanceof HttpRequest && WebSocketExtensionUtil.isWebsocketUpgrade(((HttpRequest) msg).headers())) { HttpRequest request = (HttpRequest) msg; String headerValue = request.headers().getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS); - + List extraExtensions = + new ArrayList(extensionHandshakers.size()); for (WebSocketClientExtensionHandshaker extensionHandshaker : extensionHandshakers) { - WebSocketExtensionData extensionData = extensionHandshaker.newRequestData(); - headerValue = WebSocketExtensionUtil.appendExtension(headerValue, - extensionData.name(), extensionData.parameters()); + extraExtensions.add(extensionHandshaker.newRequestData()); } + String newHeaderValue = WebSocketExtensionUtil + .computeMergeExtensionsHeaderValue(headerValue, extraExtensions); - 
request.headers().set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, headerValue); + request.headers().set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, newHeaderValue); } - super.write(ctx, msg, promise); + return ctx.write(msg); } @Override @@ -90,7 +87,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) List extensions = WebSocketExtensionUtil.extractExtensions(extensionsHeader); List validExtensions = - new ArrayList(extensions.size()); + new ArrayList<>(extensions.size()); int rsv = 0; for (WebSocketExtensionData extensionData : extensions) { @@ -120,12 +117,13 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) ctx.pipeline().addAfter(ctx.name(), encoder.getClass().getName(), encoder); } } - - ctx.pipeline().remove(ctx.name()); + ctx.fireChannelRead(msg); + ctx.pipeline().remove(this); + return; } } - super.channelRead(ctx, msg); + ctx.fireChannelRead(msg); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandshaker.java index 70514a326e9..4812966f822 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandshaker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtension.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtension.java index e5605e986e1..30bfb4ca830 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtension.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtension.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionData.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionData.java index eb3c5864de5..fecae3b2320 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionData.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionData.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http.websocketx.extensions; +import static java.util.Objects.requireNonNull; + import java.util.Collections; import java.util.Map; @@ -29,12 +31,8 @@ public final class WebSocketExtensionData { private final Map parameters; public WebSocketExtensionData(String name, Map parameters) { - if (name == null) { - throw new NullPointerException("name"); - } - if (parameters == null) { - throw new NullPointerException("parameters"); - } + requireNonNull(name, "name"); + requireNonNull(parameters, "parameters"); this.name = name; this.parameters = Collections.unmodifiableMap(parameters); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionDecoder.java index 0223cb46793..d86b1ad8a20 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionEncoder.java index de505bba128..96a84f8aa5d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilter.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilter.java new file mode 100644 index 00000000000..af9ca75e273 --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilter.java @@ -0,0 +1,44 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx.extensions; + +import io.netty.handler.codec.http.websocketx.WebSocketFrame; + +/** + * Filter that is responsible to skip the evaluation of a certain extension + * according to standard. + */ +public interface WebSocketExtensionFilter { + + /** + * A {@link WebSocketExtensionFilter} that never skip the evaluation of an + * any given extensions {@link WebSocketExtension}. + */ + WebSocketExtensionFilter NEVER_SKIP = frame -> false; + + /** + * A {@link WebSocketExtensionFilter} that always skip the evaluation of an + * any given extensions {@link WebSocketExtension}. + */ + WebSocketExtensionFilter ALWAYS_SKIP = frame -> true; + + /** + * Returns {@code true} if the evaluation of the extension must skipped + * for the given frame otherwise {@code false}. + */ + boolean mustSkip(WebSocketFrame frame); + +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterProvider.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterProvider.java new file mode 100644 index 00000000000..4633e761547 --- /dev/null +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterProvider.java @@ -0,0 +1,45 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx.extensions; + +/** + * Extension filter provider that is responsible to provide filters for a certain {@link WebSocketExtension} extension. + */ +public interface WebSocketExtensionFilterProvider { + + WebSocketExtensionFilterProvider DEFAULT = new WebSocketExtensionFilterProvider() { + @Override + public WebSocketExtensionFilter encoderFilter() { + return WebSocketExtensionFilter.NEVER_SKIP; + } + + @Override + public WebSocketExtensionFilter decoderFilter() { + return WebSocketExtensionFilter.NEVER_SKIP; + } + }; + + /** + * Returns the extension filter for {@link WebSocketExtensionEncoder} encoder. + */ + WebSocketExtensionFilter encoderFilter(); + + /** + * Returns the extension filter for {@link WebSocketExtensionDecoder} decoder. + */ + WebSocketExtensionFilter decoderFilter(); + +} diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java index fd91f72eab0..d1fe9584b26 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -37,20 +37,23 @@ public final class WebSocketExtensionUtil { private static final Pattern PARAMETER = Pattern.compile("^([^=]+)(=[\\\"]?([^\\\"]+)[\\\"]?)?$"); static boolean isWebsocketUpgrade(HttpHeaders headers) { - return headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true) && + //this contains check does not allocate an iterator, and most requests are not upgrades + //so we do the contains check first before checking for specific values + return headers.contains(HttpHeaderNames.UPGRADE) && + headers.containsValue(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE, true) && headers.contains(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET, true); } public static List extractExtensions(String extensionHeader) { String[] rawExtensions = extensionHeader.split(EXTENSION_SEPARATOR); if (rawExtensions.length > 0) { - List extensions = new ArrayList(rawExtensions.length); + List extensions = new ArrayList<>(rawExtensions.length); for (String rawExtension : rawExtensions) { String[] extensionParameters = rawExtension.split(PARAMETER_SEPARATOR); String name = extensionParameters[0].trim(); Map parameters; if (extensionParameters.length > 1) { - parameters = new HashMap(extensionParameters.length - 1); + parameters = new HashMap<>(extensionParameters.length - 1); for (int i = 1; i < extensionParameters.length; i++) { String parameter = extensionParameters[i].trim(); Matcher parameterMatcher = PARAMETER.matcher(parameter); @@ -69,25 +72,53 @@ public static List extractExtensions(String extensionHea } } - static String appendExtension(String currentHeaderValue, String extensionName, - Map extensionParameters) { + static String 
computeMergeExtensionsHeaderValue(String userDefinedHeaderValue, + List extraExtensions) { + List userDefinedExtensions = + userDefinedHeaderValue != null ? + extractExtensions(userDefinedHeaderValue) : + Collections.emptyList(); - StringBuilder newHeaderValue = new StringBuilder( - currentHeaderValue != null ? currentHeaderValue.length() : extensionName.length() + 1); - if (currentHeaderValue != null && !currentHeaderValue.trim().isEmpty()) { - newHeaderValue.append(currentHeaderValue); - newHeaderValue.append(EXTENSION_SEPARATOR); + for (WebSocketExtensionData userDefined: userDefinedExtensions) { + WebSocketExtensionData matchingExtra = null; + int i; + for (i = 0; i < extraExtensions.size(); i ++) { + WebSocketExtensionData extra = extraExtensions.get(i); + if (extra.name().equals(userDefined.name())) { + matchingExtra = extra; + break; + } + } + if (matchingExtra == null) { + extraExtensions.add(userDefined); + } else { + // merge with higher precedence to user defined parameters + Map mergedParameters = new HashMap(matchingExtra.parameters()); + mergedParameters.putAll(userDefined.parameters()); + extraExtensions.set(i, new WebSocketExtensionData(matchingExtra.name(), mergedParameters)); + } } - newHeaderValue.append(extensionName); - for (Entry extensionParameter : extensionParameters.entrySet()) { - newHeaderValue.append(PARAMETER_SEPARATOR); - newHeaderValue.append(extensionParameter.getKey()); - if (extensionParameter.getValue() != null) { - newHeaderValue.append(PARAMETER_EQUAL); - newHeaderValue.append(extensionParameter.getValue()); + + StringBuilder sb = new StringBuilder(150); + + for (WebSocketExtensionData data: extraExtensions) { + sb.append(data.name()); + for (Entry parameter : data.parameters().entrySet()) { + sb.append(PARAMETER_SEPARATOR); + sb.append(parameter.getKey()); + if (parameter.getValue() != null) { + sb.append(PARAMETER_EQUAL); + sb.append(parameter.getValue()); + } } + sb.append(EXTENSION_SEPARATOR); + } + + if 
(!extraExtensions.isEmpty()) { + sb.setLength(sb.length() - EXTENSION_SEPARATOR.length()); } - return newHeaderValue.toString(); + + return sb.toString(); } private WebSocketExtensionUtil() { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtension.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtension.java index 60f16eb3c8e..9f3451666f4 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtension.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtension.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -26,7 +26,6 @@ public interface WebSocketServerExtension extends WebSocketExtension { * * @return the acknowledged extension configuration. 
*/ - //TODO: after migrating to JDK 8 rename this to 'newResponseData()' and mark old as deprecated with default method - WebSocketExtensionData newReponseData(); + WebSocketExtensionData newResponseData(); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java index 1b2e39189c3..5be980fb789 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,20 +15,23 @@ */ package io.netty.handler.codec.http.websocketx.extensions; -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.FutureListener; import java.util.ArrayList; import java.util.Arrays; import java.util.Iterator; import java.util.List; +import static io.netty.util.internal.ObjectUtil.checkNonEmpty; + /** * This handler 
negotiates and initializes the WebSocket Extensions. * @@ -39,7 +42,7 @@ * Find a basic implementation for compression extensions at * io.netty.handler.codec.http.websocketx.extensions.compression.WebSocketServerCompressionHandler. */ -public class WebSocketServerExtensionHandler extends ChannelDuplexHandler { +public class WebSocketServerExtensionHandler implements ChannelHandler { private final List extensionHandshakers; @@ -53,18 +56,11 @@ public class WebSocketServerExtensionHandler extends ChannelDuplexHandler { * with fallback configuration. */ public WebSocketServerExtensionHandler(WebSocketServerExtensionHandshaker... extensionHandshakers) { - if (extensionHandshakers == null) { - throw new NullPointerException("extensionHandshakers"); - } - if (extensionHandshakers.length == 0) { - throw new IllegalArgumentException("extensionHandshakers must contains at least one handshaker"); - } - this.extensionHandshakers = Arrays.asList(extensionHandshakers); + this.extensionHandshakers = Arrays.asList(checkNonEmpty(extensionHandshakers, "extensionHandshakers")); } @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) - throws Exception { + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof HttpRequest) { HttpRequest request = (HttpRequest) msg; @@ -89,7 +85,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) if (validExtension != null && ((validExtension.rsv() & rsv) == 0)) { if (validExtensions == null) { - validExtensions = new ArrayList(1); + validExtensions = new ArrayList<>(1); } rsv = rsv | validExtension.rsv(); validExtensions.add(validExtension); @@ -99,43 +95,60 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) } } - super.channelRead(ctx, msg); + ctx.fireChannelRead(msg); } @Override - public void write(final ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { - if (msg instanceof HttpResponse && - 
WebSocketExtensionUtil.isWebsocketUpgrade(((HttpResponse) msg).headers()) && validExtensions != null) { - HttpResponse response = (HttpResponse) msg; - String headerValue = response.headers().getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS); - - for (WebSocketServerExtension extension : validExtensions) { - WebSocketExtensionData extensionData = extension.newReponseData(); - headerValue = WebSocketExtensionUtil.appendExtension(headerValue, - extensionData.name(), extensionData.parameters()); - } - - promise.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (future.isSuccess()) { + public Future write(final ChannelHandlerContext ctx, Object msg) { + if (msg instanceof HttpResponse) { + HttpResponse httpResponse = (HttpResponse) msg; + //checking the status is faster than looking at headers + //so we do this first + if (HttpResponseStatus.SWITCHING_PROTOCOLS.equals(httpResponse.status())) { + HttpHeaders headers = httpResponse.headers(); + + FutureListener listener = null; + if (WebSocketExtensionUtil.isWebsocketUpgrade(headers)) { + if (validExtensions != null) { + String headerValue = headers.getAsString(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS); + List extraExtensions = + new ArrayList(extensionHandshakers.size()); for (WebSocketServerExtension extension : validExtensions) { - WebSocketExtensionDecoder decoder = extension.newExtensionDecoder(); - WebSocketExtensionEncoder encoder = extension.newExtensionEncoder(); - ctx.pipeline().addAfter(ctx.name(), decoder.getClass().getName(), decoder); - ctx.pipeline().addAfter(ctx.name(), encoder.getClass().getName(), encoder); + extraExtensions.add(extension.newResponseData()); } - } + String newHeaderValue = WebSocketExtensionUtil + .computeMergeExtensionsHeaderValue(headerValue, extraExtensions); + listener = future -> { + if (future.isSuccess()) { + for (WebSocketServerExtension extension : validExtensions) { + 
WebSocketExtensionDecoder decoder = extension.newExtensionDecoder(); + WebSocketExtensionEncoder encoder = extension.newExtensionEncoder(); + String name = ctx.name(); + ctx.pipeline() + + .addAfter(name, decoder.getClass().getName(), decoder) + .addAfter(name, encoder.getClass().getName(), encoder); + } + } + }; - ctx.pipeline().remove(ctx.name()); + if (newHeaderValue != null) { + headers.set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, newHeaderValue); + } + } + Future f = ctx.write(httpResponse); + if (listener != null) { + f.addListener(listener); + } + f.addListener(future -> { + if (future.isSuccess()) { + ctx.pipeline().remove(this); + } + }); + return f; } - }); - - if (headerValue != null) { - response.headers().set(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS, headerValue); } } - - super.write(ctx, msg, promise); + return ctx.write(msg); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandshaker.java index de02e546d14..599b1b46edb 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandshaker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateDecoder.java index 89053fd82d2..1aa2aabf085 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -28,8 +28,9 @@ import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.WebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionDecoder; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter; -import java.util.List; +import java.util.Objects; /** * Deflate implementation of a payload decompressor for @@ -37,18 +38,35 @@ */ abstract class DeflateDecoder extends WebSocketExtensionDecoder { - static final byte[] FRAME_TAIL = new byte[] {0x00, 0x00, (byte) 0xff, (byte) 0xff}; + static final ByteBuf FRAME_TAIL = Unpooled.unreleasableBuffer( + Unpooled.wrappedBuffer(new byte[] {0x00, 0x00, (byte) 0xff, (byte) 0xff})) + .asReadOnly(); + + static final ByteBuf EMPTY_DEFLATE_BLOCK = 
Unpooled.unreleasableBuffer( + Unpooled.wrappedBuffer(new byte[] { 0x00 })) + .asReadOnly(); private final boolean noContext; + private final WebSocketExtensionFilter extensionDecoderFilter; private EmbeddedChannel decoder; /** * Constructor + * * @param noContext true to disable context takeover. + * @param extensionDecoderFilter extension decoder filter. */ - public DeflateDecoder(boolean noContext) { + DeflateDecoder(boolean noContext, WebSocketExtensionFilter extensionDecoderFilter) { this.noContext = noContext; + this.extensionDecoderFilter = Objects.requireNonNull(extensionDecoderFilter, "extensionDecoderFilter"); + } + + /** + * Returns the extension decoder filter. + */ + protected WebSocketExtensionFilter extensionDecoderFilter() { + return extensionDecoderFilter; } protected abstract boolean appendFrameTail(WebSocketFrame msg); @@ -56,7 +74,36 @@ public DeflateDecoder(boolean noContext) { protected abstract int newRsv(WebSocketFrame msg); @Override - protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg, List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg) throws Exception { + final ByteBuf decompressedContent = decompressContent(ctx, msg); + + final WebSocketFrame outMsg; + if (msg instanceof TextWebSocketFrame) { + outMsg = new TextWebSocketFrame(msg.isFinalFragment(), newRsv(msg), decompressedContent); + } else if (msg instanceof BinaryWebSocketFrame) { + outMsg = new BinaryWebSocketFrame(msg.isFinalFragment(), newRsv(msg), decompressedContent); + } else if (msg instanceof ContinuationWebSocketFrame) { + outMsg = new ContinuationWebSocketFrame(msg.isFinalFragment(), newRsv(msg), decompressedContent); + } else { + throw new CodecException("unexpected frame type: " + msg.getClass().getName()); + } + + ctx.fireChannelRead(outMsg); + } + + @Override + public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { + cleanup(); + super.handlerRemoved(ctx); + } + + @Override + public void 
channelInactive(ChannelHandlerContext ctx) throws Exception { + cleanup(); + super.channelInactive(ctx); + } + + private ByteBuf decompressContent(ChannelHandlerContext ctx, WebSocketFrame msg) { if (decoder == null) { if (!(msg instanceof TextWebSocketFrame) && !(msg instanceof BinaryWebSocketFrame)) { throw new CodecException("unexpected initial frame type: " + msg.getClass().getName()); @@ -65,12 +112,14 @@ protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg, List out) throws Exception { + protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, List out) throws Exception { + final ByteBuf compressedContent; + if (msg.content().isReadable()) { + compressedContent = compressContent(ctx, msg); + } else if (msg.isFinalFragment()) { + // Set empty DEFLATE block manually for unknown buffer size + // https://tools.ietf.org/html/rfc7692#section-7.2.3.6 + compressedContent = EMPTY_DEFLATE_BLOCK.duplicate(); + } else { + throw new CodecException("cannot compress content buffer"); + } + + final WebSocketFrame outMsg; + if (msg instanceof TextWebSocketFrame) { + outMsg = new TextWebSocketFrame(msg.isFinalFragment(), rsv(msg), compressedContent); + } else if (msg instanceof BinaryWebSocketFrame) { + outMsg = new BinaryWebSocketFrame(msg.isFinalFragment(), rsv(msg), compressedContent); + } else if (msg instanceof ContinuationWebSocketFrame) { + outMsg = new ContinuationWebSocketFrame(msg.isFinalFragment(), rsv(msg), compressedContent); + } else { + throw new CodecException("unexpected frame type: " + msg.getClass().getName()); + } + + out.add(outMsg); + } + + @Override + public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { + cleanup(); + super.handlerRemoved(ctx); + } + + private ByteBuf compressContent(ChannelHandlerContext ctx, WebSocketFrame msg) { if (encoder == null) { encoder = new EmbeddedChannel(ZlibCodecFactory.newZlibEncoder( ZlibWrapper.NONE, compressionLevel, windowSize, 8)); @@ -89,6 +134,7 @@ protected void 
encode(ChannelHandlerContext ctx, WebSocketFrame msg, } fullCompressedContent.addComponent(true, partCompressedContent); } + if (fullCompressedContent.numComponents() <= 0) { fullCompressedContent.release(); throw new CodecException("cannot read compressed buffer"); @@ -100,44 +146,19 @@ protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, ByteBuf compressedContent; if (removeFrameTail(msg)) { - int realLength = fullCompressedContent.readableBytes() - FRAME_TAIL.length; + int realLength = fullCompressedContent.readableBytes() - FRAME_TAIL.readableBytes(); compressedContent = fullCompressedContent.slice(0, realLength); } else { compressedContent = fullCompressedContent; } - WebSocketFrame outMsg; - if (msg instanceof TextWebSocketFrame) { - outMsg = new TextWebSocketFrame(msg.isFinalFragment(), rsv(msg), compressedContent); - } else if (msg instanceof BinaryWebSocketFrame) { - outMsg = new BinaryWebSocketFrame(msg.isFinalFragment(), rsv(msg), compressedContent); - } else if (msg instanceof ContinuationWebSocketFrame) { - outMsg = new ContinuationWebSocketFrame(msg.isFinalFragment(), rsv(msg), compressedContent); - } else { - throw new CodecException("unexpected frame type: " + msg.getClass().getName()); - } - out.add(outMsg); - } - - @Override - public void handlerRemoved(ChannelHandlerContext ctx) throws Exception { - cleanup(); - super.handlerRemoved(ctx); + return compressedContent; } private void cleanup() { if (encoder != null) { // Clean-up the previous encoder if not cleaned up correctly. 
- if (encoder.finish()) { - for (;;) { - ByteBuf buf = encoder.readOutbound(); - if (buf == null) { - break; - } - // Release the buffer - buf.release(); - } - } + encoder.finishAndReleaseAll(); encoder = null; } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshaker.java index 6671d1f1af8..dbeeefc15cc 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshaker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,15 +15,17 @@ */ package io.netty.handler.codec.http.websocketx.extensions.compression; -import static io.netty.handler.codec.http.websocketx.extensions.compression. 
- DeflateFrameServerExtensionHandshaker.*; import io.netty.handler.codec.http.websocketx.extensions.WebSocketClientExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketClientExtensionHandshaker; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionDecoder; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionEncoder; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilterProvider; import java.util.Collections; +import java.util.Objects; + +import static io.netty.handler.codec.http.websocketx.extensions.compression.DeflateFrameServerExtensionHandshaker.*; /** * perframe-deflate @@ -33,6 +35,7 @@ public final class DeflateFrameClientExtensionHandshaker implements WebSocketCli private final int compressionLevel; private final boolean useWebkitExtensionName; + private final WebSocketExtensionFilterProvider extensionFilterProvider; /** * Constructor with default configuration. @@ -48,19 +51,33 @@ public DeflateFrameClientExtensionHandshaker(boolean useWebkitExtensionName) { * Compression level between 0 and 9 (default is 6). */ public DeflateFrameClientExtensionHandshaker(int compressionLevel, boolean useWebkitExtensionName) { + this(compressionLevel, useWebkitExtensionName, WebSocketExtensionFilterProvider.DEFAULT); + } + + /** + * Constructor with custom configuration. + * + * @param compressionLevel + * Compression level between 0 and 9 (default is 6). + * @param extensionFilterProvider + * provides client extension filters for per frame deflate encoder and decoder. 
+ */ + public DeflateFrameClientExtensionHandshaker(int compressionLevel, boolean useWebkitExtensionName, + WebSocketExtensionFilterProvider extensionFilterProvider) { if (compressionLevel < 0 || compressionLevel > 9) { throw new IllegalArgumentException( "compressionLevel: " + compressionLevel + " (expected: 0-9)"); } this.compressionLevel = compressionLevel; this.useWebkitExtensionName = useWebkitExtensionName; + this.extensionFilterProvider = Objects.requireNonNull(extensionFilterProvider, "extensionFilterProvider"); } @Override public WebSocketExtensionData newRequestData() { return new WebSocketExtensionData( useWebkitExtensionName ? X_WEBKIT_DEFLATE_FRAME_EXTENSION : DEFLATE_FRAME_EXTENSION, - Collections.emptyMap()); + Collections.emptyMap()); } @Override @@ -71,7 +88,7 @@ public WebSocketClientExtension handshakeExtension(WebSocketExtensionData extens } if (extensionData.parameters().isEmpty()) { - return new DeflateFrameClientExtension(compressionLevel); + return new DeflateFrameClientExtension(compressionLevel, extensionFilterProvider); } else { return null; } @@ -80,9 +97,11 @@ public WebSocketClientExtension handshakeExtension(WebSocketExtensionData extens private static class DeflateFrameClientExtension implements WebSocketClientExtension { private final int compressionLevel; + private final WebSocketExtensionFilterProvider extensionFilterProvider; - public DeflateFrameClientExtension(int compressionLevel) { + DeflateFrameClientExtension(int compressionLevel, WebSocketExtensionFilterProvider extensionFilterProvider) { this.compressionLevel = compressionLevel; + this.extensionFilterProvider = extensionFilterProvider; } @Override @@ -92,12 +111,13 @@ public int rsv() { @Override public WebSocketExtensionEncoder newExtensionEncoder() { - return new PerFrameDeflateEncoder(compressionLevel, 15, false); + return new PerFrameDeflateEncoder(compressionLevel, 15, false, + extensionFilterProvider.encoderFilter()); } @Override public WebSocketExtensionDecoder 
newExtensionDecoder() { - return new PerFrameDeflateDecoder(false); + return new PerFrameDeflateDecoder(false, extensionFilterProvider.decoderFilter()); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshaker.java index e7ea9f3571b..d1129f7de18 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshaker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,10 +18,12 @@ import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionDecoder; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionEncoder; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilterProvider; import io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtensionHandshaker; import java.util.Collections; +import java.util.Objects; /** * perframe-deflate @@ -33,6 +35,7 @@ public final class DeflateFrameServerExtensionHandshaker implements WebSocketSer static final String DEFLATE_FRAME_EXTENSION = "deflate-frame"; private final int compressionLevel; + private final 
WebSocketExtensionFilterProvider extensionFilterProvider; /** * Constructor with default configuration. @@ -48,11 +51,25 @@ public DeflateFrameServerExtensionHandshaker() { * Compression level between 0 and 9 (default is 6). */ public DeflateFrameServerExtensionHandshaker(int compressionLevel) { + this(compressionLevel, WebSocketExtensionFilterProvider.DEFAULT); + } + + /** + * Constructor with custom configuration. + * + * @param compressionLevel + * Compression level between 0 and 9 (default is 6). + * @param extensionFilterProvider + * provides server extension filters for per frame deflate encoder and decoder. + */ + public DeflateFrameServerExtensionHandshaker(int compressionLevel, + WebSocketExtensionFilterProvider extensionFilterProvider) { if (compressionLevel < 0 || compressionLevel > 9) { throw new IllegalArgumentException( "compressionLevel: " + compressionLevel + " (expected: 0-9)"); } this.compressionLevel = compressionLevel; + this.extensionFilterProvider = Objects.requireNonNull(extensionFilterProvider, "extensionFilterProvider"); } @Override @@ -63,7 +80,7 @@ public WebSocketServerExtension handshakeExtension(WebSocketExtensionData extens } if (extensionData.parameters().isEmpty()) { - return new DeflateFrameServerExtension(compressionLevel, extensionData.name()); + return new DeflateFrameServerExtension(compressionLevel, extensionData.name(), extensionFilterProvider); } else { return null; } @@ -73,10 +90,13 @@ private static class DeflateFrameServerExtension implements WebSocketServerExten private final String extensionName; private final int compressionLevel; + private final WebSocketExtensionFilterProvider extensionFilterProvider; - public DeflateFrameServerExtension(int compressionLevel, String extensionName) { + DeflateFrameServerExtension(int compressionLevel, String extensionName, + WebSocketExtensionFilterProvider extensionFilterProvider) { this.extensionName = extensionName; this.compressionLevel = compressionLevel; + 
this.extensionFilterProvider = extensionFilterProvider; } @Override @@ -86,17 +106,18 @@ public int rsv() { @Override public WebSocketExtensionEncoder newExtensionEncoder() { - return new PerFrameDeflateEncoder(compressionLevel, 15, false); + return new PerFrameDeflateEncoder(compressionLevel, 15, false, + extensionFilterProvider.encoderFilter()); } @Override public WebSocketExtensionDecoder newExtensionDecoder() { - return new PerFrameDeflateDecoder(false); + return new PerFrameDeflateDecoder(false, extensionFilterProvider.decoderFilter()); } @Override - public WebSocketExtensionData newReponseData() { - return new WebSocketExtensionData(extensionName, Collections.emptyMap()); + public WebSocketExtensionData newResponseData() { + return new WebSocketExtensionData(extensionName, Collections.emptyMap()); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoder.java index ad955442507..a987e2c6406 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,6 +20,7 @@ import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.WebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter; /** * Per-frame implementation of deflate decompressor. @@ -28,18 +29,37 @@ class PerFrameDeflateDecoder extends DeflateDecoder { /** * Constructor + * * @param noContext true to disable context takeover. */ - public PerFrameDeflateDecoder(boolean noContext) { - super(noContext); + PerFrameDeflateDecoder(boolean noContext) { + super(noContext, WebSocketExtensionFilter.NEVER_SKIP); + } + + /** + * Constructor + * + * @param noContext true to disable context takeover. + * @param extensionDecoderFilter extension decoder filter for per frame deflate decoder. 
+ */ + PerFrameDeflateDecoder(boolean noContext, WebSocketExtensionFilter extensionDecoderFilter) { + super(noContext, extensionDecoderFilter); } @Override public boolean acceptInboundMessage(Object msg) throws Exception { - return (msg instanceof TextWebSocketFrame || - msg instanceof BinaryWebSocketFrame || + if (!super.acceptInboundMessage(msg)) { + return false; + } + + WebSocketFrame wsFrame = (WebSocketFrame) msg; + if (extensionDecoderFilter().mustSkip(wsFrame)) { + return false; + } + + return (msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame || msg instanceof ContinuationWebSocketFrame) && - (((WebSocketFrame) msg).rsv() & WebSocketExtension.RSV1) > 0; + (wsFrame.rsv() & WebSocketExtension.RSV1) > 0; } @Override @@ -51,4 +71,5 @@ protected int newRsv(WebSocketFrame msg) { protected boolean appendFrameTail(WebSocketFrame msg) { return true; } + } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoder.java index aaffd8dc03e..9cd3ede1d01 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,6 +20,7 @@ import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.WebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter; /** * Per-frame implementation of deflate compressor. @@ -28,21 +29,43 @@ class PerFrameDeflateEncoder extends DeflateEncoder { /** * Constructor + * * @param compressionLevel compression level of the compressor. - * @param windowSize maximum size of the window compressor buffer. - * @param noContext true to disable context takeover. + * @param windowSize maximum size of the window compressor buffer. + * @param noContext true to disable context takeover. */ - public PerFrameDeflateEncoder(int compressionLevel, int windowSize, boolean noContext) { - super(compressionLevel, windowSize, noContext); + PerFrameDeflateEncoder(int compressionLevel, int windowSize, boolean noContext) { + super(compressionLevel, windowSize, noContext, WebSocketExtensionFilter.NEVER_SKIP); + } + + /** + * Constructor + * + * @param compressionLevel compression level of the compressor. + * @param windowSize maximum size of the window compressor buffer. + * @param noContext true to disable context takeover. + * @param extensionEncoderFilter extension encoder filter for per frame deflate encoder. 
+ */ + PerFrameDeflateEncoder(int compressionLevel, int windowSize, boolean noContext, + WebSocketExtensionFilter extensionEncoderFilter) { + super(compressionLevel, windowSize, noContext, extensionEncoderFilter); } @Override public boolean acceptOutboundMessage(Object msg) throws Exception { - return (msg instanceof TextWebSocketFrame || - msg instanceof BinaryWebSocketFrame || + if (!super.acceptOutboundMessage(msg)) { + return false; + } + + WebSocketFrame wsFrame = (WebSocketFrame) msg; + if (extensionEncoderFilter().mustSkip(wsFrame)) { + return false; + } + + return (msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame || msg instanceof ContinuationWebSocketFrame) && - ((WebSocketFrame) msg).content().readableBytes() > 0 && - (((WebSocketFrame) msg).rsv() & WebSocketExtension.RSV1) == 0; + wsFrame.content().readableBytes() > 0 && + (wsFrame.rsv() & WebSocketExtension.RSV1) == 0; } @Override diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshaker.java index ef3dfe7245e..bf24a13baf1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshaker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,22 +15,23 @@ */ package io.netty.handler.codec.http.websocketx.extensions.compression; -import static io.netty.handler.codec.http.websocketx.extensions.compression. - PerMessageDeflateServerExtensionHandshaker.*; - import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.http.websocketx.extensions.WebSocketClientExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketClientExtensionHandshaker; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionDecoder; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionEncoder; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilterProvider; import java.util.HashMap; import java.util.Iterator; import java.util.Map.Entry; +import java.util.Objects; + +import static io.netty.handler.codec.http.websocketx.extensions.compression.PerMessageDeflateServerExtensionHandshaker.*; /** - * permessage-deflate + * permessage-deflate * handshake implementation. */ public final class PerMessageDeflateClientExtensionHandshaker implements WebSocketClientExtensionHandshaker { @@ -40,6 +41,7 @@ public final class PerMessageDeflateClientExtensionHandshaker implements WebSock private final int requestedServerWindowSize; private final boolean allowClientNoContext; private final boolean requestedServerNoContext; + private final WebSocketExtensionFilterProvider extensionFilterProvider; /** * Constructor with default configuration. 
@@ -68,6 +70,34 @@ public PerMessageDeflateClientExtensionHandshaker() { public PerMessageDeflateClientExtensionHandshaker(int compressionLevel, boolean allowClientWindowSize, int requestedServerWindowSize, boolean allowClientNoContext, boolean requestedServerNoContext) { + this(compressionLevel, allowClientWindowSize, requestedServerWindowSize, + allowClientNoContext, requestedServerNoContext, WebSocketExtensionFilterProvider.DEFAULT); + } + + /** + * Constructor with custom configuration. + * + * @param compressionLevel + * Compression level between 0 and 9 (default is 6). + * @param allowClientWindowSize + * allows WebSocket server to customize the client inflater window size + * (default is false). + * @param requestedServerWindowSize + * indicates the requested sever window size to use if server inflater is customizable. + * @param allowClientNoContext + * allows WebSocket server to activate client_no_context_takeover + * (default is false). + * @param requestedServerNoContext + * indicates if client needs to activate server_no_context_takeover + * if server is compatible with (default is false). + * @param extensionFilterProvider + * provides client extension filters for per message deflate encoder and decoder. 
+ */ + public PerMessageDeflateClientExtensionHandshaker(int compressionLevel, + boolean allowClientWindowSize, int requestedServerWindowSize, + boolean allowClientNoContext, boolean requestedServerNoContext, + WebSocketExtensionFilterProvider extensionFilterProvider) { + if (requestedServerWindowSize > MAX_WINDOW_SIZE || requestedServerWindowSize < MIN_WINDOW_SIZE) { throw new IllegalArgumentException( "requestedServerWindowSize: " + requestedServerWindowSize + " (expected: 8-15)"); @@ -81,12 +111,13 @@ public PerMessageDeflateClientExtensionHandshaker(int compressionLevel, this.requestedServerWindowSize = requestedServerWindowSize; this.allowClientNoContext = allowClientNoContext; this.requestedServerNoContext = requestedServerNoContext; + this.extensionFilterProvider = Objects.requireNonNull(extensionFilterProvider, "extensionFilterProvider"); } @Override public WebSocketExtensionData newRequestData() { - HashMap parameters = new HashMap(4); - if (requestedServerWindowSize != MAX_WINDOW_SIZE) { + HashMap parameters = new HashMap<>(4); + if (requestedServerNoContext) { parameters.put(SERVER_NO_CONTEXT, null); } if (allowClientNoContext) { @@ -122,13 +153,16 @@ public WebSocketClientExtension handshakeExtension(WebSocketExtensionData extens // allowed client_window_size_bits if (allowClientWindowSize) { clientWindowSize = Integer.parseInt(parameter.getValue()); + if (clientWindowSize > MAX_WINDOW_SIZE || clientWindowSize < MIN_WINDOW_SIZE) { + succeed = false; + } } else { succeed = false; } } else if (SERVER_MAX_WINDOW.equalsIgnoreCase(parameter.getKey())) { // acknowledged server_window_size_bits serverWindowSize = Integer.parseInt(parameter.getValue()); - if (clientWindowSize > MAX_WINDOW_SIZE || clientWindowSize < MIN_WINDOW_SIZE) { + if (serverWindowSize > MAX_WINDOW_SIZE || serverWindowSize < MIN_WINDOW_SIZE) { succeed = false; } } else if (CLIENT_NO_CONTEXT.equalsIgnoreCase(parameter.getKey())) { @@ -140,11 +174,7 @@ public WebSocketClientExtension 
handshakeExtension(WebSocketExtensionData extens } } else if (SERVER_NO_CONTEXT.equalsIgnoreCase(parameter.getKey())) { // acknowledged server_no_context_takeover - if (requestedServerNoContext) { - serverNoContext = true; - } else { - succeed = false; - } + serverNoContext = true; } else { // unknown parameter succeed = false; @@ -152,13 +182,13 @@ public WebSocketClientExtension handshakeExtension(WebSocketExtensionData extens } if ((requestedServerNoContext && !serverNoContext) || - requestedServerWindowSize != serverWindowSize) { + requestedServerWindowSize < serverWindowSize) { succeed = false; } if (succeed) { return new PermessageDeflateExtension(serverNoContext, serverWindowSize, - clientNoContext, clientWindowSize); + clientNoContext, clientWindowSize, extensionFilterProvider); } else { return null; } @@ -170,28 +200,32 @@ private final class PermessageDeflateExtension implements WebSocketClientExtensi private final int serverWindowSize; private final boolean clientNoContext; private final int clientWindowSize; + private final WebSocketExtensionFilterProvider extensionFilterProvider; @Override public int rsv() { return RSV1; } - public PermessageDeflateExtension(boolean serverNoContext, int serverWindowSize, - boolean clientNoContext, int clientWindowSize) { + PermessageDeflateExtension(boolean serverNoContext, int serverWindowSize, + boolean clientNoContext, int clientWindowSize, + WebSocketExtensionFilterProvider extensionFilterProvider) { this.serverNoContext = serverNoContext; this.serverWindowSize = serverWindowSize; this.clientNoContext = clientNoContext; this.clientWindowSize = clientWindowSize; + this.extensionFilterProvider = extensionFilterProvider; } @Override public WebSocketExtensionEncoder newExtensionEncoder() { - return new PerMessageDeflateEncoder(compressionLevel, serverWindowSize, serverNoContext); + return new PerMessageDeflateEncoder(compressionLevel, clientWindowSize, clientNoContext, + extensionFilterProvider.encoderFilter()); } 
@Override public WebSocketExtensionDecoder newExtensionDecoder() { - return new PerMessageDeflateDecoder(clientNoContext); + return new PerMessageDeflateDecoder(serverNoContext, extensionFilterProvider.decoderFilter()); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoder.java index a69294ed22a..5928dc32bcc 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,8 +21,7 @@ import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.WebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; - -import java.util.List; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter; /** * Per-message implementation of deflate decompressor. @@ -33,23 +32,45 @@ class PerMessageDeflateDecoder extends DeflateDecoder { /** * Constructor + * + * @param noContext true to disable context takeover. + */ + PerMessageDeflateDecoder(boolean noContext) { + super(noContext, WebSocketExtensionFilter.NEVER_SKIP); + } + + /** + * Constructor + * * @param noContext true to disable context takeover. + * @param extensionDecoderFilter extension decoder for per message deflate decoder. 
*/ - public PerMessageDeflateDecoder(boolean noContext) { - super(noContext); + PerMessageDeflateDecoder(boolean noContext, WebSocketExtensionFilter extensionDecoderFilter) { + super(noContext, extensionDecoderFilter); } @Override public boolean acceptInboundMessage(Object msg) throws Exception { - return ((msg instanceof TextWebSocketFrame || - msg instanceof BinaryWebSocketFrame) && - (((WebSocketFrame) msg).rsv() & WebSocketExtension.RSV1) > 0) || - (msg instanceof ContinuationWebSocketFrame && compressing); + if (!super.acceptInboundMessage(msg)) { + return false; + } + + WebSocketFrame wsFrame = (WebSocketFrame) msg; + if (extensionDecoderFilter().mustSkip(wsFrame)) { + if (compressing) { + throw new IllegalStateException("Cannot skip per message deflate decoder, compression in progress"); + } + return false; + } + + return ((wsFrame instanceof TextWebSocketFrame || wsFrame instanceof BinaryWebSocketFrame) && + (wsFrame.rsv() & WebSocketExtension.RSV1) > 0) || + (wsFrame instanceof ContinuationWebSocketFrame && compressing); } @Override protected int newRsv(WebSocketFrame msg) { - return (msg.rsv() & WebSocketExtension.RSV1) > 0 ? + return (msg.rsv() & WebSocketExtension.RSV1) > 0? 
msg.rsv() ^ WebSocketExtension.RSV1 : msg.rsv(); } @@ -59,14 +80,15 @@ protected boolean appendFrameTail(WebSocketFrame msg) { } @Override - protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg, - List out) throws Exception { - super.decode(ctx, msg, out); + protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg) throws Exception { + boolean isFinal = msg.isFinalFragment(); + super.decode(ctx, msg); - if (msg.isFinalFragment()) { + if (isFinal) { compressing = false; } else if (msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame) { compressing = true; } } + } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoder.java index b1cdc660fef..9e3d274d8d2 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,6 +21,7 @@ import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.WebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter; import java.util.List; @@ -33,25 +34,50 @@ class PerMessageDeflateEncoder extends DeflateEncoder { /** * Constructor + * * @param compressionLevel compression level of the compressor. * @param windowSize maximum size of the window compressor buffer. * @param noContext true to disable context takeover. */ - public PerMessageDeflateEncoder(int compressionLevel, int windowSize, boolean noContext) { - super(compressionLevel, windowSize, noContext); + PerMessageDeflateEncoder(int compressionLevel, int windowSize, boolean noContext) { + super(compressionLevel, windowSize, noContext, WebSocketExtensionFilter.NEVER_SKIP); + } + + /** + * Constructor + * + * @param compressionLevel compression level of the compressor. + * @param windowSize maximum size of the window compressor buffer. + * @param noContext true to disable context takeover. + * @param extensionEncoderFilter extension filter for per message deflate encoder. 
+ */ + PerMessageDeflateEncoder(int compressionLevel, int windowSize, boolean noContext, + WebSocketExtensionFilter extensionEncoderFilter) { + super(compressionLevel, windowSize, noContext, extensionEncoderFilter); } @Override public boolean acceptOutboundMessage(Object msg) throws Exception { - return ((msg instanceof TextWebSocketFrame || - msg instanceof BinaryWebSocketFrame) && - (((WebSocketFrame) msg).rsv() & WebSocketExtension.RSV1) == 0) || - (msg instanceof ContinuationWebSocketFrame && compressing); + if (!super.acceptOutboundMessage(msg)) { + return false; + } + + WebSocketFrame wsFrame = (WebSocketFrame) msg; + if (extensionEncoderFilter().mustSkip(wsFrame)) { + if (compressing) { + throw new IllegalStateException("Cannot skip per message deflate encoder, compression in progress"); + } + return false; + } + + return ((wsFrame instanceof TextWebSocketFrame || wsFrame instanceof BinaryWebSocketFrame) && + (wsFrame.rsv() & WebSocketExtension.RSV1) == 0) || + (wsFrame instanceof ContinuationWebSocketFrame && compressing); } @Override protected int rsv(WebSocketFrame msg) { - return msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame ? + return msg instanceof TextWebSocketFrame || msg instanceof BinaryWebSocketFrame? 
msg.rsv() | WebSocketExtension.RSV1 : msg.rsv(); } @@ -62,7 +88,7 @@ protected boolean removeFrameTail(WebSocketFrame msg) { @Override protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, - List out) throws Exception { + List out) throws Exception { super.encode(ctx, msg, out); if (msg.isFinalFragment()) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshaker.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshaker.java index 738b6f927d4..63d0f1c03d0 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshaker.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshaker.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,15 +19,17 @@ import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionDecoder; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionEncoder; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilterProvider; import io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtensionHandshaker; import java.util.HashMap; import java.util.Iterator; import java.util.Map.Entry; +import java.util.Objects; /** - * permessage-deflate + * permessage-deflate * handshake implementation. */ public final class PerMessageDeflateServerExtensionHandshaker implements WebSocketServerExtensionHandshaker { @@ -46,6 +48,7 @@ public final class PerMessageDeflateServerExtensionHandshaker implements WebSock private final int preferredClientWindowSize; private final boolean allowServerNoContext; private final boolean preferredClientNoContext; + private final WebSocketExtensionFilterProvider extensionFilterProvider; /** * Constructor with default configuration. @@ -71,9 +74,36 @@ public PerMessageDeflateServerExtensionHandshaker() { * indicates if server prefers to activate client_no_context_takeover * if client is compatible with (default is false). 
*/ - public PerMessageDeflateServerExtensionHandshaker(int compressionLevel, - boolean allowServerWindowSize, int preferredClientWindowSize, + public PerMessageDeflateServerExtensionHandshaker(int compressionLevel, boolean allowServerWindowSize, + int preferredClientWindowSize, boolean allowServerNoContext, boolean preferredClientNoContext) { + this(compressionLevel, allowServerWindowSize, preferredClientWindowSize, allowServerNoContext, + preferredClientNoContext, WebSocketExtensionFilterProvider.DEFAULT); + } + + /** + * Constructor with custom configuration. + * + * @param compressionLevel + * Compression level between 0 and 9 (default is 6). + * @param allowServerWindowSize + * allows WebSocket client to customize the server inflater window size + * (default is false). + * @param preferredClientWindowSize + * indicates the preferred client window size to use if client inflater is customizable. + * @param allowServerNoContext + * allows WebSocket client to activate server_no_context_takeover + * (default is false). + * @param preferredClientNoContext + * indicates if server prefers to activate client_no_context_takeover + * if client is compatible with (default is false). + * @param extensionFilterProvider + * provides server extension filters for per message deflate encoder and decoder. 
+ */ + public PerMessageDeflateServerExtensionHandshaker(int compressionLevel, boolean allowServerWindowSize, + int preferredClientWindowSize, + boolean allowServerNoContext, boolean preferredClientNoContext, + WebSocketExtensionFilterProvider extensionFilterProvider) { if (preferredClientWindowSize > MAX_WINDOW_SIZE || preferredClientWindowSize < MIN_WINDOW_SIZE) { throw new IllegalArgumentException( "preferredServerWindowSize: " + preferredClientWindowSize + " (expected: 8-15)"); @@ -87,6 +117,7 @@ public PerMessageDeflateServerExtensionHandshaker(int compressionLevel, this.preferredClientWindowSize = preferredClientWindowSize; this.allowServerNoContext = allowServerNoContext; this.preferredClientNoContext = preferredClientNoContext; + this.extensionFilterProvider = Objects.requireNonNull(extensionFilterProvider, "extensionFilterProvider"); } @Override @@ -137,7 +168,7 @@ public WebSocketServerExtension handshakeExtension(WebSocketExtensionData extens if (deflateEnabled) { return new PermessageDeflateExtension(compressionLevel, serverNoContext, - serverWindowSize, clientNoContext, clientWindowSize); + serverWindowSize, clientNoContext, clientWindowSize, extensionFilterProvider); } else { return null; } @@ -150,14 +181,17 @@ private static class PermessageDeflateExtension implements WebSocketServerExtens private final int serverWindowSize; private final boolean clientNoContext; private final int clientWindowSize; + private final WebSocketExtensionFilterProvider extensionFilterProvider; - public PermessageDeflateExtension(int compressionLevel, boolean serverNoContext, - int serverWindowSize, boolean clientNoContext, int clientWindowSize) { + PermessageDeflateExtension(int compressionLevel, boolean serverNoContext, + int serverWindowSize, boolean clientNoContext, int clientWindowSize, + WebSocketExtensionFilterProvider extensionFilterProvider) { this.compressionLevel = compressionLevel; this.serverNoContext = serverNoContext; this.serverWindowSize = 
serverWindowSize; this.clientNoContext = clientNoContext; this.clientWindowSize = clientWindowSize; + this.extensionFilterProvider = extensionFilterProvider; } @Override @@ -167,17 +201,18 @@ public int rsv() { @Override public WebSocketExtensionEncoder newExtensionEncoder() { - return new PerMessageDeflateEncoder(compressionLevel, clientWindowSize, clientNoContext); + return new PerMessageDeflateEncoder(compressionLevel, serverWindowSize, serverNoContext, + extensionFilterProvider.encoderFilter()); } @Override public WebSocketExtensionDecoder newExtensionDecoder() { - return new PerMessageDeflateDecoder(serverNoContext); + return new PerMessageDeflateDecoder(clientNoContext, extensionFilterProvider.decoderFilter()); } @Override - public WebSocketExtensionData newReponseData() { - HashMap parameters = new HashMap(4); + public WebSocketExtensionData newResponseData() { + HashMap parameters = new HashMap<>(4); if (serverNoContext) { parameters.put(SERVER_NO_CONTEXT, null); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketClientCompressionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketClientCompressionHandler.java index bfd0375accf..383215b7bcc 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketClientCompressionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketClientCompressionHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandler.java index 2db3fda2f96..ff07d8890e1 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandler.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/package-info.java index 7c7b6dff268..ca029c141ff 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/compression/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,7 +20,7 @@ * This package supports different web socket extensions. * The specification currently supported are: * diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/package-info.java index 83f16b0edb3..89cef750392 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/extensions/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,7 +16,7 @@ /** * Encoder, decoder, handshakers to handle - * WebSocket Extensions. + * WebSocket Extensions. * * See WebSocketServerExtensionHandler for more details. */ diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/package-info.java index 1425a824d6f..6a203364b02 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/http/websocketx/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,16 +16,16 @@ /** * Encoder, decoder, handshakers and their related message types for - * Web Socket data frames. + * Web Socket data frames. *

    * This package supports different web socket specification versions (hence the X suffix). * The specification current supported are: *

    *

    diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspDecoder.java index acc028978f0..b1ebaca5195 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspDecoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -71,16 +71,6 @@ public class RtspDecoder extends HttpObjectDecoder { */ private static final Pattern versionPattern = Pattern.compile("RTSP/\\d\\.\\d"); - /** - * Constant for default max initial line length. - */ - public static final int DEFAULT_MAX_INITIAL_LINE_LENGTH = 4096; - - /** - * Constant for default max header size. - */ - public static final int DEFAULT_MAX_HEADER_SIZE = 8192; - /** * Constant for default max content length. 
*/ @@ -106,7 +96,7 @@ public RtspDecoder() { public RtspDecoder(final int maxInitialLineLength, final int maxHeaderSize, final int maxContentLength) { - super(maxInitialLineLength, maxHeaderSize, maxContentLength * 2, false); + super(maxInitialLineLength, maxHeaderSize, false); } /** @@ -122,7 +112,6 @@ public RtspDecoder(final int maxInitialLineLength, final boolean validateHeaders) { super(maxInitialLineLength, maxHeaderSize, - maxContentLength * 2, false, validateHeaders); } diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspEncoder.java index 4e5be8ea08b..20987beadf7 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspEncoder.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderNames.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderNames.java index 13ca8602ae6..f9c025c31cc 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderNames.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderNames.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderValues.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderValues.java index 5122f8b56e9..8c502768032 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderValues.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaderValues.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaders.java deleted file mode 100644 index 984443da17f..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspHeaders.java +++ /dev/null @@ -1,398 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.rtsp; - -import io.netty.handler.codec.http.HttpHeaders; - - -/** - * @deprecated Use {@link RtspHeaderNames} or {@link RtspHeaderValues} instead. - - * Standard RTSP header names and values. - */ -@Deprecated -@SuppressWarnings("deprecation") -public final class RtspHeaders { - - /** - * @deprecated Use {@link RtspHeaderNames} instead. - * - * Standard RTSP header names. - */ - @Deprecated - public static final class Names { - /** - * {@code "Accept"} - */ - public static final String ACCEPT = HttpHeaders.Names.ACCEPT; - /** - * {@code "Accept-Encoding"} - */ - public static final String ACCEPT_ENCODING = HttpHeaders.Names.ACCEPT_ENCODING; - /** - * {@code "Accept-Language"} - */ - public static final String ACCEPT_LANGUAGE = HttpHeaders.Names.ACCEPT_LANGUAGE; - /** - * {@code "Allow"} - */ - public static final String ALLOW = "Allow"; - /** - * {@code "Authorization"} - */ - public static final String AUTHORIZATION = HttpHeaders.Names.AUTHORIZATION; - /** - * {@code "Bandwidth"} - */ - public static final String BANDWIDTH = "Bandwidth"; - /** - * {@code "Blocksize"} - */ - public static final String BLOCKSIZE = "Blocksize"; - /** - * {@code "Cache-Control"} - */ - public static final String CACHE_CONTROL = HttpHeaders.Names.CACHE_CONTROL; - /** - * {@code "Conference"} - */ - public static final String CONFERENCE = "Conference"; - /** - * {@code "Connection"} - */ - public static final String CONNECTION = HttpHeaders.Names.CONNECTION; - /** - * {@code "Content-Base"} - */ - public static final String CONTENT_BASE = HttpHeaders.Names.CONTENT_BASE; - /** - * {@code "Content-Encoding"} - */ - public static final String CONTENT_ENCODING = HttpHeaders.Names.CONTENT_ENCODING; - /** - * {@code "Content-Language"} - */ - public static final String CONTENT_LANGUAGE = HttpHeaders.Names.CONTENT_LANGUAGE; - /** - * {@code 
"Content-Length"} - */ - public static final String CONTENT_LENGTH = HttpHeaders.Names.CONTENT_LENGTH; - /** - * {@code "Content-Location"} - */ - public static final String CONTENT_LOCATION = HttpHeaders.Names.CONTENT_LOCATION; - /** - * {@code "Content-Type"} - */ - public static final String CONTENT_TYPE = HttpHeaders.Names.CONTENT_TYPE; - /** - * {@code "CSeq"} - */ - public static final String CSEQ = "CSeq"; - /** - * {@code "Date"} - */ - public static final String DATE = HttpHeaders.Names.DATE; - /** - * {@code "Expires"} - */ - public static final String EXPIRES = HttpHeaders.Names.EXPIRES; - /** - * {@code "From"} - */ - public static final String FROM = HttpHeaders.Names.FROM; - /** - * {@code "Host"} - */ - public static final String HOST = HttpHeaders.Names.HOST; - /** - * {@code "If-Match"} - */ - public static final String IF_MATCH = HttpHeaders.Names.IF_MATCH; - /** - * {@code "If-Modified-Since"} - */ - public static final String IF_MODIFIED_SINCE = HttpHeaders.Names.IF_MODIFIED_SINCE; - /** - * {@code "KeyMgmt"} - */ - public static final String KEYMGMT = "KeyMgmt"; - /** - * {@code "Last-Modified"} - */ - public static final String LAST_MODIFIED = HttpHeaders.Names.LAST_MODIFIED; - /** - * {@code "Proxy-Authenticate"} - */ - public static final String PROXY_AUTHENTICATE = HttpHeaders.Names.PROXY_AUTHENTICATE; - /** - * {@code "Proxy-Require"} - */ - public static final String PROXY_REQUIRE = "Proxy-Require"; - /** - * {@code "Public"} - */ - public static final String PUBLIC = "Public"; - /** - * {@code "Range"} - */ - public static final String RANGE = HttpHeaders.Names.RANGE; - /** - * {@code "Referer"} - */ - public static final String REFERER = HttpHeaders.Names.REFERER; - /** - * {@code "Require"} - */ - public static final String REQUIRE = "Require"; - /** - * {@code "Retry-After"} - */ - public static final String RETRT_AFTER = HttpHeaders.Names.RETRY_AFTER; - /** - * {@code "RTP-Info"} - */ - public static final String RTP_INFO = 
"RTP-Info"; - /** - * {@code "Scale"} - */ - public static final String SCALE = "Scale"; - /** - * {@code "Session"} - */ - public static final String SESSION = "Session"; - /** - * {@code "Server"} - */ - public static final String SERVER = HttpHeaders.Names.SERVER; - /** - * {@code "Speed"} - */ - public static final String SPEED = "Speed"; - /** - * {@code "Timestamp"} - */ - public static final String TIMESTAMP = "Timestamp"; - /** - * {@code "Transport"} - */ - public static final String TRANSPORT = "Transport"; - /** - * {@code "Unsupported"} - */ - public static final String UNSUPPORTED = "Unsupported"; - /** - * {@code "User-Agent"} - */ - public static final String USER_AGENT = HttpHeaders.Names.USER_AGENT; - /** - * {@code "Vary"} - */ - public static final String VARY = HttpHeaders.Names.VARY; - /** - * {@code "Via"} - */ - public static final String VIA = HttpHeaders.Names.VIA; - /** - * {@code "WWW-Authenticate"} - */ - public static final String WWW_AUTHENTICATE = HttpHeaders.Names.WWW_AUTHENTICATE; - - private Names() { - } - } - - /** - * @deprecated Use {@link RtspHeaderValues} instead. - * - * Standard RTSP header values. 
- */ - @Deprecated - public static final class Values { - /** - * {@code "append"} - */ - public static final String APPEND = "append"; - /** - * {@code "AVP"} - */ - public static final String AVP = "AVP"; - /** - * {@code "bytes"} - */ - public static final String BYTES = HttpHeaders.Values.BYTES; - /** - * {@code "charset"} - */ - public static final String CHARSET = HttpHeaders.Values.CHARSET; - /** - * {@code "client_port"} - */ - public static final String CLIENT_PORT = "client_port"; - /** - * {@code "clock"} - */ - public static final String CLOCK = "clock"; - /** - * {@code "close"} - */ - public static final String CLOSE = HttpHeaders.Values.CLOSE; - /** - * {@code "compress"} - */ - public static final String COMPRESS = HttpHeaders.Values.COMPRESS; - /** - * {@code "100-continue"} - */ - public static final String CONTINUE = HttpHeaders.Values.CONTINUE; - /** - * {@code "deflate"} - */ - public static final String DEFLATE = HttpHeaders.Values.DEFLATE; - /** - * {@code "destination"} - */ - public static final String DESTINATION = "destination"; - /** - * {@code "gzip"} - */ - public static final String GZIP = HttpHeaders.Values.GZIP; - /** - * {@code "identity"} - */ - public static final String IDENTITY = HttpHeaders.Values.IDENTITY; - /** - * {@code "interleaved"} - */ - public static final String INTERLEAVED = "interleaved"; - /** - * {@code "keep-alive"} - */ - public static final String KEEP_ALIVE = HttpHeaders.Values.KEEP_ALIVE; - /** - * {@code "layers"} - */ - public static final String LAYERS = "layers"; - /** - * {@code "max-age"} - */ - public static final String MAX_AGE = HttpHeaders.Values.MAX_AGE; - /** - * {@code "max-stale"} - */ - public static final String MAX_STALE = HttpHeaders.Values.MAX_STALE; - /** - * {@code "min-fresh"} - */ - public static final String MIN_FRESH = HttpHeaders.Values.MIN_FRESH; - /** - * {@code "mode"} - */ - public static final String MODE = "mode"; - /** - * {@code "multicast"} - */ - public static final String 
MULTICAST = "multicast"; - /** - * {@code "must-revalidate"} - */ - public static final String MUST_REVALIDATE = HttpHeaders.Values.MUST_REVALIDATE; - /** - * {@code "none"} - */ - public static final String NONE = HttpHeaders.Values.NONE; - /** - * {@code "no-cache"} - */ - public static final String NO_CACHE = HttpHeaders.Values.NO_CACHE; - /** - * {@code "no-transform"} - */ - public static final String NO_TRANSFORM = HttpHeaders.Values.NO_TRANSFORM; - /** - * {@code "only-if-cached"} - */ - public static final String ONLY_IF_CACHED = HttpHeaders.Values.ONLY_IF_CACHED; - /** - * {@code "port"} - */ - public static final String PORT = "port"; - /** - * {@code "private"} - */ - public static final String PRIVATE = HttpHeaders.Values.PRIVATE; - /** - * {@code "proxy-revalidate"} - */ - public static final String PROXY_REVALIDATE = HttpHeaders.Values.PROXY_REVALIDATE; - /** - * {@code "public"} - */ - public static final String PUBLIC = HttpHeaders.Values.PUBLIC; - /** - * {@code "RTP"} - */ - public static final String RTP = "RTP"; - /** - * {@code "rtptime"} - */ - public static final String RTPTIME = "rtptime"; - /** - * {@code "seq"} - */ - public static final String SEQ = "seq"; - /** - * {@code "server_port"} - */ - public static final String SERVER_PORT = "server_port"; - /** - * {@code "ssrc"} - */ - public static final String SSRC = "ssrc"; - /** - * {@code "TCP"} - */ - public static final String TCP = "TCP"; - /** - * {@code "time"} - */ - public static final String TIME = "time"; - /** - * {@code "timeout"} - */ - public static final String TIMEOUT = "timeout"; - /** - * {@code "ttl"} - */ - public static final String TTL = "ttl"; - /** - * {@code "UDP"} - */ - public static final String UDP = "UDP"; - /** - * {@code "unicast"} - */ - public static final String UNICAST = "unicast"; - /** - * {@code "url"} - */ - public static final String URL = "url"; - - private Values() { } - } - - private RtspHeaders() { } -} diff --git 
a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspMethods.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspMethods.java index 3e629280124..929eaa1ce6b 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspMethods.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspMethods.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.rtsp; +import static io.netty.util.internal.ObjectUtil.checkNonEmptyAfterTrim; + import io.netty.handler.codec.http.HttpMethod; import java.util.HashMap; @@ -38,64 +40,64 @@ public final class RtspMethods { * The DESCRIBE getMethod retrieves the description of a presentation or * media object identified by the request URL from a server. */ - public static final HttpMethod DESCRIBE = new HttpMethod("DESCRIBE"); + public static final HttpMethod DESCRIBE = HttpMethod.valueOf("DESCRIBE"); /** * The ANNOUNCE posts the description of a presentation or media object * identified by the request URL to a server, or updates the client-side * session description in real-time. */ - public static final HttpMethod ANNOUNCE = new HttpMethod("ANNOUNCE"); + public static final HttpMethod ANNOUNCE = HttpMethod.valueOf("ANNOUNCE"); /** * The SETUP request for a URI specifies the transport mechanism to be * used for the streamed media. */ - public static final HttpMethod SETUP = new HttpMethod("SETUP"); + public static final HttpMethod SETUP = HttpMethod.valueOf("SETUP"); /** * The PLAY getMethod tells the server to start sending data via the * mechanism specified in SETUP. 
*/ - public static final HttpMethod PLAY = new HttpMethod("PLAY"); + public static final HttpMethod PLAY = HttpMethod.valueOf("PLAY"); /** * The PAUSE request causes the stream delivery to be interrupted * (halted) temporarily. */ - public static final HttpMethod PAUSE = new HttpMethod("PAUSE"); + public static final HttpMethod PAUSE = HttpMethod.valueOf("PAUSE"); /** * The TEARDOWN request stops the stream delivery for the given URI, * freeing the resources associated with it. */ - public static final HttpMethod TEARDOWN = new HttpMethod("TEARDOWN"); + public static final HttpMethod TEARDOWN = HttpMethod.valueOf("TEARDOWN"); /** * The GET_PARAMETER request retrieves the value of a parameter of a * presentation or stream specified in the URI. */ - public static final HttpMethod GET_PARAMETER = new HttpMethod("GET_PARAMETER"); + public static final HttpMethod GET_PARAMETER = HttpMethod.valueOf("GET_PARAMETER"); /** * The SET_PARAMETER requests to set the value of a parameter for a * presentation or stream specified by the URI. */ - public static final HttpMethod SET_PARAMETER = new HttpMethod("SET_PARAMETER"); + public static final HttpMethod SET_PARAMETER = HttpMethod.valueOf("SET_PARAMETER"); /** * The REDIRECT request informs the client that it must connect to another * server location. */ - public static final HttpMethod REDIRECT = new HttpMethod("REDIRECT"); + public static final HttpMethod REDIRECT = HttpMethod.valueOf("REDIRECT"); /** * The RECORD getMethod initiates recording a range of media data according to * the presentation description. */ - public static final HttpMethod RECORD = new HttpMethod("RECORD"); + public static final HttpMethod RECORD = HttpMethod.valueOf("RECORD"); - private static final Map methodMap = new HashMap(); + private static final Map methodMap = new HashMap<>(); static { methodMap.put(DESCRIBE.toString(), DESCRIBE); @@ -117,20 +119,12 @@ public final class RtspMethods { * will be returned. 
Otherwise, a new instance will be returned. */ public static HttpMethod valueOf(String name) { - if (name == null) { - throw new NullPointerException("name"); - } - - name = name.trim().toUpperCase(); - if (name.isEmpty()) { - throw new IllegalArgumentException("empty name"); - } - + name = checkNonEmptyAfterTrim(name, "name").toUpperCase(); HttpMethod result = methodMap.get(name); if (result != null) { return result; } else { - return new HttpMethod(name); + return HttpMethod.valueOf(name); } } diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspObjectDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspObjectDecoder.java deleted file mode 100644 index e52c0ce51e5..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspObjectDecoder.java +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.rtsp; - -import io.netty.buffer.ByteBuf; -import io.netty.handler.codec.TooLongFrameException; -import io.netty.handler.codec.http.HttpMessage; -import io.netty.handler.codec.http.HttpObjectDecoder; - -/** - * Decodes {@link ByteBuf}s into RTSP messages represented in - * {@link HttpMessage}s. - *

    - *

    Parameters that prevents excessive memory consumption

    - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
    NameMeaning
    {@code maxInitialLineLength}The maximum length of the initial line - * (e.g. {@code "SETUP / RTSP/1.0"} or {@code "RTSP/1.0 200 OK"}) - * If the length of the initial line exceeds this value, a - * {@link TooLongFrameException} will be raised.
    {@code maxHeaderSize}The maximum length of all headers. If the sum of the length of each - * header exceeds this value, a {@link TooLongFrameException} will be raised.
    {@code maxContentLength}The maximum length of the content. If the content length exceeds this - * value, a {@link TooLongFrameException} will be raised.
    - * - * @deprecated Use {@link RtspDecoder} instead. - */ -@Deprecated -public abstract class RtspObjectDecoder extends HttpObjectDecoder { - - /** - * Creates a new instance with the default - * {@code maxInitialLineLength (4096)}, {@code maxHeaderSize (8192)}, and - * {@code maxContentLength (8192)}. - */ - protected RtspObjectDecoder() { - this(4096, 8192, 8192); - } - - /** - * Creates a new instance with the specified parameters. - */ - protected RtspObjectDecoder(int maxInitialLineLength, int maxHeaderSize, int maxContentLength) { - super(maxInitialLineLength, maxHeaderSize, maxContentLength * 2, false); - } - - protected RtspObjectDecoder( - int maxInitialLineLength, int maxHeaderSize, int maxContentLength, boolean validateHeaders) { - super(maxInitialLineLength, maxHeaderSize, maxContentLength * 2, false, validateHeaders); - } - - @Override - protected boolean isContentAlwaysEmpty(HttpMessage msg) { - // Unlike HTTP, RTSP always assumes zero-length body if Content-Length - // header is absent. - boolean empty = super.isContentAlwaysEmpty(msg); - if (empty) { - return true; - } - if (!msg.headers().contains(RtspHeaderNames.CONTENT_LENGTH)) { - return true; - } - return empty; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspObjectEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspObjectEncoder.java deleted file mode 100644 index 030bde66b8f..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspObjectEncoder.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.rtsp; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandler.Sharable; -import io.netty.handler.codec.http.FullHttpMessage; -import io.netty.handler.codec.http.HttpMessage; -import io.netty.handler.codec.http.HttpObjectEncoder; - -/** - * Encodes an RTSP message represented in {@link FullHttpMessage} into - * a {@link ByteBuf}. - * - * @deprecated Use {@link RtspEncoder} instead. - */ -@Sharable -@Deprecated -public abstract class RtspObjectEncoder extends HttpObjectEncoder { - - /** - * Creates a new instance. - */ - protected RtspObjectEncoder() { - } - - @Override - public boolean acceptOutboundMessage(Object msg) throws Exception { - return msg instanceof FullHttpMessage; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspRequestDecoder.java deleted file mode 100644 index 8d22e6d9aa0..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspRequestDecoder.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.rtsp; - -/** - * @deprecated Use {@link RtspDecoder} directly instead - */ -@Deprecated -public class RtspRequestDecoder extends RtspDecoder { -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspRequestEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspRequestEncoder.java deleted file mode 100644 index d37336d2ac0..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspRequestEncoder.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.rtsp; - -/** - * @deprecated Use {@link RtspEncoder} directly instead - */ -@Deprecated -public class RtspRequestEncoder extends RtspEncoder { -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseDecoder.java deleted file mode 100644 index 9073d86d2cd..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseDecoder.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.rtsp; - -/** - * @deprecated Use {@link RtspDecoder} directly instead - */ -@Deprecated -public class RtspResponseDecoder extends RtspDecoder { -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseEncoder.java deleted file mode 100644 index 1ffa74567fc..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseEncoder.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.rtsp; - -/** - * @deprecated Use {@link RtspEncoder} directly instead - */ -@Deprecated -public class RtspResponseEncoder extends RtspEncoder { -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseStatuses.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseStatuses.java index b428d996e01..8a2ad158a59 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseStatuses.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspResponseStatuses.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspVersions.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspVersions.java index 94d03686547..d8f13c16b74 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspVersions.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/RtspVersions.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.rtsp; +import static java.util.Objects.requireNonNull; + import io.netty.handler.codec.http.HttpVersion; /** @@ -34,9 +36,7 @@ public final class RtspVersions { * Otherwise, a new {@link HttpVersion} instance will be returned. */ public static HttpVersion valueOf(String text) { - if (text == null) { - throw new NullPointerException("text"); - } + requireNonNull(text, "text"); text = text.trim().toUpperCase(); if ("RTSP/1.0".equals(text)) { diff --git a/codec-http/src/main/java/io/netty/handler/codec/rtsp/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/rtsp/package-info.java index b26411b2e5a..5af8ae0882d 100644 --- a/codec-http/src/main/java/io/netty/handler/codec/rtsp/package-info.java +++ b/codec-http/src/main/java/io/netty/handler/codec/rtsp/package-info.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,7 @@ */ /** - * An RTSP + * An RTSP * extension based on the HTTP codec. 
*/ package io.netty.handler.codec.rtsp; diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyDataFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyDataFrame.java deleted file mode 100644 index a1e9d73d1f0..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyDataFrame.java +++ /dev/null @@ -1,161 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.util.IllegalReferenceCountException; -import io.netty.util.internal.StringUtil; - -/** - * The default {@link SpdyDataFrame} implementation. - */ -public class DefaultSpdyDataFrame extends DefaultSpdyStreamFrame implements SpdyDataFrame { - - private final ByteBuf data; - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - */ - public DefaultSpdyDataFrame(int streamId) { - this(streamId, Unpooled.buffer(0)); - } - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - * @param data the payload of the frame. 
Can not exceed {@link SpdyCodecUtil#SPDY_MAX_LENGTH} - */ - public DefaultSpdyDataFrame(int streamId, ByteBuf data) { - super(streamId); - if (data == null) { - throw new NullPointerException("data"); - } - this.data = validate(data); - } - - private static ByteBuf validate(ByteBuf data) { - if (data.readableBytes() > SpdyCodecUtil.SPDY_MAX_LENGTH) { - throw new IllegalArgumentException("data payload cannot exceed " - + SpdyCodecUtil.SPDY_MAX_LENGTH + " bytes"); - } - return data; - } - - @Override - public SpdyDataFrame setStreamId(int streamId) { - super.setStreamId(streamId); - return this; - } - - @Override - public SpdyDataFrame setLast(boolean last) { - super.setLast(last); - return this; - } - - @Override - public ByteBuf content() { - if (data.refCnt() <= 0) { - throw new IllegalReferenceCountException(data.refCnt()); - } - return data; - } - - @Override - public SpdyDataFrame copy() { - return replace(content().copy()); - } - - @Override - public SpdyDataFrame duplicate() { - return replace(content().duplicate()); - } - - @Override - public SpdyDataFrame retainedDuplicate() { - return replace(content().retainedDuplicate()); - } - - @Override - public SpdyDataFrame replace(ByteBuf content) { - SpdyDataFrame frame = new DefaultSpdyDataFrame(streamId(), content); - frame.setLast(isLast()); - return frame; - } - - @Override - public int refCnt() { - return data.refCnt(); - } - - @Override - public SpdyDataFrame retain() { - data.retain(); - return this; - } - - @Override - public SpdyDataFrame retain(int increment) { - data.retain(increment); - return this; - } - - @Override - public SpdyDataFrame touch() { - data.touch(); - return this; - } - - @Override - public SpdyDataFrame touch(Object hint) { - data.touch(hint); - return this; - } - - @Override - public boolean release() { - return data.release(); - } - - @Override - public boolean release(int decrement) { - return data.release(decrement); - } - - @Override - public String toString() { - StringBuilder 
buf = new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append("(last: ") - .append(isLast()) - .append(')') - .append(StringUtil.NEWLINE) - .append("--> Stream-ID = ") - .append(streamId()) - .append(StringUtil.NEWLINE) - .append("--> Size = "); - if (refCnt() == 0) { - buf.append("(freed)"); - } else { - buf.append(content().readableBytes()); - } - return buf.toString(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyGoAwayFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyGoAwayFrame.java deleted file mode 100644 index 4d88875a6e8..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyGoAwayFrame.java +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -/** - * The default {@link SpdyGoAwayFrame} implementation. - */ -public class DefaultSpdyGoAwayFrame implements SpdyGoAwayFrame { - - private int lastGoodStreamId; - private SpdySessionStatus status; - - /** - * Creates a new instance. - * - * @param lastGoodStreamId the Last-good-stream-ID of this frame - */ - public DefaultSpdyGoAwayFrame(int lastGoodStreamId) { - this(lastGoodStreamId, 0); - } - - /** - * Creates a new instance. 
- * - * @param lastGoodStreamId the Last-good-stream-ID of this frame - * @param statusCode the Status code of this frame - */ - public DefaultSpdyGoAwayFrame(int lastGoodStreamId, int statusCode) { - this(lastGoodStreamId, SpdySessionStatus.valueOf(statusCode)); - } - - /** - * Creates a new instance. - * - * @param lastGoodStreamId the Last-good-stream-ID of this frame - * @param status the status of this frame - */ - public DefaultSpdyGoAwayFrame(int lastGoodStreamId, SpdySessionStatus status) { - setLastGoodStreamId(lastGoodStreamId); - setStatus(status); - } - - @Override - public int lastGoodStreamId() { - return lastGoodStreamId; - } - - @Override - public SpdyGoAwayFrame setLastGoodStreamId(int lastGoodStreamId) { - if (lastGoodStreamId < 0) { - throw new IllegalArgumentException("Last-good-stream-ID" - + " cannot be negative: " + lastGoodStreamId); - } - this.lastGoodStreamId = lastGoodStreamId; - return this; - } - - @Override - public SpdySessionStatus status() { - return status; - } - - @Override - public SpdyGoAwayFrame setStatus(SpdySessionStatus status) { - this.status = status; - return this; - } - - @Override - public String toString() { - return new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append(StringUtil.NEWLINE) - .append("--> Last-good-stream-ID = ") - .append(lastGoodStreamId()) - .append(StringUtil.NEWLINE) - .append("--> Status: ") - .append(status()) - .toString(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyHeaders.java deleted file mode 100644 index 7eed710fa66..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyHeaders.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the 
License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.handler.codec.CharSequenceValueConverter; -import io.netty.handler.codec.DefaultHeaders; -import io.netty.handler.codec.HeadersUtils; - -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; - -import static io.netty.util.AsciiString.CASE_INSENSITIVE_HASHER; -import static io.netty.util.AsciiString.CASE_SENSITIVE_HASHER; - -public class DefaultSpdyHeaders extends DefaultHeaders implements SpdyHeaders { - private static final NameValidator SpdyNameValidator = new NameValidator() { - @Override - public void validateName(CharSequence name) { - SpdyCodecUtil.validateHeaderName(name); - } - }; - - public DefaultSpdyHeaders() { - this(true); - } - - @SuppressWarnings("unchecked") - public DefaultSpdyHeaders(boolean validate) { - super(CASE_INSENSITIVE_HASHER, - validate ? HeaderValueConverterAndValidator.INSTANCE : CharSequenceValueConverter.INSTANCE, - validate ? 
SpdyNameValidator : NameValidator.NOT_NULL); - } - - @Override - public String getAsString(CharSequence name) { - return HeadersUtils.getAsString(this, name); - } - - @Override - public List getAllAsString(CharSequence name) { - return HeadersUtils.getAllAsString(this, name); - } - - @Override - public Iterator> iteratorAsString() { - return HeadersUtils.iteratorAsString(this); - } - - @Override - public boolean contains(CharSequence name, CharSequence value) { - return contains(name, value, false); - } - - @Override - public boolean contains(CharSequence name, CharSequence value, boolean ignoreCase) { - return contains(name, value, - ignoreCase ? CASE_INSENSITIVE_HASHER : CASE_SENSITIVE_HASHER); - } - - private static final class HeaderValueConverterAndValidator extends CharSequenceValueConverter { - public static final HeaderValueConverterAndValidator INSTANCE = new HeaderValueConverterAndValidator(); - - @Override - public CharSequence convertObject(Object value) { - final CharSequence seq = super.convertObject(value); - SpdyCodecUtil.validateHeaderValue(seq); - return seq; - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyHeadersFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyHeadersFrame.java deleted file mode 100644 index f177144393b..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyHeadersFrame.java +++ /dev/null @@ -1,120 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -import java.util.Map; - -/** - * The default {@link SpdyHeadersFrame} implementation. - */ -public class DefaultSpdyHeadersFrame extends DefaultSpdyStreamFrame - implements SpdyHeadersFrame { - - private boolean invalid; - private boolean truncated; - private final SpdyHeaders headers; - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - */ - public DefaultSpdyHeadersFrame(int streamId) { - this(streamId, true); - } - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - * @param validate validate the header names and values when adding them to the {@link SpdyHeaders} - */ - public DefaultSpdyHeadersFrame(int streamId, boolean validate) { - super(streamId); - headers = new DefaultSpdyHeaders(validate); - } - - @Override - public SpdyHeadersFrame setStreamId(int streamId) { - super.setStreamId(streamId); - return this; - } - - @Override - public SpdyHeadersFrame setLast(boolean last) { - super.setLast(last); - return this; - } - - @Override - public boolean isInvalid() { - return invalid; - } - - @Override - public SpdyHeadersFrame setInvalid() { - invalid = true; - return this; - } - - @Override - public boolean isTruncated() { - return truncated; - } - - @Override - public SpdyHeadersFrame setTruncated() { - truncated = true; - return this; - } - - @Override - public SpdyHeaders headers() { - return headers; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append("(last: ") - .append(isLast()) - .append(')') - .append(StringUtil.NEWLINE) - .append("--> Stream-ID = ") - .append(streamId()) - .append(StringUtil.NEWLINE) - .append("--> Headers:") - .append(StringUtil.NEWLINE); - appendHeaders(buf); - - // Remove the 
last newline. - buf.setLength(buf.length() - StringUtil.NEWLINE.length()); - return buf.toString(); - } - - protected void appendHeaders(StringBuilder buf) { - for (Map.Entry e: headers()) { - buf.append(" "); - buf.append(e.getKey()); - buf.append(": "); - buf.append(e.getValue()); - buf.append(StringUtil.NEWLINE); - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyPingFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyPingFrame.java deleted file mode 100644 index 37c8a413981..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyPingFrame.java +++ /dev/null @@ -1,56 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -/** - * The default {@link SpdyPingFrame} implementation. - */ -public class DefaultSpdyPingFrame implements SpdyPingFrame { - - private int id; - - /** - * Creates a new instance. 
- * - * @param id the unique ID of this frame - */ - public DefaultSpdyPingFrame(int id) { - setId(id); - } - - @Override - public int id() { - return id; - } - - @Override - public SpdyPingFrame setId(int id) { - this.id = id; - return this; - } - - @Override - public String toString() { - return new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append(StringUtil.NEWLINE) - .append("--> ID = ") - .append(id()) - .toString(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyRstStreamFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyRstStreamFrame.java deleted file mode 100644 index a884295bdf7..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyRstStreamFrame.java +++ /dev/null @@ -1,84 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -/** - * The default {@link SpdyRstStreamFrame} implementation. - */ -public class DefaultSpdyRstStreamFrame extends DefaultSpdyStreamFrame - implements SpdyRstStreamFrame { - - private SpdyStreamStatus status; - - /** - * Creates a new instance. 
- * - * @param streamId the Stream-ID of this frame - * @param statusCode the Status code of this frame - */ - public DefaultSpdyRstStreamFrame(int streamId, int statusCode) { - this(streamId, SpdyStreamStatus.valueOf(statusCode)); - } - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - * @param status the status of this frame - */ - public DefaultSpdyRstStreamFrame(int streamId, SpdyStreamStatus status) { - super(streamId); - setStatus(status); - } - - @Override - public SpdyRstStreamFrame setStreamId(int streamId) { - super.setStreamId(streamId); - return this; - } - - @Override - public SpdyRstStreamFrame setLast(boolean last) { - super.setLast(last); - return this; - } - - @Override - public SpdyStreamStatus status() { - return status; - } - - @Override - public SpdyRstStreamFrame setStatus(SpdyStreamStatus status) { - this.status = status; - return this; - } - - @Override - public String toString() { - return new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append(StringUtil.NEWLINE) - .append("--> Stream-ID = ") - .append(streamId()) - .append(StringUtil.NEWLINE) - .append("--> Status: ") - .append(status()) - .toString(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySettingsFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySettingsFrame.java deleted file mode 100644 index 5097048601b..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySettingsFrame.java +++ /dev/null @@ -1,184 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -import java.util.Map; -import java.util.Set; -import java.util.TreeMap; - -/** - * The default {@link SpdySettingsFrame} implementation. - */ -public class DefaultSpdySettingsFrame implements SpdySettingsFrame { - - private boolean clear; - private final Map settingsMap = new TreeMap(); - - @Override - public Set ids() { - return settingsMap.keySet(); - } - - @Override - public boolean isSet(int id) { - return settingsMap.containsKey(id); - } - - @Override - public int getValue(int id) { - final Setting setting = settingsMap.get(id); - return setting != null ? 
setting.getValue() : -1; - } - - @Override - public SpdySettingsFrame setValue(int id, int value) { - return setValue(id, value, false, false); - } - - @Override - public SpdySettingsFrame setValue(int id, int value, boolean persistValue, boolean persisted) { - if (id < 0 || id > SpdyCodecUtil.SPDY_SETTINGS_MAX_ID) { - throw new IllegalArgumentException("Setting ID is not valid: " + id); - } - final Integer key = Integer.valueOf(id); - final Setting setting = settingsMap.get(key); - if (setting != null) { - setting.setValue(value); - setting.setPersist(persistValue); - setting.setPersisted(persisted); - } else { - settingsMap.put(key, new Setting(value, persistValue, persisted)); - } - return this; - } - - @Override - public SpdySettingsFrame removeValue(int id) { - settingsMap.remove(id); - return this; - } - - @Override - public boolean isPersistValue(int id) { - final Setting setting = settingsMap.get(id); - return setting != null && setting.isPersist(); - } - - @Override - public SpdySettingsFrame setPersistValue(int id, boolean persistValue) { - final Setting setting = settingsMap.get(id); - if (setting != null) { - setting.setPersist(persistValue); - } - return this; - } - - @Override - public boolean isPersisted(int id) { - final Setting setting = settingsMap.get(id); - return setting != null && setting.isPersisted(); - } - - @Override - public SpdySettingsFrame setPersisted(int id, boolean persisted) { - final Setting setting = settingsMap.get(id); - if (setting != null) { - setting.setPersisted(persisted); - } - return this; - } - - @Override - public boolean clearPreviouslyPersistedSettings() { - return clear; - } - - @Override - public SpdySettingsFrame setClearPreviouslyPersistedSettings(boolean clear) { - this.clear = clear; - return this; - } - - private Set> getSettings() { - return settingsMap.entrySet(); - } - - private void appendSettings(StringBuilder buf) { - for (Map.Entry e: getSettings()) { - Setting setting = e.getValue(); - buf.append("--> 
"); - buf.append(e.getKey()); - buf.append(':'); - buf.append(setting.getValue()); - buf.append(" (persist value: "); - buf.append(setting.isPersist()); - buf.append("; persisted: "); - buf.append(setting.isPersisted()); - buf.append(')'); - buf.append(StringUtil.NEWLINE); - } - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append(StringUtil.NEWLINE); - appendSettings(buf); - - buf.setLength(buf.length() - StringUtil.NEWLINE.length()); - return buf.toString(); - } - - private static final class Setting { - - private int value; - private boolean persist; - private boolean persisted; - - Setting(int value, boolean persist, boolean persisted) { - this.value = value; - this.persist = persist; - this.persisted = persisted; - } - - int getValue() { - return value; - } - - void setValue(int value) { - this.value = value; - } - - boolean isPersist() { - return persist; - } - - void setPersist(boolean persist) { - this.persist = persist; - } - - boolean isPersisted() { - return persisted; - } - - void setPersisted(boolean persisted) { - this.persisted = persisted; - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyStreamFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyStreamFrame.java deleted file mode 100644 index 4618d4d4a95..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyStreamFrame.java +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * The default {@link SpdyStreamFrame} implementation. - */ -public abstract class DefaultSpdyStreamFrame implements SpdyStreamFrame { - - private int streamId; - private boolean last; - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - */ - protected DefaultSpdyStreamFrame(int streamId) { - setStreamId(streamId); - } - - @Override - public int streamId() { - return streamId; - } - - @Override - public SpdyStreamFrame setStreamId(int streamId) { - if (streamId <= 0) { - throw new IllegalArgumentException( - "Stream-ID must be positive: " + streamId); - } - this.streamId = streamId; - return this; - } - - @Override - public boolean isLast() { - return last; - } - - @Override - public SpdyStreamFrame setLast(boolean last) { - this.last = last; - return this; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySynReplyFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySynReplyFrame.java deleted file mode 100644 index 7efc905641e..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySynReplyFrame.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -/** - * The default {@link SpdySynReplyFrame} implementation. - */ -public class DefaultSpdySynReplyFrame extends DefaultSpdyHeadersFrame - implements SpdySynReplyFrame { - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - */ - public DefaultSpdySynReplyFrame(int streamId) { - super(streamId); - } - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - * @param validateHeaders validate the header names and values when adding them to the {@link SpdyHeaders} - */ - public DefaultSpdySynReplyFrame(int streamId, boolean validateHeaders) { - super(streamId, validateHeaders); - } - - @Override - public SpdySynReplyFrame setStreamId(int streamId) { - super.setStreamId(streamId); - return this; - } - - @Override - public SpdySynReplyFrame setLast(boolean last) { - super.setLast(last); - return this; - } - - @Override - public SpdySynReplyFrame setInvalid() { - super.setInvalid(); - return this; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append("(last: ") - .append(isLast()) - .append(')') - .append(StringUtil.NEWLINE) - .append("--> Stream-ID = ") - .append(streamId()) - .append(StringUtil.NEWLINE) - .append("--> Headers:") - .append(StringUtil.NEWLINE); - appendHeaders(buf); - - // Remove the last newline. 
- buf.setLength(buf.length() - StringUtil.NEWLINE.length()); - return buf.toString(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySynStreamFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySynStreamFrame.java deleted file mode 100644 index f8adc1c5f1c..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdySynStreamFrame.java +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -/** - * The default {@link SpdySynStreamFrame} implementation. - */ -public class DefaultSpdySynStreamFrame extends DefaultSpdyHeadersFrame - implements SpdySynStreamFrame { - - private int associatedStreamId; - private byte priority; - private boolean unidirectional; - - /** - * Creates a new instance. - * - * @param streamId the Stream-ID of this frame - * @param associatedStreamId the Associated-To-Stream-ID of this frame - * @param priority the priority of the stream - */ - public DefaultSpdySynStreamFrame(int streamId, int associatedStreamId, byte priority) { - this(streamId, associatedStreamId, priority, true); - } - - /** - * Creates a new instance. 
- * - * @param streamId the Stream-ID of this frame - * @param associatedStreamId the Associated-To-Stream-ID of this frame - * @param priority the priority of the stream - * @param validateHeaders validate the header names and values when adding them to the {@link SpdyHeaders} - */ - public DefaultSpdySynStreamFrame(int streamId, int associatedStreamId, byte priority, boolean validateHeaders) { - super(streamId, validateHeaders); - setAssociatedStreamId(associatedStreamId); - setPriority(priority); - } - - @Override - public SpdySynStreamFrame setStreamId(int streamId) { - super.setStreamId(streamId); - return this; - } - - @Override - public SpdySynStreamFrame setLast(boolean last) { - super.setLast(last); - return this; - } - - @Override - public SpdySynStreamFrame setInvalid() { - super.setInvalid(); - return this; - } - - @Override - public int associatedStreamId() { - return associatedStreamId; - } - - @Override - public SpdySynStreamFrame setAssociatedStreamId(int associatedStreamId) { - if (associatedStreamId < 0) { - throw new IllegalArgumentException( - "Associated-To-Stream-ID cannot be negative: " + - associatedStreamId); - } - this.associatedStreamId = associatedStreamId; - return this; - } - - @Override - public byte priority() { - return priority; - } - - @Override - public SpdySynStreamFrame setPriority(byte priority) { - if (priority < 0 || priority > 7) { - throw new IllegalArgumentException( - "Priority must be between 0 and 7 inclusive: " + priority); - } - this.priority = priority; - return this; - } - - @Override - public boolean isUnidirectional() { - return unidirectional; - } - - @Override - public SpdySynStreamFrame setUnidirectional(boolean unidirectional) { - this.unidirectional = unidirectional; - return this; - } - - @Override - public String toString() { - StringBuilder buf = new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append("(last: ") - .append(isLast()) - .append("; unidirectional: ") - 
.append(isUnidirectional()) - .append(')') - .append(StringUtil.NEWLINE) - .append("--> Stream-ID = ") - .append(streamId()) - .append(StringUtil.NEWLINE); - if (associatedStreamId != 0) { - buf.append("--> Associated-To-Stream-ID = ") - .append(associatedStreamId()) - .append(StringUtil.NEWLINE); - } - buf.append("--> Priority = ") - .append(priority()) - .append(StringUtil.NEWLINE) - .append("--> Headers:") - .append(StringUtil.NEWLINE); - appendHeaders(buf); - - // Remove the last newline. - buf.setLength(buf.length() - StringUtil.NEWLINE.length()); - return buf.toString(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyWindowUpdateFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyWindowUpdateFrame.java deleted file mode 100644 index f14611bac61..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/DefaultSpdyWindowUpdateFrame.java +++ /dev/null @@ -1,82 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.internal.StringUtil; - -/** - * The default {@link SpdyWindowUpdateFrame} implementation. - */ -public class DefaultSpdyWindowUpdateFrame implements SpdyWindowUpdateFrame { - - private int streamId; - private int deltaWindowSize; - - /** - * Creates a new instance. 
- * - * @param streamId the Stream-ID of this frame - * @param deltaWindowSize the Delta-Window-Size of this frame - */ - public DefaultSpdyWindowUpdateFrame(int streamId, int deltaWindowSize) { - setStreamId(streamId); - setDeltaWindowSize(deltaWindowSize); - } - - @Override - public int streamId() { - return streamId; - } - - @Override - public SpdyWindowUpdateFrame setStreamId(int streamId) { - if (streamId < 0) { - throw new IllegalArgumentException( - "Stream-ID cannot be negative: " + streamId); - } - this.streamId = streamId; - return this; - } - - @Override - public int deltaWindowSize() { - return deltaWindowSize; - } - - @Override - public SpdyWindowUpdateFrame setDeltaWindowSize(int deltaWindowSize) { - if (deltaWindowSize <= 0) { - throw new IllegalArgumentException( - "Delta-Window-Size must be positive: " + - deltaWindowSize); - } - this.deltaWindowSize = deltaWindowSize; - return this; - } - - @Override - public String toString() { - return new StringBuilder() - .append(StringUtil.simpleClassName(this)) - .append(StringUtil.NEWLINE) - .append("--> Stream-ID = ") - .append(streamId()) - .append(StringUtil.NEWLINE) - .append("--> Delta-Window-Size = ") - .append(deltaWindowSize()) - .toString(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyCodecUtil.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyCodecUtil.java deleted file mode 100644 index b25167c3a9f..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyCodecUtil.java +++ /dev/null @@ -1,333 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; - -final class SpdyCodecUtil { - - static final int SPDY_SESSION_STREAM_ID = 0; - - static final int SPDY_HEADER_TYPE_OFFSET = 2; - static final int SPDY_HEADER_FLAGS_OFFSET = 4; - static final int SPDY_HEADER_LENGTH_OFFSET = 5; - static final int SPDY_HEADER_SIZE = 8; - - static final int SPDY_MAX_LENGTH = 0xFFFFFF; // Length is a 24-bit field - - static final byte SPDY_DATA_FLAG_FIN = 0x01; - - static final int SPDY_DATA_FRAME = 0; - static final int SPDY_SYN_STREAM_FRAME = 1; - static final int SPDY_SYN_REPLY_FRAME = 2; - static final int SPDY_RST_STREAM_FRAME = 3; - static final int SPDY_SETTINGS_FRAME = 4; - static final int SPDY_PUSH_PROMISE_FRAME = 5; - static final int SPDY_PING_FRAME = 6; - static final int SPDY_GOAWAY_FRAME = 7; - static final int SPDY_HEADERS_FRAME = 8; - static final int SPDY_WINDOW_UPDATE_FRAME = 9; - - static final byte SPDY_FLAG_FIN = 0x01; - static final byte SPDY_FLAG_UNIDIRECTIONAL = 0x02; - - static final byte SPDY_SETTINGS_CLEAR = 0x01; - static final byte SPDY_SETTINGS_PERSIST_VALUE = 0x01; - static final byte SPDY_SETTINGS_PERSISTED = 0x02; - - static final int SPDY_SETTINGS_MAX_ID = 0xFFFFFF; // ID is a 24-bit field - - static final int SPDY_MAX_NV_LENGTH = 0xFFFF; // Length is a 16-bit field - - // Zlib Dictionary - static final byte[] SPDY_DICT = { - 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, // - - - - o p t i - 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, // o n s - - - - h - 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, 
// e a d - - - - p - 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, // o s t - - - - p - 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, // u t - - - - d e - 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, // l e t e - - - - - 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, // t r a c e - - - - 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, // - a c c e p t - - 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, // - - - a c c e p - 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, // t - c h a r s e - 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, // t - - - - a c c - 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, // e p t - e n c o - 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, // d i n g - - - - - 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, // a c c e p t - l - 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, // a n g u a g e - - 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, // - - - a c c e p - 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, // t - r a n g e s - 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, // - - - - a g e - - 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, // - - - a l l o w - 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, // - - - - a u t h - 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, // o r i z a t i o - 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, // n - - - - c a c - 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, // h e - c o n t r - 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, // o l - - - - c o - 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, // n n e c t i o n - 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t - 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, // e n t - b a s e - 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t - 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, // e n t - e n c o - 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, // d i n g - - - - - 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, // c o n t e n t - - 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 
0x65, // l a n g u a g e - 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t - 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, // e n t - l e n g - 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, // t h - - - - c o - 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, // n t e n t - l o - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, // c a t i o n - - - 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, // - - c o n t e n - 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, // t - m d 5 - - - - 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, // - c o n t e n t - 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, // - r a n g e - - - 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, // - - c o n t e n - 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, // t - t y p e - - - 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, // - - d a t e - - - 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, // - - e t a g - - - 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, // - - e x p e c t - 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, // - - - - e x p i - 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, // r e s - - - - f - 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, // r o m - - - - h - 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, // o s t - - - - i - 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, // f - m a t c h - - 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, // - - - i f - m o - 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, // d i f i e d - s - 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, // i n c e - - - - - 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, // i f - n o n e - - 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, // m a t c h - - - - 0x08, 0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, // - i f - r a n g - 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, // e - - - - i f - - 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, // u n m o d i f i - 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, // e d - s i n c e - 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 
0x73, 0x74, // - - - - l a s t - 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, // - m o d i f i e - 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, // d - - - - l o c - 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, // a t i o n - - - - 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, // - m a x - f o r - 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, // w a r d s - - - - 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, // - p r a g m a - - 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, // - - - p r o x y - 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, // - a u t h e n t - 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, // i c a t e - - - - 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, // - p r o x y - a - 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, // u t h o r i z a - 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, // t i o n - - - - - 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, // r a n g e - - - - 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, // - r e f e r e r - 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, // - - - - r e t r - 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, // y - a f t e r - - 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, // - - - s e r v e - 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, // r - - - - t e - - 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, // - - - t r a i l - 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, // e r - - - - t r - 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, // a n s f e r - e - 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, // n c o d i n g - - 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, // - - - u p g r a - 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, // d e - - - - u s - 0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, // e r - a g e n t - 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, // - - - - v a r y - 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, // - - - - v i a - - 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, // - - - w a r n i - 0x6e, 0x67, 0x00, 0x00, 0x00, 
0x10, 0x77, 0x77, // n g - - - - w w - 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, // w - a u t h e n - 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, // t i c a t e - - - 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, // - - m e t h o d - 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, // - - - - g e t - - 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, // - - - s t a t u - 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, // s - - - - 2 0 0 - 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, // - O K - - - - v - 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, // e r s i o n - - - 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, // - - H T T P - 1 - 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, // - 1 - - - - u r - 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, // l - - - - p u b - 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, // l i c - - - - s - 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, // e t - c o o k i - 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, // e - - - - k e e - 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, // p - a l i v e - - 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, // - - - o r i g i - 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, // n 1 0 0 1 0 1 2 - 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, // 0 1 2 0 2 2 0 5 - 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, // 2 0 6 3 0 0 3 0 - 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, // 2 3 0 3 3 0 4 3 - 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, // 0 5 3 0 6 3 0 7 - 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, // 4 0 2 4 0 5 4 0 - 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, // 6 4 0 7 4 0 8 4 - 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, // 0 9 4 1 0 4 1 1 - 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, // 4 1 2 4 1 3 4 1 - 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, // 4 4 1 5 4 1 6 4 - 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, // 1 7 5 0 2 5 0 4 - 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, // 5 0 5 2 0 3 - N - 0x6f, 0x6e, 0x2d, 0x41, 
0x75, 0x74, 0x68, 0x6f, // o n - A u t h o - 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, // r i t a t i v e - 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, // - I n f o r m a - 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, // t i o n 2 0 4 - - 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, // N o - C o n t e - 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, // n t 3 0 1 - M o - 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, // v e d - P e r m - 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, // a n e n t l y 4 - 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, // 0 0 - B a d - R - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, // e q u e s t 4 0 - 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, // 1 - U n a u t h - 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, // o r i z e d 4 0 - 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, // 3 - F o r b i d - 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, // d e n 4 0 4 - N - 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, // o t - F o u n d - 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, // 5 0 0 - I n t e - 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, // r n a l - S e r - 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, // v e r - E r r o - 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, // r 5 0 1 - N o t - 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, // - I m p l e m e - 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, // n t e d 5 0 3 - - 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, // S e r v i c e - - 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, // U n a v a i l a - 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, // b l e J a n - F - 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, // e b - M a r - A - 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, // p r - M a y - J - 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, // u n - J u l - A - 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, // u g - S e p t - - 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, // O c t - N o v - - 0x44, 0x65, 0x63, 
0x20, 0x30, 0x30, 0x3a, 0x30, // D e c - 0 0 - 0 - 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, // 0 - 0 0 - M o n - 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, // - - T u e - - W - 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, // e d - - T h u - - 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, // - F r i - - S a - 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, // t - - S u n - - - 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, // G M T c h u n k - 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, // e d - t e x t - - 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, // h t m l - i m a - 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, // g e - p n g - i - 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, // m a g e - j p g - 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, // - i m a g e - g - 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, // i f - a p p l i - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, // c a t i o n - x - 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, // m l - a p p l i - 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, // c a t i o n - x - 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, // h t m l - x m l - 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, // - t e x t - p l - 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, // a i n - t e x t - 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, // - j a v a s c r - 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, // i p t - p u b l - 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, // i c p r i v a t - 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, // e m a x - a g e - 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, // - g z i p - d e - 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, // f l a t e - s d - 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, // c h c h a r s e - 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, // t - u t f - 8 c - 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, // h a r s e t - i - 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, // s o - 8 8 5 9 - - 0x31, 0x2c, 
0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, // 1 - u t f - - - - 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e // - e n q - 0 - - }; - - private SpdyCodecUtil() { - } - - /** - * Reads a big-endian unsigned short integer from the buffer. - */ - static int getUnsignedShort(ByteBuf buf, int offset) { - return (buf.getByte(offset) & 0xFF) << 8 | - buf.getByte(offset + 1) & 0xFF; - } - - /** - * Reads a big-endian unsigned medium integer from the buffer. - */ - static int getUnsignedMedium(ByteBuf buf, int offset) { - return (buf.getByte(offset) & 0xFF) << 16 | - (buf.getByte(offset + 1) & 0xFF) << 8 | - buf.getByte(offset + 2) & 0xFF; - } - - /** - * Reads a big-endian (31-bit) integer from the buffer. - */ - static int getUnsignedInt(ByteBuf buf, int offset) { - return (buf.getByte(offset) & 0x7F) << 24 | - (buf.getByte(offset + 1) & 0xFF) << 16 | - (buf.getByte(offset + 2) & 0xFF) << 8 | - buf.getByte(offset + 3) & 0xFF; - } - - /** - * Reads a big-endian signed integer from the buffer. - */ - static int getSignedInt(ByteBuf buf, int offset) { - return (buf.getByte(offset) & 0xFF) << 24 | - (buf.getByte(offset + 1) & 0xFF) << 16 | - (buf.getByte(offset + 2) & 0xFF) << 8 | - buf.getByte(offset + 3) & 0xFF; - } - - /** - * Returns {@code true} if ID is for a server initiated stream or ping. - */ - static boolean isServerId(int id) { - // Server initiated streams and pings have even IDs - return id % 2 == 0; - } - - /** - * Validate a SPDY header name. - */ - static void validateHeaderName(CharSequence name) { - if (name == null) { - throw new NullPointerException("name"); - } - if (name.length() == 0) { - throw new IllegalArgumentException( - "name cannot be length zero"); - } - // Since name may only contain ascii characters, for valid names - // name.length() returns the number of bytes when UTF-8 encoded. 
- if (name.length() > SPDY_MAX_NV_LENGTH) { - throw new IllegalArgumentException( - "name exceeds allowable length: " + name); - } - for (int i = 0; i < name.length(); i ++) { - char c = name.charAt(i); - if (c == 0) { - throw new IllegalArgumentException( - "name contains null character: " + name); - } - if (c >= 'A' && c <= 'Z') { - throw new IllegalArgumentException("name must be all lower case."); - } - if (c > 127) { - throw new IllegalArgumentException( - "name contains non-ascii character: " + name); - } - } - } - - /** - * Validate a SPDY header value. Does not validate max length. - */ - static void validateHeaderValue(CharSequence value) { - if (value == null) { - throw new NullPointerException("value"); - } - for (int i = 0; i < value.length(); i ++) { - char c = value.charAt(i); - if (c == 0) { - throw new IllegalArgumentException( - "value contains null character: " + value); - } - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyDataFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyDataFrame.java deleted file mode 100644 index b112c5ea669..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyDataFrame.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufHolder; -import io.netty.buffer.Unpooled; - -/** - * A SPDY Protocol DATA Frame - */ -public interface SpdyDataFrame extends ByteBufHolder, SpdyStreamFrame { - - @Override - SpdyDataFrame setStreamId(int streamID); - - @Override - SpdyDataFrame setLast(boolean last); - - /** - * Returns the data payload of this frame. If there is no data payload - * {@link Unpooled#EMPTY_BUFFER} is returned. - * - * The data payload cannot exceed 16777215 bytes. - */ - @Override - ByteBuf content(); - - @Override - SpdyDataFrame copy(); - - @Override - SpdyDataFrame duplicate(); - - @Override - SpdyDataFrame retainedDuplicate(); - - @Override - SpdyDataFrame replace(ByteBuf content); - - @Override - SpdyDataFrame retain(); - - @Override - SpdyDataFrame retain(int increment); - - @Override - SpdyDataFrame touch(); - - @Override - SpdyDataFrame touch(Object hint); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrame.java deleted file mode 100644 index df0b72f7ebd..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrame.java +++ /dev/null @@ -1,23 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol Frame - */ -public interface SpdyFrame { - // Tag interface -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameCodec.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameCodec.java deleted file mode 100644 index 057c7507a17..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameCodec.java +++ /dev/null @@ -1,410 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelOutboundHandler; -import io.netty.channel.ChannelPromise; -import io.netty.handler.codec.ByteToMessageDecoder; -import io.netty.handler.codec.UnsupportedMessageTypeException; - -import java.net.SocketAddress; -import java.util.List; - -/** - * A {@link ChannelHandler} that encodes and decodes SPDY Frames. 
- */ -public class SpdyFrameCodec extends ByteToMessageDecoder - implements SpdyFrameDecoderDelegate, ChannelOutboundHandler { - - private static final SpdyProtocolException INVALID_FRAME = - new SpdyProtocolException("Received invalid frame"); - - private final SpdyFrameDecoder spdyFrameDecoder; - private final SpdyFrameEncoder spdyFrameEncoder; - private final SpdyHeaderBlockDecoder spdyHeaderBlockDecoder; - private final SpdyHeaderBlockEncoder spdyHeaderBlockEncoder; - - private SpdyHeadersFrame spdyHeadersFrame; - private SpdySettingsFrame spdySettingsFrame; - - private ChannelHandlerContext ctx; - private boolean read; - private final boolean validateHeaders; - - /** - * Creates a new instance with the specified {@code version}, - * {@code validateHeaders (true)}, and - * the default decoder and encoder options - * ({@code maxChunkSize (8192)}, {@code maxHeaderSize (16384)}, - * {@code compressionLevel (6)}, {@code windowBits (15)}, - * and {@code memLevel (8)}). - */ - public SpdyFrameCodec(SpdyVersion version) { - this(version, true); - } - - /** - * Creates a new instance with the specified {@code version}, - * {@code validateHeaders}, and - * the default decoder and encoder options - * ({@code maxChunkSize (8192)}, {@code maxHeaderSize (16384)}, - * {@code compressionLevel (6)}, {@code windowBits (15)}, - * and {@code memLevel (8)}). - */ - public SpdyFrameCodec(SpdyVersion version, boolean validateHeaders) { - this(version, 8192, 16384, 6, 15, 8, validateHeaders); - } - - /** - * Creates a new instance with the specified {@code version}, {@code validateHeaders (true)}, - * decoder and encoder options. 
- */ - public SpdyFrameCodec( - SpdyVersion version, int maxChunkSize, int maxHeaderSize, - int compressionLevel, int windowBits, int memLevel) { - this(version, maxChunkSize, maxHeaderSize, compressionLevel, windowBits, memLevel, true); - } - - /** - * Creates a new instance with the specified {@code version}, {@code validateHeaders}, - * decoder and encoder options. - */ - public SpdyFrameCodec( - SpdyVersion version, int maxChunkSize, int maxHeaderSize, - int compressionLevel, int windowBits, int memLevel, boolean validateHeaders) { - this(version, maxChunkSize, - SpdyHeaderBlockDecoder.newInstance(version, maxHeaderSize), - SpdyHeaderBlockEncoder.newInstance(version, compressionLevel, windowBits, memLevel), validateHeaders); - } - - protected SpdyFrameCodec(SpdyVersion version, int maxChunkSize, - SpdyHeaderBlockDecoder spdyHeaderBlockDecoder, SpdyHeaderBlockEncoder spdyHeaderBlockEncoder, - boolean validateHeaders) { - spdyFrameDecoder = new SpdyFrameDecoder(version, this, maxChunkSize); - spdyFrameEncoder = new SpdyFrameEncoder(version); - this.spdyHeaderBlockDecoder = spdyHeaderBlockDecoder; - this.spdyHeaderBlockEncoder = spdyHeaderBlockEncoder; - this.validateHeaders = validateHeaders; - } - - @Override - public void handlerAdded(ChannelHandlerContext ctx) throws Exception { - super.handlerAdded(ctx); - this.ctx = ctx; - ctx.channel().closeFuture().addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - spdyHeaderBlockDecoder.end(); - spdyHeaderBlockEncoder.end(); - } - }); - } - - @Override - protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { - spdyFrameDecoder.decode(in); - } - - @Override - public void channelReadComplete(ChannelHandlerContext ctx) throws Exception { - if (!read) { - if (!ctx.channel().config().isAutoRead()) { - ctx.read(); - } - } - read = false; - super.channelReadComplete(ctx); - } - - @Override - public void 
bind(ChannelHandlerContext ctx, SocketAddress localAddress, ChannelPromise promise) throws Exception { - ctx.bind(localAddress, promise); - } - - @Override - public void connect(ChannelHandlerContext ctx, SocketAddress remoteAddress, SocketAddress localAddress, - ChannelPromise promise) throws Exception { - ctx.connect(remoteAddress, localAddress, promise); - } - - @Override - public void disconnect(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - ctx.disconnect(promise); - } - - @Override - public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - ctx.close(promise); - } - - @Override - public void deregister(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - ctx.deregister(promise); - } - - @Override - public void read(ChannelHandlerContext ctx) throws Exception { - ctx.read(); - } - - @Override - public void flush(ChannelHandlerContext ctx) throws Exception { - ctx.flush(); - } - - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { - ByteBuf frame; - - if (msg instanceof SpdyDataFrame) { - - SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg; - frame = spdyFrameEncoder.encodeDataFrame( - ctx.alloc(), - spdyDataFrame.streamId(), - spdyDataFrame.isLast(), - spdyDataFrame.content() - ); - spdyDataFrame.release(); - ctx.write(frame, promise); - - } else if (msg instanceof SpdySynStreamFrame) { - - SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg; - ByteBuf headerBlock = spdyHeaderBlockEncoder.encode(ctx.alloc(), spdySynStreamFrame); - try { - frame = spdyFrameEncoder.encodeSynStreamFrame( - ctx.alloc(), - spdySynStreamFrame.streamId(), - spdySynStreamFrame.associatedStreamId(), - spdySynStreamFrame.priority(), - spdySynStreamFrame.isLast(), - spdySynStreamFrame.isUnidirectional(), - headerBlock - ); - } finally { - headerBlock.release(); - } - ctx.write(frame, promise); - - } else if (msg instanceof 
SpdySynReplyFrame) { - - SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg; - ByteBuf headerBlock = spdyHeaderBlockEncoder.encode(ctx.alloc(), spdySynReplyFrame); - try { - frame = spdyFrameEncoder.encodeSynReplyFrame( - ctx.alloc(), - spdySynReplyFrame.streamId(), - spdySynReplyFrame.isLast(), - headerBlock - ); - } finally { - headerBlock.release(); - } - ctx.write(frame, promise); - - } else if (msg instanceof SpdyRstStreamFrame) { - - SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg; - frame = spdyFrameEncoder.encodeRstStreamFrame( - ctx.alloc(), - spdyRstStreamFrame.streamId(), - spdyRstStreamFrame.status().code() - ); - ctx.write(frame, promise); - - } else if (msg instanceof SpdySettingsFrame) { - - SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg; - frame = spdyFrameEncoder.encodeSettingsFrame( - ctx.alloc(), - spdySettingsFrame - ); - ctx.write(frame, promise); - - } else if (msg instanceof SpdyPingFrame) { - - SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg; - frame = spdyFrameEncoder.encodePingFrame( - ctx.alloc(), - spdyPingFrame.id() - ); - ctx.write(frame, promise); - - } else if (msg instanceof SpdyGoAwayFrame) { - - SpdyGoAwayFrame spdyGoAwayFrame = (SpdyGoAwayFrame) msg; - frame = spdyFrameEncoder.encodeGoAwayFrame( - ctx.alloc(), - spdyGoAwayFrame.lastGoodStreamId(), - spdyGoAwayFrame.status().code() - ); - ctx.write(frame, promise); - - } else if (msg instanceof SpdyHeadersFrame) { - - SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg; - ByteBuf headerBlock = spdyHeaderBlockEncoder.encode(ctx.alloc(), spdyHeadersFrame); - try { - frame = spdyFrameEncoder.encodeHeadersFrame( - ctx.alloc(), - spdyHeadersFrame.streamId(), - spdyHeadersFrame.isLast(), - headerBlock - ); - } finally { - headerBlock.release(); - } - ctx.write(frame, promise); - - } else if (msg instanceof SpdyWindowUpdateFrame) { - - SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg; - frame = 
spdyFrameEncoder.encodeWindowUpdateFrame( - ctx.alloc(), - spdyWindowUpdateFrame.streamId(), - spdyWindowUpdateFrame.deltaWindowSize() - ); - ctx.write(frame, promise); - } else { - throw new UnsupportedMessageTypeException(msg); - } - } - - @Override - public void readDataFrame(int streamId, boolean last, ByteBuf data) { - read = true; - - SpdyDataFrame spdyDataFrame = new DefaultSpdyDataFrame(streamId, data); - spdyDataFrame.setLast(last); - ctx.fireChannelRead(spdyDataFrame); - } - - @Override - public void readSynStreamFrame( - int streamId, int associatedToStreamId, byte priority, boolean last, boolean unidirectional) { - SpdySynStreamFrame spdySynStreamFrame = - new DefaultSpdySynStreamFrame(streamId, associatedToStreamId, priority, validateHeaders); - spdySynStreamFrame.setLast(last); - spdySynStreamFrame.setUnidirectional(unidirectional); - spdyHeadersFrame = spdySynStreamFrame; - } - - @Override - public void readSynReplyFrame(int streamId, boolean last) { - SpdySynReplyFrame spdySynReplyFrame = new DefaultSpdySynReplyFrame(streamId, validateHeaders); - spdySynReplyFrame.setLast(last); - spdyHeadersFrame = spdySynReplyFrame; - } - - @Override - public void readRstStreamFrame(int streamId, int statusCode) { - read = true; - - SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamId, statusCode); - ctx.fireChannelRead(spdyRstStreamFrame); - } - - @Override - public void readSettingsFrame(boolean clearPersisted) { - read = true; - - spdySettingsFrame = new DefaultSpdySettingsFrame(); - spdySettingsFrame.setClearPreviouslyPersistedSettings(clearPersisted); - } - - @Override - public void readSetting(int id, int value, boolean persistValue, boolean persisted) { - spdySettingsFrame.setValue(id, value, persistValue, persisted); - } - - @Override - public void readSettingsEnd() { - read = true; - - Object frame = spdySettingsFrame; - spdySettingsFrame = null; - ctx.fireChannelRead(frame); - } - - @Override - public void readPingFrame(int id) 
{ - read = true; - - SpdyPingFrame spdyPingFrame = new DefaultSpdyPingFrame(id); - ctx.fireChannelRead(spdyPingFrame); - } - - @Override - public void readGoAwayFrame(int lastGoodStreamId, int statusCode) { - read = true; - - SpdyGoAwayFrame spdyGoAwayFrame = new DefaultSpdyGoAwayFrame(lastGoodStreamId, statusCode); - ctx.fireChannelRead(spdyGoAwayFrame); - } - - @Override - public void readHeadersFrame(int streamId, boolean last) { - spdyHeadersFrame = new DefaultSpdyHeadersFrame(streamId, validateHeaders); - spdyHeadersFrame.setLast(last); - } - - @Override - public void readWindowUpdateFrame(int streamId, int deltaWindowSize) { - read = true; - - SpdyWindowUpdateFrame spdyWindowUpdateFrame = new DefaultSpdyWindowUpdateFrame(streamId, deltaWindowSize); - ctx.fireChannelRead(spdyWindowUpdateFrame); - } - - @Override - public void readHeaderBlock(ByteBuf headerBlock) { - try { - spdyHeaderBlockDecoder.decode(ctx.alloc(), headerBlock, spdyHeadersFrame); - } catch (Exception e) { - ctx.fireExceptionCaught(e); - } finally { - headerBlock.release(); - } - } - - @Override - public void readHeaderBlockEnd() { - Object frame = null; - try { - spdyHeaderBlockDecoder.endHeaderBlock(spdyHeadersFrame); - frame = spdyHeadersFrame; - spdyHeadersFrame = null; - } catch (Exception e) { - ctx.fireExceptionCaught(e); - } - if (frame != null) { - read = true; - - ctx.fireChannelRead(frame); - } - } - - @Override - public void readFrameError(String message) { - ctx.fireExceptionCaught(INVALID_FRAME); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameDecoder.java deleted file mode 100644 index e0d1112813b..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameDecoder.java +++ /dev/null @@ -1,465 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); 
you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_DATA_FLAG_FIN; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_DATA_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_FLAG_FIN; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_FLAG_UNIDIRECTIONAL; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_GOAWAY_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_HEADERS_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_HEADER_FLAGS_OFFSET; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_HEADER_LENGTH_OFFSET; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_HEADER_SIZE; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_HEADER_TYPE_OFFSET; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_PING_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_RST_STREAM_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_SETTINGS_CLEAR; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_SETTINGS_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_SETTINGS_PERSISTED; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_SETTINGS_PERSIST_VALUE; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_SYN_REPLY_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_SYN_STREAM_FRAME; -import static 
io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_WINDOW_UPDATE_FRAME; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.getSignedInt; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.getUnsignedInt; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.getUnsignedMedium; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.getUnsignedShort; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; - -/** - * Decodes {@link ByteBuf}s into SPDY Frames. - */ -public class SpdyFrameDecoder { - - private final int spdyVersion; - private final int maxChunkSize; - - private final SpdyFrameDecoderDelegate delegate; - - private State state; - - // SPDY common header fields - private byte flags; - private int length; - private int streamId; - - private int numSettings; - - private enum State { - READ_COMMON_HEADER, - READ_DATA_FRAME, - READ_SYN_STREAM_FRAME, - READ_SYN_REPLY_FRAME, - READ_RST_STREAM_FRAME, - READ_SETTINGS_FRAME, - READ_SETTING, - READ_PING_FRAME, - READ_GOAWAY_FRAME, - READ_HEADERS_FRAME, - READ_WINDOW_UPDATE_FRAME, - READ_HEADER_BLOCK, - DISCARD_FRAME, - FRAME_ERROR - } - - /** - * Creates a new instance with the specified {@code version} - * and the default {@code maxChunkSize (8192)}. - */ - public SpdyFrameDecoder(SpdyVersion spdyVersion, SpdyFrameDecoderDelegate delegate) { - this(spdyVersion, delegate, 8192); - } - - /** - * Creates a new instance with the specified parameters. 
- */ - public SpdyFrameDecoder(SpdyVersion spdyVersion, SpdyFrameDecoderDelegate delegate, int maxChunkSize) { - if (spdyVersion == null) { - throw new NullPointerException("spdyVersion"); - } - if (delegate == null) { - throw new NullPointerException("delegate"); - } - if (maxChunkSize <= 0) { - throw new IllegalArgumentException( - "maxChunkSize must be a positive integer: " + maxChunkSize); - } - this.spdyVersion = spdyVersion.getVersion(); - this.delegate = delegate; - this.maxChunkSize = maxChunkSize; - state = State.READ_COMMON_HEADER; - } - - public void decode(ByteBuf buffer) { - boolean last; - int statusCode; - - while (true) { - switch(state) { - case READ_COMMON_HEADER: - if (buffer.readableBytes() < SPDY_HEADER_SIZE) { - return; - } - - int frameOffset = buffer.readerIndex(); - int flagsOffset = frameOffset + SPDY_HEADER_FLAGS_OFFSET; - int lengthOffset = frameOffset + SPDY_HEADER_LENGTH_OFFSET; - buffer.skipBytes(SPDY_HEADER_SIZE); - - boolean control = (buffer.getByte(frameOffset) & 0x80) != 0; - - int version; - int type; - if (control) { - // Decode control frame common header - version = getUnsignedShort(buffer, frameOffset) & 0x7FFF; - type = getUnsignedShort(buffer, frameOffset + SPDY_HEADER_TYPE_OFFSET); - streamId = 0; // Default to session Stream-ID - } else { - // Decode data frame common header - version = spdyVersion; // Default to expected version - type = SPDY_DATA_FRAME; - streamId = getUnsignedInt(buffer, frameOffset); - } - - flags = buffer.getByte(flagsOffset); - length = getUnsignedMedium(buffer, lengthOffset); - - // Check version first then validity - if (version != spdyVersion) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid SPDY Version"); - } else if (!isValidFrameHeader(streamId, type, flags, length)) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid Frame Error"); - } else { - state = getNextState(type, length); - } - break; - - case READ_DATA_FRAME: - if (length == 0) { - state = 
State.READ_COMMON_HEADER; - delegate.readDataFrame(streamId, hasFlag(flags, SPDY_DATA_FLAG_FIN), Unpooled.buffer(0)); - break; - } - - // Generate data frames that do not exceed maxChunkSize - int dataLength = Math.min(maxChunkSize, length); - - // Wait until entire frame is readable - if (buffer.readableBytes() < dataLength) { - return; - } - - ByteBuf data = buffer.alloc().buffer(dataLength); - data.writeBytes(buffer, dataLength); - length -= dataLength; - - if (length == 0) { - state = State.READ_COMMON_HEADER; - } - - last = length == 0 && hasFlag(flags, SPDY_DATA_FLAG_FIN); - - delegate.readDataFrame(streamId, last, data); - break; - - case READ_SYN_STREAM_FRAME: - if (buffer.readableBytes() < 10) { - return; - } - - int offset = buffer.readerIndex(); - streamId = getUnsignedInt(buffer, offset); - int associatedToStreamId = getUnsignedInt(buffer, offset + 4); - byte priority = (byte) (buffer.getByte(offset + 8) >> 5 & 0x07); - last = hasFlag(flags, SPDY_FLAG_FIN); - boolean unidirectional = hasFlag(flags, SPDY_FLAG_UNIDIRECTIONAL); - buffer.skipBytes(10); - length -= 10; - - if (streamId == 0) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid SYN_STREAM Frame"); - } else { - state = State.READ_HEADER_BLOCK; - delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); - } - break; - - case READ_SYN_REPLY_FRAME: - if (buffer.readableBytes() < 4) { - return; - } - - streamId = getUnsignedInt(buffer, buffer.readerIndex()); - last = hasFlag(flags, SPDY_FLAG_FIN); - - buffer.skipBytes(4); - length -= 4; - - if (streamId == 0) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid SYN_REPLY Frame"); - } else { - state = State.READ_HEADER_BLOCK; - delegate.readSynReplyFrame(streamId, last); - } - break; - - case READ_RST_STREAM_FRAME: - if (buffer.readableBytes() < 8) { - return; - } - - streamId = getUnsignedInt(buffer, buffer.readerIndex()); - statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); - 
buffer.skipBytes(8); - - if (streamId == 0 || statusCode == 0) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid RST_STREAM Frame"); - } else { - state = State.READ_COMMON_HEADER; - delegate.readRstStreamFrame(streamId, statusCode); - } - break; - - case READ_SETTINGS_FRAME: - if (buffer.readableBytes() < 4) { - return; - } - - boolean clear = hasFlag(flags, SPDY_SETTINGS_CLEAR); - - numSettings = getUnsignedInt(buffer, buffer.readerIndex()); - buffer.skipBytes(4); - length -= 4; - - // Validate frame length against number of entries. Each ID/Value entry is 8 bytes. - if ((length & 0x07) != 0 || length >> 3 != numSettings) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid SETTINGS Frame"); - } else { - state = State.READ_SETTING; - delegate.readSettingsFrame(clear); - } - break; - - case READ_SETTING: - if (numSettings == 0) { - state = State.READ_COMMON_HEADER; - delegate.readSettingsEnd(); - break; - } - - if (buffer.readableBytes() < 8) { - return; - } - - byte settingsFlags = buffer.getByte(buffer.readerIndex()); - int id = getUnsignedMedium(buffer, buffer.readerIndex() + 1); - int value = getSignedInt(buffer, buffer.readerIndex() + 4); - boolean persistValue = hasFlag(settingsFlags, SPDY_SETTINGS_PERSIST_VALUE); - boolean persisted = hasFlag(settingsFlags, SPDY_SETTINGS_PERSISTED); - buffer.skipBytes(8); - - --numSettings; - - delegate.readSetting(id, value, persistValue, persisted); - break; - - case READ_PING_FRAME: - if (buffer.readableBytes() < 4) { - return; - } - - int pingId = getSignedInt(buffer, buffer.readerIndex()); - buffer.skipBytes(4); - - state = State.READ_COMMON_HEADER; - delegate.readPingFrame(pingId); - break; - - case READ_GOAWAY_FRAME: - if (buffer.readableBytes() < 8) { - return; - } - - int lastGoodStreamId = getUnsignedInt(buffer, buffer.readerIndex()); - statusCode = getSignedInt(buffer, buffer.readerIndex() + 4); - buffer.skipBytes(8); - - state = State.READ_COMMON_HEADER; - 
delegate.readGoAwayFrame(lastGoodStreamId, statusCode); - break; - - case READ_HEADERS_FRAME: - if (buffer.readableBytes() < 4) { - return; - } - - streamId = getUnsignedInt(buffer, buffer.readerIndex()); - last = hasFlag(flags, SPDY_FLAG_FIN); - - buffer.skipBytes(4); - length -= 4; - - if (streamId == 0) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid HEADERS Frame"); - } else { - state = State.READ_HEADER_BLOCK; - delegate.readHeadersFrame(streamId, last); - } - break; - - case READ_WINDOW_UPDATE_FRAME: - if (buffer.readableBytes() < 8) { - return; - } - - streamId = getUnsignedInt(buffer, buffer.readerIndex()); - int deltaWindowSize = getUnsignedInt(buffer, buffer.readerIndex() + 4); - buffer.skipBytes(8); - - if (deltaWindowSize == 0) { - state = State.FRAME_ERROR; - delegate.readFrameError("Invalid WINDOW_UPDATE Frame"); - } else { - state = State.READ_COMMON_HEADER; - delegate.readWindowUpdateFrame(streamId, deltaWindowSize); - } - break; - - case READ_HEADER_BLOCK: - if (length == 0) { - state = State.READ_COMMON_HEADER; - delegate.readHeaderBlockEnd(); - break; - } - - if (!buffer.isReadable()) { - return; - } - - int compressedBytes = Math.min(buffer.readableBytes(), length); - ByteBuf headerBlock = buffer.alloc().buffer(compressedBytes); - headerBlock.writeBytes(buffer, compressedBytes); - length -= compressedBytes; - - delegate.readHeaderBlock(headerBlock); - break; - - case DISCARD_FRAME: - int numBytes = Math.min(buffer.readableBytes(), length); - buffer.skipBytes(numBytes); - length -= numBytes; - if (length == 0) { - state = State.READ_COMMON_HEADER; - break; - } - return; - - case FRAME_ERROR: - buffer.skipBytes(buffer.readableBytes()); - return; - - default: - throw new Error("Shouldn't reach here."); - } - } - } - - private static boolean hasFlag(byte flags, byte flag) { - return (flags & flag) != 0; - } - - private static State getNextState(int type, int length) { - switch (type) { - case SPDY_DATA_FRAME: - return 
State.READ_DATA_FRAME; - - case SPDY_SYN_STREAM_FRAME: - return State.READ_SYN_STREAM_FRAME; - - case SPDY_SYN_REPLY_FRAME: - return State.READ_SYN_REPLY_FRAME; - - case SPDY_RST_STREAM_FRAME: - return State.READ_RST_STREAM_FRAME; - - case SPDY_SETTINGS_FRAME: - return State.READ_SETTINGS_FRAME; - - case SPDY_PING_FRAME: - return State.READ_PING_FRAME; - - case SPDY_GOAWAY_FRAME: - return State.READ_GOAWAY_FRAME; - - case SPDY_HEADERS_FRAME: - return State.READ_HEADERS_FRAME; - - case SPDY_WINDOW_UPDATE_FRAME: - return State.READ_WINDOW_UPDATE_FRAME; - - default: - if (length != 0) { - return State.DISCARD_FRAME; - } else { - return State.READ_COMMON_HEADER; - } - } - } - - private static boolean isValidFrameHeader(int streamId, int type, byte flags, int length) { - switch (type) { - case SPDY_DATA_FRAME: - return streamId != 0; - - case SPDY_SYN_STREAM_FRAME: - return length >= 10; - - case SPDY_SYN_REPLY_FRAME: - return length >= 4; - - case SPDY_RST_STREAM_FRAME: - return flags == 0 && length == 8; - - case SPDY_SETTINGS_FRAME: - return length >= 4; - - case SPDY_PING_FRAME: - return length == 4; - - case SPDY_GOAWAY_FRAME: - return length == 8; - - case SPDY_HEADERS_FRAME: - return length >= 4; - - case SPDY_WINDOW_UPDATE_FRAME: - return length == 8; - - default: - return true; - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameDecoderDelegate.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameDecoderDelegate.java deleted file mode 100644 index 52815c787ca..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameDecoderDelegate.java +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; - -/** - * Callback interface for {@link SpdyFrameDecoder}. - */ -public interface SpdyFrameDecoderDelegate { - - /** - * Called when a DATA frame is received. - */ - void readDataFrame(int streamId, boolean last, ByteBuf data); - - /** - * Called when a SYN_STREAM frame is received. - * The Name/Value Header Block is not included. See readHeaderBlock(). - */ - void readSynStreamFrame( - int streamId, int associatedToStreamId, byte priority, boolean last, boolean unidirectional); - - /** - * Called when a SYN_REPLY frame is received. - * The Name/Value Header Block is not included. See readHeaderBlock(). - */ - void readSynReplyFrame(int streamId, boolean last); - - /** - * Called when a RST_STREAM frame is received. - */ - void readRstStreamFrame(int streamId, int statusCode); - - /** - * Called when a SETTINGS frame is received. - * Settings are not included. See readSetting(). - */ - void readSettingsFrame(boolean clearPersisted); - - /** - * Called when an individual setting within a SETTINGS frame is received. - */ - void readSetting(int id, int value, boolean persistValue, boolean persisted); - - /** - * Called when the entire SETTINGS frame has been received. - */ - void readSettingsEnd(); - - /** - * Called when a PING frame is received. - */ - void readPingFrame(int id); - - /** - * Called when a GOAWAY frame is received. - */ - void readGoAwayFrame(int lastGoodStreamId, int statusCode); - - /** - * Called when a HEADERS frame is received. 
- * The Name/Value Header Block is not included. See readHeaderBlock(). - */ - void readHeadersFrame(int streamId, boolean last); - - /** - * Called when a WINDOW_UPDATE frame is received. - */ - void readWindowUpdateFrame(int streamId, int deltaWindowSize); - - /** - * Called when the header block within a SYN_STREAM, SYN_REPLY, or HEADERS frame is received. - */ - void readHeaderBlock(ByteBuf headerBlock); - - /** - * Called when an entire header block has been received. - */ - void readHeaderBlockEnd(); - - /** - * Called when an unrecoverable session error has occurred. - */ - void readFrameError(String message); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameEncoder.java deleted file mode 100644 index 1524f952342..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyFrameEncoder.java +++ /dev/null @@ -1,163 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; - -import java.nio.ByteOrder; -import java.util.Set; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.*; - -/** - * Encodes a SPDY Frame into a {@link ByteBuf}. 
- */ -public class SpdyFrameEncoder { - - private final int version; - - /** - * Creates a new instance with the specified {@code spdyVersion}. - */ - public SpdyFrameEncoder(SpdyVersion spdyVersion) { - if (spdyVersion == null) { - throw new NullPointerException("spdyVersion"); - } - version = spdyVersion.getVersion(); - } - - private void writeControlFrameHeader(ByteBuf buffer, int type, byte flags, int length) { - buffer.writeShort(version | 0x8000); - buffer.writeShort(type); - buffer.writeByte(flags); - buffer.writeMedium(length); - } - - public ByteBuf encodeDataFrame(ByteBufAllocator allocator, int streamId, boolean last, ByteBuf data) { - byte flags = last ? SPDY_DATA_FLAG_FIN : 0; - int length = data.readableBytes(); - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - frame.writeInt(streamId & 0x7FFFFFFF); - frame.writeByte(flags); - frame.writeMedium(length); - frame.writeBytes(data, data.readerIndex(), length); - return frame; - } - - public ByteBuf encodeSynStreamFrame(ByteBufAllocator allocator, int streamId, int associatedToStreamId, - byte priority, boolean last, boolean unidirectional, ByteBuf headerBlock) { - int headerBlockLength = headerBlock.readableBytes(); - byte flags = last ? SPDY_FLAG_FIN : 0; - if (unidirectional) { - flags |= SPDY_FLAG_UNIDIRECTIONAL; - } - int length = 10 + headerBlockLength; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_SYN_STREAM_FRAME, flags, length); - frame.writeInt(streamId); - frame.writeInt(associatedToStreamId); - frame.writeShort((priority & 0xFF) << 13); - frame.writeBytes(headerBlock, headerBlock.readerIndex(), headerBlockLength); - return frame; - } - - public ByteBuf encodeSynReplyFrame(ByteBufAllocator allocator, int streamId, boolean last, ByteBuf headerBlock) { - int headerBlockLength = headerBlock.readableBytes(); - byte flags = last ? 
SPDY_FLAG_FIN : 0; - int length = 4 + headerBlockLength; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_SYN_REPLY_FRAME, flags, length); - frame.writeInt(streamId); - frame.writeBytes(headerBlock, headerBlock.readerIndex(), headerBlockLength); - return frame; - } - - public ByteBuf encodeRstStreamFrame(ByteBufAllocator allocator, int streamId, int statusCode) { - byte flags = 0; - int length = 8; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_RST_STREAM_FRAME, flags, length); - frame.writeInt(streamId); - frame.writeInt(statusCode); - return frame; - } - - public ByteBuf encodeSettingsFrame(ByteBufAllocator allocator, SpdySettingsFrame spdySettingsFrame) { - Set ids = spdySettingsFrame.ids(); - int numSettings = ids.size(); - - byte flags = spdySettingsFrame.clearPreviouslyPersistedSettings() ? - SPDY_SETTINGS_CLEAR : 0; - int length = 4 + 8 * numSettings; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_SETTINGS_FRAME, flags, length); - frame.writeInt(numSettings); - for (Integer id : ids) { - flags = 0; - if (spdySettingsFrame.isPersistValue(id)) { - flags |= SPDY_SETTINGS_PERSIST_VALUE; - } - if (spdySettingsFrame.isPersisted(id)) { - flags |= SPDY_SETTINGS_PERSISTED; - } - frame.writeByte(flags); - frame.writeMedium(id); - frame.writeInt(spdySettingsFrame.getValue(id)); - } - return frame; - } - - public ByteBuf encodePingFrame(ByteBufAllocator allocator, int id) { - byte flags = 0; - int length = 4; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_PING_FRAME, flags, length); - frame.writeInt(id); - return frame; - } - - public ByteBuf encodeGoAwayFrame(ByteBufAllocator allocator, int lastGoodStreamId, int statusCode) { - byte flags = 
0; - int length = 8; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_GOAWAY_FRAME, flags, length); - frame.writeInt(lastGoodStreamId); - frame.writeInt(statusCode); - return frame; - } - - public ByteBuf encodeHeadersFrame(ByteBufAllocator allocator, int streamId, boolean last, ByteBuf headerBlock) { - int headerBlockLength = headerBlock.readableBytes(); - byte flags = last ? SPDY_FLAG_FIN : 0; - int length = 4 + headerBlockLength; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_HEADERS_FRAME, flags, length); - frame.writeInt(streamId); - frame.writeBytes(headerBlock, headerBlock.readerIndex(), headerBlockLength); - return frame; - } - - public ByteBuf encodeWindowUpdateFrame(ByteBufAllocator allocator, int streamId, int deltaWindowSize) { - byte flags = 0; - int length = 8; - ByteBuf frame = allocator.ioBuffer(SPDY_HEADER_SIZE + length).order(ByteOrder.BIG_ENDIAN); - writeControlFrameHeader(frame, SPDY_WINDOW_UPDATE_FRAME, flags, length); - frame.writeInt(streamId); - frame.writeInt(deltaWindowSize); - return frame; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyGoAwayFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyGoAwayFrame.java deleted file mode 100644 index d2abb19b37e..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyGoAwayFrame.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol GOAWAY Frame - */ -public interface SpdyGoAwayFrame extends SpdyFrame { - - /** - * Returns the Last-good-stream-ID of this frame. - */ - int lastGoodStreamId(); - - /** - * Sets the Last-good-stream-ID of this frame. The Last-good-stream-ID - * cannot be negative. - */ - SpdyGoAwayFrame setLastGoodStreamId(int lastGoodStreamId); - - /** - * Returns the status of this frame. - */ - SpdySessionStatus status(); - - /** - * Sets the status of this frame. - */ - SpdyGoAwayFrame setStatus(SpdySessionStatus status); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockDecoder.java deleted file mode 100644 index dd2d8684c7a..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockDecoder.java +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; - -abstract class SpdyHeaderBlockDecoder { - - static SpdyHeaderBlockDecoder newInstance(SpdyVersion spdyVersion, int maxHeaderSize) { - return new SpdyHeaderBlockZlibDecoder(spdyVersion, maxHeaderSize); - } - - /** - * Decodes a SPDY Header Block, adding the Name/Value pairs to the given Headers frame. - * If the header block is malformed, the Headers frame will be marked as invalid. - * A stream error with status code PROTOCOL_ERROR must be issued in response to an invalid frame. - * - * @param alloc the {@link ByteBufAllocator} which can be used to allocate new {@link ByteBuf}s - * @param headerBlock the HeaderBlock to decode - * @param frame the Headers frame that receives the Name/Value pairs - * @throws Exception If the header block is malformed in a way that prevents any future - * decoding of any other header blocks, an exception will be thrown. - * A session error with status code PROTOCOL_ERROR must be issued. - */ - abstract void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception; - - abstract void endHeaderBlock(SpdyHeadersFrame frame) throws Exception; - - abstract void end(); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockEncoder.java deleted file mode 100644 index 8f706b165e3..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockEncoder.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.util.internal.PlatformDependent; - -abstract class SpdyHeaderBlockEncoder { - - static SpdyHeaderBlockEncoder newInstance( - SpdyVersion version, int compressionLevel, int windowBits, int memLevel) { - - if (PlatformDependent.javaVersion() >= 7) { - return new SpdyHeaderBlockZlibEncoder( - version, compressionLevel); - } else { - return new SpdyHeaderBlockJZlibEncoder( - version, compressionLevel, windowBits, memLevel); - } - } - - abstract ByteBuf encode(ByteBufAllocator alloc, SpdyHeadersFrame frame) throws Exception; - abstract void end(); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockJZlibEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockJZlibEncoder.java deleted file mode 100644 index add9d765ae9..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockJZlibEncoder.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import com.jcraft.jzlib.Deflater; -import com.jcraft.jzlib.JZlib; -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.Unpooled; -import io.netty.handler.codec.compression.CompressionException; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.*; - -class SpdyHeaderBlockJZlibEncoder extends SpdyHeaderBlockRawEncoder { - - private final Deflater z = new Deflater(); - - private boolean finished; - - SpdyHeaderBlockJZlibEncoder( - SpdyVersion version, int compressionLevel, int windowBits, int memLevel) { - super(version); - if (compressionLevel < 0 || compressionLevel > 9) { - throw new IllegalArgumentException( - "compressionLevel: " + compressionLevel + " (expected: 0-9)"); - } - if (windowBits < 9 || windowBits > 15) { - throw new IllegalArgumentException( - "windowBits: " + windowBits + " (expected: 9-15)"); - } - if (memLevel < 1 || memLevel > 9) { - throw new IllegalArgumentException( - "memLevel: " + memLevel + " (expected: 1-9)"); - } - - int resultCode = z.deflateInit( - compressionLevel, windowBits, memLevel, JZlib.W_ZLIB); - if (resultCode != JZlib.Z_OK) { - throw new CompressionException( - "failed to initialize an SPDY header block deflater: " + resultCode); - } else { - resultCode = z.deflateSetDictionary(SPDY_DICT, SPDY_DICT.length); - if (resultCode != JZlib.Z_OK) { - throw new CompressionException( - "failed to set the SPDY dictionary: " + resultCode); - } - } - } - - private void setInput(ByteBuf decompressed) { - int len = 
decompressed.readableBytes(); - - byte[] in; - int offset; - if (decompressed.hasArray()) { - in = decompressed.array(); - offset = decompressed.arrayOffset() + decompressed.readerIndex(); - } else { - in = new byte[len]; - decompressed.getBytes(decompressed.readerIndex(), in); - offset = 0; - } - z.next_in = in; - z.next_in_index = offset; - z.avail_in = len; - } - - private ByteBuf encode(ByteBufAllocator alloc) { - boolean release = true; - ByteBuf out = null; - try { - int oldNextInIndex = z.next_in_index; - int oldNextOutIndex = z.next_out_index; - - int maxOutputLength = (int) Math.ceil(z.next_in.length * 1.001) + 12; - out = alloc.heapBuffer(maxOutputLength); - z.next_out = out.array(); - z.next_out_index = out.arrayOffset() + out.writerIndex(); - z.avail_out = maxOutputLength; - - int resultCode; - try { - resultCode = z.deflate(JZlib.Z_SYNC_FLUSH); - } finally { - out.skipBytes(z.next_in_index - oldNextInIndex); - } - if (resultCode != JZlib.Z_OK) { - throw new CompressionException("compression failure: " + resultCode); - } - - int outputLength = z.next_out_index - oldNextOutIndex; - if (outputLength > 0) { - out.writerIndex(out.writerIndex() + outputLength); - } - release = false; - return out; - } finally { - // Deference the external references explicitly to tell the VM that - // the allocated byte arrays are temporary so that the call stack - // can be utilized. - // I'm not sure if the modern VMs do this optimization though. 
- z.next_in = null; - z.next_out = null; - if (release && out != null) { - out.release(); - } - } - } - - @Override - public ByteBuf encode(ByteBufAllocator alloc, SpdyHeadersFrame frame) throws Exception { - if (frame == null) { - throw new IllegalArgumentException("frame"); - } - - if (finished) { - return Unpooled.EMPTY_BUFFER; - } - - ByteBuf decompressed = super.encode(alloc, frame); - try { - if (!decompressed.isReadable()) { - return Unpooled.EMPTY_BUFFER; - } - - setInput(decompressed); - return encode(alloc); - } finally { - decompressed.release(); - } - } - - @Override - public void end() { - if (finished) { - return; - } - finished = true; - z.deflateEnd(); - z.next_in = null; - z.next_out = null; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawDecoder.java deleted file mode 100644 index 70d4607ed03..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawDecoder.java +++ /dev/null @@ -1,311 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.*; - -public class SpdyHeaderBlockRawDecoder extends SpdyHeaderBlockDecoder { - - private static final int LENGTH_FIELD_SIZE = 4; - - private final int maxHeaderSize; - - private State state; - - private ByteBuf cumulation; - - private int headerSize; - private int numHeaders; - private int length; - private String name; - - private enum State { - READ_NUM_HEADERS, - READ_NAME_LENGTH, - READ_NAME, - SKIP_NAME, - READ_VALUE_LENGTH, - READ_VALUE, - SKIP_VALUE, - END_HEADER_BLOCK, - ERROR - } - - public SpdyHeaderBlockRawDecoder(SpdyVersion spdyVersion, int maxHeaderSize) { - if (spdyVersion == null) { - throw new NullPointerException("spdyVersion"); - } - this.maxHeaderSize = maxHeaderSize; - state = State.READ_NUM_HEADERS; - } - - private static int readLengthField(ByteBuf buffer) { - int length = getSignedInt(buffer, buffer.readerIndex()); - buffer.skipBytes(LENGTH_FIELD_SIZE); - return length; - } - - @Override - void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception { - if (headerBlock == null) { - throw new NullPointerException("headerBlock"); - } - if (frame == null) { - throw new NullPointerException("frame"); - } - - if (cumulation == null) { - decodeHeaderBlock(headerBlock, frame); - if (headerBlock.isReadable()) { - cumulation = alloc.buffer(headerBlock.readableBytes()); - cumulation.writeBytes(headerBlock); - } - } else { - cumulation.writeBytes(headerBlock); - decodeHeaderBlock(cumulation, frame); - if (cumulation.isReadable()) { - cumulation.discardReadBytes(); - } else { - releaseBuffer(); - } - } - } - - protected void decodeHeaderBlock(ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception { - int skipLength; - while (headerBlock.isReadable()) { - switch(state) { - case READ_NUM_HEADERS: - if (headerBlock.readableBytes() < 
LENGTH_FIELD_SIZE) { - return; - } - - numHeaders = readLengthField(headerBlock); - - if (numHeaders < 0) { - state = State.ERROR; - frame.setInvalid(); - } else if (numHeaders == 0) { - state = State.END_HEADER_BLOCK; - } else { - state = State.READ_NAME_LENGTH; - } - break; - - case READ_NAME_LENGTH: - if (headerBlock.readableBytes() < LENGTH_FIELD_SIZE) { - return; - } - - length = readLengthField(headerBlock); - - // Recipients of a zero-length name must issue a stream error - if (length <= 0) { - state = State.ERROR; - frame.setInvalid(); - } else if (length > maxHeaderSize || headerSize > maxHeaderSize - length) { - headerSize = maxHeaderSize + 1; - state = State.SKIP_NAME; - frame.setTruncated(); - } else { - headerSize += length; - state = State.READ_NAME; - } - break; - - case READ_NAME: - if (headerBlock.readableBytes() < length) { - return; - } - - byte[] nameBytes = new byte[length]; - headerBlock.readBytes(nameBytes); - name = new String(nameBytes, "UTF-8"); - - // Check for identically named headers - if (frame.headers().contains(name)) { - state = State.ERROR; - frame.setInvalid(); - } else { - state = State.READ_VALUE_LENGTH; - } - break; - - case SKIP_NAME: - skipLength = Math.min(headerBlock.readableBytes(), length); - headerBlock.skipBytes(skipLength); - length -= skipLength; - - if (length == 0) { - state = State.READ_VALUE_LENGTH; - } - break; - - case READ_VALUE_LENGTH: - if (headerBlock.readableBytes() < LENGTH_FIELD_SIZE) { - return; - } - - length = readLengthField(headerBlock); - - // Recipients of illegal value fields must issue a stream error - if (length < 0) { - state = State.ERROR; - frame.setInvalid(); - } else if (length == 0) { - if (!frame.isTruncated()) { - // SPDY/3 allows zero-length (empty) header values - frame.headers().add(name, ""); - } - - name = null; - if (--numHeaders == 0) { - state = State.END_HEADER_BLOCK; - } else { - state = State.READ_NAME_LENGTH; - } - - } else if (length > maxHeaderSize || headerSize > 
maxHeaderSize - length) { - headerSize = maxHeaderSize + 1; - name = null; - state = State.SKIP_VALUE; - frame.setTruncated(); - } else { - headerSize += length; - state = State.READ_VALUE; - } - break; - - case READ_VALUE: - if (headerBlock.readableBytes() < length) { - return; - } - - byte[] valueBytes = new byte[length]; - headerBlock.readBytes(valueBytes); - - // Add Name/Value pair to headers - int index = 0; - int offset = 0; - - // Value must not start with a NULL character - if (valueBytes[0] == (byte) 0) { - state = State.ERROR; - frame.setInvalid(); - break; - } - - while (index < length) { - while (index < valueBytes.length && valueBytes[index] != (byte) 0) { - index ++; - } - if (index < valueBytes.length) { - // Received NULL character - if (index + 1 == valueBytes.length || valueBytes[index + 1] == (byte) 0) { - // Value field ended with a NULL character or - // received multiple, in-sequence NULL characters. - // Recipients of illegal value fields must issue a stream error - state = State.ERROR; - frame.setInvalid(); - break; - } - } - String value = new String(valueBytes, offset, index - offset, "UTF-8"); - - try { - frame.headers().add(name, value); - } catch (IllegalArgumentException e) { - // Name contains NULL or non-ascii characters - state = State.ERROR; - frame.setInvalid(); - break; - } - index ++; - offset = index; - } - - name = null; - - // If we broke out of the add header loop, break here - if (state == State.ERROR) { - break; - } - - if (--numHeaders == 0) { - state = State.END_HEADER_BLOCK; - } else { - state = State.READ_NAME_LENGTH; - } - break; - - case SKIP_VALUE: - skipLength = Math.min(headerBlock.readableBytes(), length); - headerBlock.skipBytes(skipLength); - length -= skipLength; - - if (length == 0) { - if (--numHeaders == 0) { - state = State.END_HEADER_BLOCK; - } else { - state = State.READ_NAME_LENGTH; - } - } - break; - - case END_HEADER_BLOCK: - state = State.ERROR; - frame.setInvalid(); - break; - - case ERROR: - 
headerBlock.skipBytes(headerBlock.readableBytes()); - return; - - default: - throw new Error("Shouldn't reach here."); - } - } - } - - @Override - void endHeaderBlock(SpdyHeadersFrame frame) throws Exception { - if (state != State.END_HEADER_BLOCK) { - frame.setInvalid(); - } - - releaseBuffer(); - - // Initialize header block decoding fields - headerSize = 0; - name = null; - state = State.READ_NUM_HEADERS; - } - - @Override - void end() { - releaseBuffer(); - } - - private void releaseBuffer() { - if (cumulation != null) { - cumulation.release(); - cumulation = null; - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawEncoder.java deleted file mode 100644 index afd547966a7..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawEncoder.java +++ /dev/null @@ -1,91 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.ByteBufUtil; -import io.netty.buffer.Unpooled; - -import java.util.Set; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_MAX_NV_LENGTH; - -public class SpdyHeaderBlockRawEncoder extends SpdyHeaderBlockEncoder { - - private final int version; - - public SpdyHeaderBlockRawEncoder(SpdyVersion version) { - if (version == null) { - throw new NullPointerException("version"); - } - this.version = version.getVersion(); - } - - private static void setLengthField(ByteBuf buffer, int writerIndex, int length) { - buffer.setInt(writerIndex, length); - } - - private static void writeLengthField(ByteBuf buffer, int length) { - buffer.writeInt(length); - } - - @Override - public ByteBuf encode(ByteBufAllocator alloc, SpdyHeadersFrame frame) throws Exception { - Set names = frame.headers().names(); - int numHeaders = names.size(); - if (numHeaders == 0) { - return Unpooled.EMPTY_BUFFER; - } - if (numHeaders > SPDY_MAX_NV_LENGTH) { - throw new IllegalArgumentException( - "header block contains too many headers"); - } - ByteBuf headerBlock = alloc.heapBuffer(); - writeLengthField(headerBlock, numHeaders); - for (CharSequence name: names) { - writeLengthField(headerBlock, name.length()); - ByteBufUtil.writeAscii(headerBlock, name); - int savedIndex = headerBlock.writerIndex(); - int valueLength = 0; - writeLengthField(headerBlock, valueLength); - for (CharSequence value: frame.headers().getAll(name)) { - int length = value.length(); - if (length > 0) { - ByteBufUtil.writeAscii(headerBlock, value); - headerBlock.writeByte(0); - valueLength += length + 1; - } - } - if (valueLength != 0) { - valueLength --; - } - if (valueLength > SPDY_MAX_NV_LENGTH) { - throw new IllegalArgumentException( - "header exceeds allowable length: " + name); - } - if (valueLength > 0) { - setLengthField(headerBlock, savedIndex, valueLength); - 
headerBlock.writerIndex(headerBlock.writerIndex() - 1); - } - } - return headerBlock; - } - - @Override - void end() { - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibDecoder.java deleted file mode 100644 index 3fb11ffde84..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibDecoder.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; - -import java.util.zip.DataFormatException; -import java.util.zip.Inflater; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.*; - -final class SpdyHeaderBlockZlibDecoder extends SpdyHeaderBlockRawDecoder { - - private static final int DEFAULT_BUFFER_CAPACITY = 4096; - private static final SpdyProtocolException INVALID_HEADER_BLOCK = - new SpdyProtocolException("Invalid Header Block"); - - private final Inflater decompressor = new Inflater(); - - private ByteBuf decompressed; - - SpdyHeaderBlockZlibDecoder(SpdyVersion spdyVersion, int maxHeaderSize) { - super(spdyVersion, maxHeaderSize); - } - - @Override - void decode(ByteBufAllocator alloc, ByteBuf headerBlock, SpdyHeadersFrame frame) throws Exception { - int len = setInput(headerBlock); - - int numBytes; - do { - numBytes = decompress(alloc, frame); - } while (numBytes > 0); - - // z_stream has an internal 64-bit hold buffer - // it is always capable of consuming the entire input - if (decompressor.getRemaining() != 0) { - // we reached the end of the deflate stream - throw INVALID_HEADER_BLOCK; - } - - headerBlock.skipBytes(len); - } - - private int setInput(ByteBuf compressed) { - int len = compressed.readableBytes(); - - if (compressed.hasArray()) { - decompressor.setInput(compressed.array(), compressed.arrayOffset() + compressed.readerIndex(), len); - } else { - byte[] in = new byte[len]; - compressed.getBytes(compressed.readerIndex(), in); - decompressor.setInput(in, 0, in.length); - } - - return len; - } - - private int decompress(ByteBufAllocator alloc, SpdyHeadersFrame frame) throws Exception { - ensureBuffer(alloc); - byte[] out = decompressed.array(); - int off = decompressed.arrayOffset() + decompressed.writerIndex(); - try { - int numBytes = decompressor.inflate(out, off, decompressed.writableBytes()); - if (numBytes == 0 && decompressor.needsDictionary()) { - try { - 
decompressor.setDictionary(SPDY_DICT); - } catch (IllegalArgumentException ignored) { - throw INVALID_HEADER_BLOCK; - } - numBytes = decompressor.inflate(out, off, decompressed.writableBytes()); - } - if (frame != null) { - decompressed.writerIndex(decompressed.writerIndex() + numBytes); - decodeHeaderBlock(decompressed, frame); - decompressed.discardReadBytes(); - } - - return numBytes; - } catch (DataFormatException e) { - throw new SpdyProtocolException("Received invalid header block", e); - } - } - - private void ensureBuffer(ByteBufAllocator alloc) { - if (decompressed == null) { - decompressed = alloc.heapBuffer(DEFAULT_BUFFER_CAPACITY); - } - decompressed.ensureWritable(1); - } - - @Override - void endHeaderBlock(SpdyHeadersFrame frame) throws Exception { - super.endHeaderBlock(frame); - releaseBuffer(); - } - - @Override - public void end() { - super.end(); - releaseBuffer(); - decompressor.end(); - } - - private void releaseBuffer() { - if (decompressed != null) { - decompressed.release(); - decompressed = null; - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibEncoder.java deleted file mode 100644 index 9e4cf31aea8..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibEncoder.java +++ /dev/null @@ -1,114 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.Unpooled; - -import java.util.zip.Deflater; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.*; - -class SpdyHeaderBlockZlibEncoder extends SpdyHeaderBlockRawEncoder { - - private final Deflater compressor; - - private boolean finished; - - SpdyHeaderBlockZlibEncoder(SpdyVersion spdyVersion, int compressionLevel) { - super(spdyVersion); - if (compressionLevel < 0 || compressionLevel > 9) { - throw new IllegalArgumentException( - "compressionLevel: " + compressionLevel + " (expected: 0-9)"); - } - compressor = new Deflater(compressionLevel); - compressor.setDictionary(SPDY_DICT); - } - - private int setInput(ByteBuf decompressed) { - int len = decompressed.readableBytes(); - - if (decompressed.hasArray()) { - compressor.setInput(decompressed.array(), decompressed.arrayOffset() + decompressed.readerIndex(), len); - } else { - byte[] in = new byte[len]; - decompressed.getBytes(decompressed.readerIndex(), in); - compressor.setInput(in, 0, in.length); - } - - return len; - } - - private ByteBuf encode(ByteBufAllocator alloc, int len) { - ByteBuf compressed = alloc.heapBuffer(len); - boolean release = true; - try { - while (compressInto(compressed)) { - // Although unlikely, it's possible that the compressed size is larger than the decompressed size - compressed.ensureWritable(compressed.capacity() << 1); - } - release = false; - return compressed; - } finally { - if (release) { - compressed.release(); - } - } - } - - private boolean compressInto(ByteBuf compressed) { - byte[] out = compressed.array(); - int off = compressed.arrayOffset() + compressed.writerIndex(); - int toWrite = compressed.writableBytes(); - int numBytes = compressor.deflate(out, off, toWrite, Deflater.SYNC_FLUSH); - 
compressed.writerIndex(compressed.writerIndex() + numBytes); - return numBytes == toWrite; - } - - @Override - public ByteBuf encode(ByteBufAllocator alloc, SpdyHeadersFrame frame) throws Exception { - if (frame == null) { - throw new IllegalArgumentException("frame"); - } - - if (finished) { - return Unpooled.EMPTY_BUFFER; - } - - ByteBuf decompressed = super.encode(alloc, frame); - try { - if (!decompressed.isReadable()) { - return Unpooled.EMPTY_BUFFER; - } - - int len = setInput(decompressed); - return encode(alloc, len); - } finally { - decompressed.release(); - } - } - - @Override - public void end() { - if (finished) { - return; - } - finished = true; - compressor.end(); - super.end(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaders.java deleted file mode 100644 index f20fde59117..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeaders.java +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.handler.codec.Headers; -import io.netty.util.AsciiString; - -import java.util.Iterator; -import java.util.List; -import java.util.Map.Entry; - -/** - * Provides the constants for the standard SPDY HTTP header names and commonly - * used utility methods that access a {@link SpdyHeadersFrame}. - */ -public interface SpdyHeaders extends Headers { - - /** - * SPDY HTTP header names - */ - final class HttpNames { - /** - * {@code ":host"} - */ - public static final AsciiString HOST = AsciiString.cached(":host"); - /** - * {@code ":method"} - */ - public static final AsciiString METHOD = AsciiString.cached(":method"); - /** - * {@code ":path"} - */ - public static final AsciiString PATH = AsciiString.cached(":path"); - /** - * {@code ":scheme"} - */ - public static final AsciiString SCHEME = AsciiString.cached(":scheme"); - /** - * {@code ":status"} - */ - public static final AsciiString STATUS = AsciiString.cached(":status"); - /** - * {@code ":version"} - */ - public static final AsciiString VERSION = AsciiString.cached(":version"); - - private HttpNames() { } - } - - /** - * {@link Headers#get(Object)} and convert the result to a {@link String}. - * @param name the name of the header to retrieve - * @return the first header value if the header is found. {@code null} if there's no such header. - */ - String getAsString(CharSequence name); - - /** - * {@link Headers#getAll(Object)} and convert each element of {@link List} to a {@link String}. - * @param name the name of the header to retrieve - * @return a {@link List} of header values or an empty {@link List} if no values are found. - */ - List getAllAsString(CharSequence name); - - /** - * {@link #iterator()} that converts each {@link Entry}'s key and value to a {@link String}. - */ - Iterator> iteratorAsString(); - - /** - * Returns {@code true} if a header with the {@code name} and {@code value} exists, {@code false} otherwise. - *

    - * If {@code ignoreCase} is {@code true} then a case insensitive compare is done on the value. - * @param name the name of the header to find - * @param value the value of the header to find - * @param ignoreCase {@code true} then a case insensitive compare is run to compare values. - * otherwise a case sensitive compare is run to compare values. - */ - boolean contains(CharSequence name, CharSequence value, boolean ignoreCase); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeadersFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeadersFrame.java deleted file mode 100644 index b1f13034460..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHeadersFrame.java +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol HEADERS Frame - */ -public interface SpdyHeadersFrame extends SpdyStreamFrame { - - /** - * Returns {@code true} if this header block is invalid. - * A RST_STREAM frame with code PROTOCOL_ERROR should be sent. - */ - boolean isInvalid(); - - /** - * Marks this header block as invalid. - */ - SpdyHeadersFrame setInvalid(); - - /** - * Returns {@code true} if this header block has been truncated due to - * length restrictions. - */ - boolean isTruncated(); - - /** - * Mark this header block as truncated. 
- */ - SpdyHeadersFrame setTruncated(); - - /** - * Returns the {@link SpdyHeaders}. - */ - SpdyHeaders headers(); - - @Override - SpdyHeadersFrame setStreamId(int streamID); - - @Override - SpdyHeadersFrame setLast(boolean last); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpCodec.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpCodec.java deleted file mode 100644 index 1575b0ddded..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpCodec.java +++ /dev/null @@ -1,37 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.channel.CombinedChannelDuplexHandler; - -/** - * A combination of {@link SpdyHttpDecoder} and {@link SpdyHttpEncoder} - */ -public final class SpdyHttpCodec extends CombinedChannelDuplexHandler { - /** - * Creates a new instance with the specified decoder options. - */ - public SpdyHttpCodec(SpdyVersion version, int maxContentLength) { - super(new SpdyHttpDecoder(version, maxContentLength), new SpdyHttpEncoder(version)); - } - - /** - * Creates a new instance with the specified decoder options. 
- */ - public SpdyHttpCodec(SpdyVersion version, int maxContentLength, boolean validateHttpHeaders) { - super(new SpdyHttpDecoder(version, maxContentLength, validateHttpHeaders), new SpdyHttpEncoder(version)); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpDecoder.java deleted file mode 100644 index 366ad15b662..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpDecoder.java +++ /dev/null @@ -1,437 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageDecoder; -import io.netty.handler.codec.TooLongFrameException; -import io.netty.handler.codec.http.DefaultFullHttpRequest; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.FullHttpMessage; -import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpUtil; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.codec.spdy.SpdyHttpHeaders.Names; -import io.netty.util.ReferenceCountUtil; - -import java.util.HashMap; -import java.util.List; -import java.util.Map; - -import static io.netty.handler.codec.spdy.SpdyHeaders.HttpNames.*; - -/** - * Decodes {@link SpdySynStreamFrame}s, {@link SpdySynReplyFrame}s, - * and {@link SpdyDataFrame}s into {@link FullHttpRequest}s and {@link FullHttpResponse}s. - */ -public class SpdyHttpDecoder extends MessageToMessageDecoder { - - private final boolean validateHeaders; - private final int spdyVersion; - private final int maxContentLength; - private final Map messageMap; - - /** - * Creates a new instance. - * - * @param version the protocol version - * @param maxContentLength the maximum length of the message content. - * If the length of the message content exceeds this value, - * a {@link TooLongFrameException} will be raised. - */ - public SpdyHttpDecoder(SpdyVersion version, int maxContentLength) { - this(version, maxContentLength, new HashMap(), true); - } - - /** - * Creates a new instance. - * - * @param version the protocol version - * @param maxContentLength the maximum length of the message content. 
- * If the length of the message content exceeds this value, - * a {@link TooLongFrameException} will be raised. - * @param validateHeaders {@code true} if http headers should be validated - */ - public SpdyHttpDecoder(SpdyVersion version, int maxContentLength, boolean validateHeaders) { - this(version, maxContentLength, new HashMap(), validateHeaders); - } - - /** - * Creates a new instance with the specified parameters. - * - * @param version the protocol version - * @param maxContentLength the maximum length of the message content. - * If the length of the message content exceeds this value, - * a {@link TooLongFrameException} will be raised. - * @param messageMap the {@link Map} used to hold partially received messages. - */ - protected SpdyHttpDecoder(SpdyVersion version, int maxContentLength, Map messageMap) { - this(version, maxContentLength, messageMap, true); - } - - /** - * Creates a new instance with the specified parameters. - * - * @param version the protocol version - * @param maxContentLength the maximum length of the message content. - * If the length of the message content exceeds this value, - * a {@link TooLongFrameException} will be raised. - * @param messageMap the {@link Map} used to hold partially received messages. 
- * @param validateHeaders {@code true} if http headers should be validated - */ - protected SpdyHttpDecoder(SpdyVersion version, int maxContentLength, Map messageMap, boolean validateHeaders) { - if (version == null) { - throw new NullPointerException("version"); - } - if (maxContentLength <= 0) { - throw new IllegalArgumentException( - "maxContentLength must be a positive integer: " + maxContentLength); - } - spdyVersion = version.getVersion(); - this.maxContentLength = maxContentLength; - this.messageMap = messageMap; - this.validateHeaders = validateHeaders; - } - - @Override - public void channelInactive(ChannelHandlerContext ctx) throws Exception { - // Release any outstanding messages from the map - for (Map.Entry entry : messageMap.entrySet()) { - ReferenceCountUtil.safeRelease(entry.getValue()); - } - messageMap.clear(); - super.channelInactive(ctx); - } - - protected FullHttpMessage putMessage(int streamId, FullHttpMessage message) { - return messageMap.put(streamId, message); - } - - protected FullHttpMessage getMessage(int streamId) { - return messageMap.get(streamId); - } - - protected FullHttpMessage removeMessage(int streamId) { - return messageMap.remove(streamId); - } - - @Override - protected void decode(ChannelHandlerContext ctx, SpdyFrame msg, List out) - throws Exception { - if (msg instanceof SpdySynStreamFrame) { - - // HTTP requests/responses are mapped one-to-one to SPDY streams. - SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg; - int streamId = spdySynStreamFrame.streamId(); - - if (SpdyCodecUtil.isServerId(streamId)) { - // SYN_STREAM frames initiated by the server are pushed resources - int associatedToStreamId = spdySynStreamFrame.associatedStreamId(); - - // If a client receives a SYN_STREAM with an Associated-To-Stream-ID of 0 - // it must reply with a RST_STREAM with error code INVALID_STREAM. 
- if (associatedToStreamId == 0) { - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.INVALID_STREAM); - ctx.writeAndFlush(spdyRstStreamFrame); - return; - } - - // If a client receives a SYN_STREAM with isLast set, - // reply with a RST_STREAM with error code PROTOCOL_ERROR - // (we only support pushed resources divided into two header blocks). - if (spdySynStreamFrame.isLast()) { - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.PROTOCOL_ERROR); - ctx.writeAndFlush(spdyRstStreamFrame); - return; - } - - // If a client receives a response with a truncated header block, - // reply with a RST_STREAM with error code INTERNAL_ERROR. - if (spdySynStreamFrame.isTruncated()) { - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.INTERNAL_ERROR); - ctx.writeAndFlush(spdyRstStreamFrame); - return; - } - - try { - FullHttpRequest httpRequestWithEntity = createHttpRequest(spdySynStreamFrame, ctx.alloc()); - - // Set the Stream-ID, Associated-To-Stream-ID, and Priority as headers - httpRequestWithEntity.headers().setInt(Names.STREAM_ID, streamId); - httpRequestWithEntity.headers().setInt(Names.ASSOCIATED_TO_STREAM_ID, associatedToStreamId); - httpRequestWithEntity.headers().setInt(Names.PRIORITY, spdySynStreamFrame.priority()); - - out.add(httpRequestWithEntity); - - } catch (Throwable ignored) { - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.PROTOCOL_ERROR); - ctx.writeAndFlush(spdyRstStreamFrame); - } - } else { - // SYN_STREAM frames initiated by the client are HTTP requests - - // If a client sends a request with a truncated header block, the server must - // reply with a HTTP 431 REQUEST HEADER FIELDS TOO LARGE reply. 
- if (spdySynStreamFrame.isTruncated()) { - SpdySynReplyFrame spdySynReplyFrame = new DefaultSpdySynReplyFrame(streamId); - spdySynReplyFrame.setLast(true); - SpdyHeaders frameHeaders = spdySynReplyFrame.headers(); - frameHeaders.setInt(STATUS, HttpResponseStatus.REQUEST_HEADER_FIELDS_TOO_LARGE.code()); - frameHeaders.setObject(VERSION, HttpVersion.HTTP_1_0); - ctx.writeAndFlush(spdySynReplyFrame); - return; - } - - try { - FullHttpRequest httpRequestWithEntity = createHttpRequest(spdySynStreamFrame, ctx.alloc()); - - // Set the Stream-ID as a header - httpRequestWithEntity.headers().setInt(Names.STREAM_ID, streamId); - - if (spdySynStreamFrame.isLast()) { - out.add(httpRequestWithEntity); - } else { - // Request body will follow in a series of Data Frames - putMessage(streamId, httpRequestWithEntity); - } - } catch (Throwable t) { - // If a client sends a SYN_STREAM without all of the getMethod, url (host and path), - // scheme, and version headers the server must reply with a HTTP 400 BAD REQUEST reply. - // Also sends HTTP 400 BAD REQUEST reply if header name/value pairs are invalid - SpdySynReplyFrame spdySynReplyFrame = new DefaultSpdySynReplyFrame(streamId); - spdySynReplyFrame.setLast(true); - SpdyHeaders frameHeaders = spdySynReplyFrame.headers(); - frameHeaders.setInt(STATUS, HttpResponseStatus.BAD_REQUEST.code()); - frameHeaders.setObject(VERSION, HttpVersion.HTTP_1_0); - ctx.writeAndFlush(spdySynReplyFrame); - } - } - - } else if (msg instanceof SpdySynReplyFrame) { - - SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg; - int streamId = spdySynReplyFrame.streamId(); - - // If a client receives a SYN_REPLY with a truncated header block, - // reply with a RST_STREAM frame with error code INTERNAL_ERROR. 
- if (spdySynReplyFrame.isTruncated()) { - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.INTERNAL_ERROR); - ctx.writeAndFlush(spdyRstStreamFrame); - return; - } - - try { - FullHttpResponse httpResponseWithEntity = - createHttpResponse(spdySynReplyFrame, ctx.alloc(), validateHeaders); - - // Set the Stream-ID as a header - httpResponseWithEntity.headers().setInt(Names.STREAM_ID, streamId); - - if (spdySynReplyFrame.isLast()) { - HttpUtil.setContentLength(httpResponseWithEntity, 0); - out.add(httpResponseWithEntity); - } else { - // Response body will follow in a series of Data Frames - putMessage(streamId, httpResponseWithEntity); - } - } catch (Throwable t) { - // If a client receives a SYN_REPLY without valid getStatus and version headers - // the client must reply with a RST_STREAM frame indicating a PROTOCOL_ERROR - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.PROTOCOL_ERROR); - ctx.writeAndFlush(spdyRstStreamFrame); - } - - } else if (msg instanceof SpdyHeadersFrame) { - - SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg; - int streamId = spdyHeadersFrame.streamId(); - FullHttpMessage fullHttpMessage = getMessage(streamId); - - if (fullHttpMessage == null) { - // HEADERS frames may initiate a pushed response - if (SpdyCodecUtil.isServerId(streamId)) { - - // If a client receives a HEADERS with a truncated header block, - // reply with a RST_STREAM frame with error code INTERNAL_ERROR. 
- if (spdyHeadersFrame.isTruncated()) { - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.INTERNAL_ERROR); - ctx.writeAndFlush(spdyRstStreamFrame); - return; - } - - try { - fullHttpMessage = createHttpResponse(spdyHeadersFrame, ctx.alloc(), validateHeaders); - - // Set the Stream-ID as a header - fullHttpMessage.headers().setInt(Names.STREAM_ID, streamId); - - if (spdyHeadersFrame.isLast()) { - HttpUtil.setContentLength(fullHttpMessage, 0); - out.add(fullHttpMessage); - } else { - // Response body will follow in a series of Data Frames - putMessage(streamId, fullHttpMessage); - } - } catch (Throwable t) { - // If a client receives a SYN_REPLY without valid getStatus and version headers - // the client must reply with a RST_STREAM frame indicating a PROTOCOL_ERROR - SpdyRstStreamFrame spdyRstStreamFrame = - new DefaultSpdyRstStreamFrame(streamId, SpdyStreamStatus.PROTOCOL_ERROR); - ctx.writeAndFlush(spdyRstStreamFrame); - } - } - return; - } - - // Ignore trailers in a truncated HEADERS frame. - if (!spdyHeadersFrame.isTruncated()) { - for (Map.Entry e: spdyHeadersFrame.headers()) { - fullHttpMessage.headers().add(e.getKey(), e.getValue()); - } - } - - if (spdyHeadersFrame.isLast()) { - HttpUtil.setContentLength(fullHttpMessage, fullHttpMessage.content().readableBytes()); - removeMessage(streamId); - out.add(fullHttpMessage); - } - - } else if (msg instanceof SpdyDataFrame) { - - SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg; - int streamId = spdyDataFrame.streamId(); - FullHttpMessage fullHttpMessage = getMessage(streamId); - - // If message is not in map discard Data Frame. 
- if (fullHttpMessage == null) { - return; - } - - ByteBuf content = fullHttpMessage.content(); - if (content.readableBytes() > maxContentLength - spdyDataFrame.content().readableBytes()) { - removeMessage(streamId); - throw new TooLongFrameException( - "HTTP content length exceeded " + maxContentLength + " bytes."); - } - - ByteBuf spdyDataFrameData = spdyDataFrame.content(); - int spdyDataFrameDataLen = spdyDataFrameData.readableBytes(); - content.writeBytes(spdyDataFrameData, spdyDataFrameData.readerIndex(), spdyDataFrameDataLen); - - if (spdyDataFrame.isLast()) { - HttpUtil.setContentLength(fullHttpMessage, content.readableBytes()); - removeMessage(streamId); - out.add(fullHttpMessage); - } - - } else if (msg instanceof SpdyRstStreamFrame) { - - SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg; - int streamId = spdyRstStreamFrame.streamId(); - removeMessage(streamId); - } - } - - private static FullHttpRequest createHttpRequest(SpdyHeadersFrame requestFrame, ByteBufAllocator alloc) - throws Exception { - // Create the first line of the request from the name/value pairs - SpdyHeaders headers = requestFrame.headers(); - HttpMethod method = HttpMethod.valueOf(headers.getAsString(METHOD)); - String url = headers.getAsString(PATH); - HttpVersion httpVersion = HttpVersion.valueOf(headers.getAsString(VERSION)); - headers.remove(METHOD); - headers.remove(PATH); - headers.remove(VERSION); - - boolean release = true; - ByteBuf buffer = alloc.buffer(); - try { - FullHttpRequest req = new DefaultFullHttpRequest(httpVersion, method, url, buffer); - - // Remove the scheme header - headers.remove(SCHEME); - - // Replace the SPDY host header with the HTTP host header - CharSequence host = headers.get(HOST); - headers.remove(HOST); - req.headers().set(HttpHeaderNames.HOST, host); - - for (Map.Entry e : requestFrame.headers()) { - req.headers().add(e.getKey(), e.getValue()); - } - - // The Connection and Keep-Alive headers are no longer valid - 
HttpUtil.setKeepAlive(req, true); - - // Transfer-Encoding header is not valid - req.headers().remove(HttpHeaderNames.TRANSFER_ENCODING); - release = false; - return req; - } finally { - if (release) { - buffer.release(); - } - } - } - - private static FullHttpResponse createHttpResponse(SpdyHeadersFrame responseFrame, ByteBufAllocator alloc, - boolean validateHeaders) throws Exception { - - // Create the first line of the response from the name/value pairs - SpdyHeaders headers = responseFrame.headers(); - HttpResponseStatus status = HttpResponseStatus.parseLine(headers.get(STATUS)); - HttpVersion version = HttpVersion.valueOf(headers.getAsString(VERSION)); - headers.remove(STATUS); - headers.remove(VERSION); - - boolean release = true; - ByteBuf buffer = alloc.buffer(); - try { - FullHttpResponse res = new DefaultFullHttpResponse(version, status, buffer, validateHeaders); - for (Map.Entry e: responseFrame.headers()) { - res.headers().add(e.getKey(), e.getValue()); - } - - // The Connection and Keep-Alive headers are no longer valid - HttpUtil.setKeepAlive(res, true); - - // Transfer-Encoding header is not valid - res.headers().remove(HttpHeaderNames.TRANSFER_ENCODING); - res.headers().remove(HttpHeaderNames.TRAILER); - - release = false; - return res; - } finally { - if (release) { - buffer.release(); - } - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpEncoder.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpEncoder.java deleted file mode 100644 index d3c5d582473..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpEncoder.java +++ /dev/null @@ -1,332 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageEncoder; -import io.netty.handler.codec.UnsupportedMessageTypeException; -import io.netty.handler.codec.http.FullHttpMessage; -import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaders; -import io.netty.handler.codec.http.HttpMessage; -import io.netty.handler.codec.http.HttpObject; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpResponse; -import io.netty.handler.codec.http.LastHttpContent; -import io.netty.util.AsciiString; - -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Map.Entry; - -/** - * Encodes {@link HttpRequest}s, {@link HttpResponse}s, and {@link HttpContent}s - * into {@link SpdySynStreamFrame}s and {@link SpdySynReplyFrame}s. - * - *

    Request Annotations

    - * - * SPDY specific headers must be added to {@link HttpRequest}s: - * - * - * - * - * - * - * - * - * - * - * - * - *
    Header NameHeader Value
    {@code "X-SPDY-Stream-ID"}The Stream-ID for this request. - * Stream-IDs must be odd, positive integers, and must increase monotonically.
    {@code "X-SPDY-Priority"}The priority value for this request. - * The priority should be between 0 and 7 inclusive. - * 0 represents the highest priority and 7 represents the lowest. - * This header is optional and defaults to 0.
    - * - *

    Response Annotations

    - * - * SPDY specific headers must be added to {@link HttpResponse}s: - * - * - * - * - * - * - * - * - *
    Header NameHeader Value
    {@code "X-SPDY-Stream-ID"}The Stream-ID of the request corresponding to this response.
    - * - *

    Pushed Resource Annotations

    - * - * SPDY specific headers must be added to pushed {@link HttpRequest}s: - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - * - *
    Header NameHeader Value
    {@code "X-SPDY-Stream-ID"}The Stream-ID for this resource. - * Stream-IDs must be even, positive integers, and must increase monotonically.
    {@code "X-SPDY-Associated-To-Stream-ID"}The Stream-ID of the request that initiated this pushed resource.
    {@code "X-SPDY-Priority"}The priority value for this resource. - * The priority should be between 0 and 7 inclusive. - * 0 represents the highest priority and 7 represents the lowest. - * This header is optional and defaults to 0.
    - * - *

    Required Annotations

    - * - * SPDY requires that all Requests and Pushed Resources contain - * an HTTP "Host" header. - * - *

    Optional Annotations

    - * - * Requests and Pushed Resources must contain a SPDY scheme header. - * This can be set via the {@code "X-SPDY-Scheme"} header but otherwise - * defaults to "https" as that is the most common SPDY deployment. - * - *

    Chunked Content

    - * - * This encoder associates all {@link HttpContent}s that it receives - * with the most recently received 'chunked' {@link HttpRequest} - * or {@link HttpResponse}. - * - *

    Pushed Resources

    - * - * All pushed resources should be sent before sending the response - * that corresponds to the initial request. - */ -public class SpdyHttpEncoder extends MessageToMessageEncoder { - - private int currentStreamId; - - private final boolean validateHeaders; - private final boolean headersToLowerCase; - - /** - * Creates a new instance. - * - * @param version the protocol version - */ - public SpdyHttpEncoder(SpdyVersion version) { - this(version, true, true); - } - - /** - * Creates a new instance. - * - * @param version the protocol version - * @param headersToLowerCase convert header names to lowercase. In a controlled environment, - * one can disable the conversion. - * @param validateHeaders validate the header names and values when adding them to the {@link SpdyHeaders} - */ - public SpdyHttpEncoder(SpdyVersion version, boolean headersToLowerCase, boolean validateHeaders) { - if (version == null) { - throw new NullPointerException("version"); - } - this.headersToLowerCase = headersToLowerCase; - this.validateHeaders = validateHeaders; - } - - @Override - protected void encode(ChannelHandlerContext ctx, HttpObject msg, List out) throws Exception { - - boolean valid = false; - boolean last = false; - - if (msg instanceof HttpRequest) { - - HttpRequest httpRequest = (HttpRequest) msg; - SpdySynStreamFrame spdySynStreamFrame = createSynStreamFrame(httpRequest); - out.add(spdySynStreamFrame); - - last = spdySynStreamFrame.isLast() || spdySynStreamFrame.isUnidirectional(); - valid = true; - } - if (msg instanceof HttpResponse) { - - HttpResponse httpResponse = (HttpResponse) msg; - SpdyHeadersFrame spdyHeadersFrame = createHeadersFrame(httpResponse); - out.add(spdyHeadersFrame); - - last = spdyHeadersFrame.isLast(); - valid = true; - } - if (msg instanceof HttpContent && !last) { - - HttpContent chunk = (HttpContent) msg; - - chunk.content().retain(); - SpdyDataFrame spdyDataFrame = new DefaultSpdyDataFrame(currentStreamId, chunk.content()); - if (chunk 
instanceof LastHttpContent) { - LastHttpContent trailer = (LastHttpContent) chunk; - HttpHeaders trailers = trailer.trailingHeaders(); - if (trailers.isEmpty()) { - spdyDataFrame.setLast(true); - out.add(spdyDataFrame); - } else { - // Create SPDY HEADERS frame out of trailers - SpdyHeadersFrame spdyHeadersFrame = new DefaultSpdyHeadersFrame(currentStreamId, validateHeaders); - spdyHeadersFrame.setLast(true); - Iterator> itr = trailers.iteratorCharSequence(); - while (itr.hasNext()) { - Map.Entry entry = itr.next(); - final CharSequence headerName = - headersToLowerCase ? AsciiString.of(entry.getKey()).toLowerCase() : entry.getKey(); - spdyHeadersFrame.headers().add(headerName, entry.getValue()); - } - - // Write DATA frame and append HEADERS frame - out.add(spdyDataFrame); - out.add(spdyHeadersFrame); - } - } else { - out.add(spdyDataFrame); - } - - valid = true; - } - - if (!valid) { - throw new UnsupportedMessageTypeException(msg); - } - } - - @SuppressWarnings("deprecation") - private SpdySynStreamFrame createSynStreamFrame(HttpRequest httpRequest) throws Exception { - // Get the Stream-ID, Associated-To-Stream-ID, Priority, and scheme from the headers - final HttpHeaders httpHeaders = httpRequest.headers(); - int streamId = httpHeaders.getInt(SpdyHttpHeaders.Names.STREAM_ID); - int associatedToStreamId = httpHeaders.getInt(SpdyHttpHeaders.Names.ASSOCIATED_TO_STREAM_ID, 0); - byte priority = (byte) httpHeaders.getInt(SpdyHttpHeaders.Names.PRIORITY, 0); - CharSequence scheme = httpHeaders.get(SpdyHttpHeaders.Names.SCHEME); - httpHeaders.remove(SpdyHttpHeaders.Names.STREAM_ID); - httpHeaders.remove(SpdyHttpHeaders.Names.ASSOCIATED_TO_STREAM_ID); - httpHeaders.remove(SpdyHttpHeaders.Names.PRIORITY); - httpHeaders.remove(SpdyHttpHeaders.Names.SCHEME); - - // The Connection, Keep-Alive, Proxy-Connection, and Transfer-Encoding - // headers are not valid and MUST not be sent. 
- httpHeaders.remove(HttpHeaderNames.CONNECTION); - httpHeaders.remove("Keep-Alive"); - httpHeaders.remove("Proxy-Connection"); - httpHeaders.remove(HttpHeaderNames.TRANSFER_ENCODING); - - SpdySynStreamFrame spdySynStreamFrame = - new DefaultSpdySynStreamFrame(streamId, associatedToStreamId, priority, validateHeaders); - - // Unfold the first line of the message into name/value pairs - SpdyHeaders frameHeaders = spdySynStreamFrame.headers(); - frameHeaders.set(SpdyHeaders.HttpNames.METHOD, httpRequest.method().name()); - frameHeaders.set(SpdyHeaders.HttpNames.PATH, httpRequest.uri()); - frameHeaders.set(SpdyHeaders.HttpNames.VERSION, httpRequest.protocolVersion().text()); - - // Replace the HTTP host header with the SPDY host header - CharSequence host = httpHeaders.get(HttpHeaderNames.HOST); - httpHeaders.remove(HttpHeaderNames.HOST); - frameHeaders.set(SpdyHeaders.HttpNames.HOST, host); - - // Set the SPDY scheme header - if (scheme == null) { - scheme = "https"; - } - frameHeaders.set(SpdyHeaders.HttpNames.SCHEME, scheme); - - // Transfer the remaining HTTP headers - Iterator> itr = httpHeaders.iteratorCharSequence(); - while (itr.hasNext()) { - Map.Entry entry = itr.next(); - final CharSequence headerName = - headersToLowerCase ? 
AsciiString.of(entry.getKey()).toLowerCase() : entry.getKey(); - frameHeaders.add(headerName, entry.getValue()); - } - currentStreamId = spdySynStreamFrame.streamId(); - if (associatedToStreamId == 0) { - spdySynStreamFrame.setLast(isLast(httpRequest)); - } else { - spdySynStreamFrame.setUnidirectional(true); - } - - return spdySynStreamFrame; - } - - @SuppressWarnings("deprecation") - private SpdyHeadersFrame createHeadersFrame(HttpResponse httpResponse) throws Exception { - // Get the Stream-ID from the headers - final HttpHeaders httpHeaders = httpResponse.headers(); - int streamId = httpHeaders.getInt(SpdyHttpHeaders.Names.STREAM_ID); - httpHeaders.remove(SpdyHttpHeaders.Names.STREAM_ID); - - // The Connection, Keep-Alive, Proxy-Connection, and Transfer-Encoding - // headers are not valid and MUST not be sent. - httpHeaders.remove(HttpHeaderNames.CONNECTION); - httpHeaders.remove("Keep-Alive"); - httpHeaders.remove("Proxy-Connection"); - httpHeaders.remove(HttpHeaderNames.TRANSFER_ENCODING); - - SpdyHeadersFrame spdyHeadersFrame; - if (SpdyCodecUtil.isServerId(streamId)) { - spdyHeadersFrame = new DefaultSpdyHeadersFrame(streamId, validateHeaders); - } else { - spdyHeadersFrame = new DefaultSpdySynReplyFrame(streamId, validateHeaders); - } - SpdyHeaders frameHeaders = spdyHeadersFrame.headers(); - // Unfold the first line of the response into name/value pairs - frameHeaders.set(SpdyHeaders.HttpNames.STATUS, httpResponse.status().codeAsText()); - frameHeaders.set(SpdyHeaders.HttpNames.VERSION, httpResponse.protocolVersion().text()); - - // Transfer the remaining HTTP headers - Iterator> itr = httpHeaders.iteratorCharSequence(); - while (itr.hasNext()) { - Map.Entry entry = itr.next(); - final CharSequence headerName = - headersToLowerCase ? 
AsciiString.of(entry.getKey()).toLowerCase() : entry.getKey(); - spdyHeadersFrame.headers().add(headerName, entry.getValue()); - } - - currentStreamId = streamId; - spdyHeadersFrame.setLast(isLast(httpResponse)); - - return spdyHeadersFrame; - } - - /** - * Checks if the given HTTP message should be considered as a last SPDY frame. - * - * @param httpMessage check this HTTP message - * @return whether the given HTTP message should generate a last SPDY frame. - */ - private static boolean isLast(HttpMessage httpMessage) { - if (httpMessage instanceof FullHttpMessage) { - FullHttpMessage fullMessage = (FullHttpMessage) httpMessage; - if (fullMessage.trailingHeaders().isEmpty() && !fullMessage.content().isReadable()) { - return true; - } - } - - return false; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpHeaders.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpHeaders.java deleted file mode 100644 index 92d5fa835b9..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpHeaders.java +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.util.AsciiString; - -/** - * Provides the constants for the header names and the utility methods - * used by the {@link SpdyHttpDecoder} and {@link SpdyHttpEncoder}. 
- */ -public final class SpdyHttpHeaders { - - /** - * SPDY HTTP header names - */ - public static final class Names { - /** - * {@code "x-spdy-stream-id"} - */ - public static final AsciiString STREAM_ID = AsciiString.cached("x-spdy-stream-id"); - /** - * {@code "x-spdy-associated-to-stream-id"} - */ - public static final AsciiString ASSOCIATED_TO_STREAM_ID = AsciiString.cached("x-spdy-associated-to-stream-id"); - /** - * {@code "x-spdy-priority"} - */ - public static final AsciiString PRIORITY = AsciiString.cached("x-spdy-priority"); - /** - * {@code "x-spdy-scheme"} - */ - public static final AsciiString SCHEME = AsciiString.cached("x-spdy-scheme"); - - private Names() { } - } - - private SpdyHttpHeaders() { } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpResponseStreamIdHandler.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpResponseStreamIdHandler.java deleted file mode 100644 index 4ad32e4dc2d..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyHttpResponseStreamIdHandler.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.MessageToMessageCodec; -import io.netty.handler.codec.http.HttpMessage; -import io.netty.handler.codec.spdy.SpdyHttpHeaders.Names; -import io.netty.util.ReferenceCountUtil; - -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; - -/** - * {@link MessageToMessageCodec} that takes care of adding the right {@link SpdyHttpHeaders.Names#STREAM_ID} to the - * {@link HttpMessage} if one is not present. This makes it possible to just re-use plan handlers current used - * for HTTP. - */ -public class SpdyHttpResponseStreamIdHandler extends - MessageToMessageCodec { - private static final Integer NO_ID = -1; - private final Queue ids = new LinkedList(); - - @Override - public boolean acceptInboundMessage(Object msg) throws Exception { - return msg instanceof HttpMessage || msg instanceof SpdyRstStreamFrame; - } - - @Override - protected void encode(ChannelHandlerContext ctx, HttpMessage msg, List out) throws Exception { - Integer id = ids.poll(); - if (id != null && id.intValue() != NO_ID && !msg.headers().contains(SpdyHttpHeaders.Names.STREAM_ID)) { - msg.headers().setInt(Names.STREAM_ID, id); - } - - out.add(ReferenceCountUtil.retain(msg)); - } - - @Override - protected void decode(ChannelHandlerContext ctx, Object msg, List out) throws Exception { - if (msg instanceof HttpMessage) { - boolean contains = ((HttpMessage) msg).headers().contains(SpdyHttpHeaders.Names.STREAM_ID); - if (!contains) { - ids.add(NO_ID); - } else { - ids.add(((HttpMessage) msg).headers().getInt(Names.STREAM_ID)); - } - } else if (msg instanceof SpdyRstStreamFrame) { - ids.remove(((SpdyRstStreamFrame) msg).streamId()); - } - - out.add(ReferenceCountUtil.retain(msg)); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyPingFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyPingFrame.java deleted file mode 
100644 index fc124f8d311..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyPingFrame.java +++ /dev/null @@ -1,32 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol PING Frame - */ -public interface SpdyPingFrame extends SpdyFrame { - - /** - * Returns the ID of this frame. - */ - int id(); - - /** - * Sets the ID of this frame. - */ - SpdyPingFrame setId(int id); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyProtocolException.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyProtocolException.java deleted file mode 100644 index 2b5bcbcec2f..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyProtocolException.java +++ /dev/null @@ -1,47 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -public class SpdyProtocolException extends Exception { - - private static final long serialVersionUID = 7870000537743847264L; - - /** - * Creates a new instance. - */ - public SpdyProtocolException() { } - - /** - * Creates a new instance. - */ - public SpdyProtocolException(String message, Throwable cause) { - super(message, cause); - } - - /** - * Creates a new instance. - */ - public SpdyProtocolException(String message) { - super(message); - } - - /** - * Creates a new instance. - */ - public SpdyProtocolException(Throwable cause) { - super(cause); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyRstStreamFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyRstStreamFrame.java deleted file mode 100644 index 44cadb2c608..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyRstStreamFrame.java +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol RST_STREAM Frame - */ -public interface SpdyRstStreamFrame extends SpdyStreamFrame { - - /** - * Returns the status of this frame. - */ - SpdyStreamStatus status(); - - /** - * Sets the status of this frame. 
- */ - SpdyRstStreamFrame setStatus(SpdyStreamStatus status); - - @Override - SpdyRstStreamFrame setStreamId(int streamId); - - @Override - SpdyRstStreamFrame setLast(boolean last); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySession.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySession.java deleted file mode 100644 index e34dc27f6d5..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySession.java +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.channel.ChannelPromise; -import io.netty.util.internal.PlatformDependent; - -import java.util.Comparator; -import java.util.Map; -import java.util.Queue; -import java.util.TreeMap; -import java.util.concurrent.ConcurrentLinkedQueue; -import java.util.concurrent.atomic.AtomicInteger; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.*; - -final class SpdySession { - - private final AtomicInteger activeLocalStreams = new AtomicInteger(); - private final AtomicInteger activeRemoteStreams = new AtomicInteger(); - private final Map activeStreams = PlatformDependent.newConcurrentHashMap(); - private final StreamComparator streamComparator = new StreamComparator(); - private final AtomicInteger sendWindowSize; - private final AtomicInteger receiveWindowSize; - - SpdySession(int sendWindowSize, int receiveWindowSize) { - this.sendWindowSize = new AtomicInteger(sendWindowSize); - this.receiveWindowSize = new AtomicInteger(receiveWindowSize); - } - - int numActiveStreams(boolean remote) { - if (remote) { - return activeRemoteStreams.get(); - } else { - return activeLocalStreams.get(); - } - } - - boolean noActiveStreams() { - return activeStreams.isEmpty(); - } - - boolean isActiveStream(int streamId) { - return activeStreams.containsKey(streamId); - } - - // Stream-IDs should be iterated in priority order - Map activeStreams() { - Map streams = new TreeMap(streamComparator); - streams.putAll(activeStreams); - return streams; - } - - void acceptStream( - int streamId, byte priority, boolean remoteSideClosed, boolean localSideClosed, - int sendWindowSize, int receiveWindowSize, boolean remote) { - if (!remoteSideClosed || !localSideClosed) { - StreamState state = activeStreams.put(streamId, new StreamState( - priority, remoteSideClosed, localSideClosed, sendWindowSize, receiveWindowSize)); - if (state == null) { - if (remote) { - activeRemoteStreams.incrementAndGet(); - } else { - 
activeLocalStreams.incrementAndGet(); - } - } - } - } - - private StreamState removeActiveStream(int streamId, boolean remote) { - StreamState state = activeStreams.remove(streamId); - if (state != null) { - if (remote) { - activeRemoteStreams.decrementAndGet(); - } else { - activeLocalStreams.decrementAndGet(); - } - } - return state; - } - - void removeStream(int streamId, Throwable cause, boolean remote) { - StreamState state = removeActiveStream(streamId, remote); - if (state != null) { - state.clearPendingWrites(cause); - } - } - - boolean isRemoteSideClosed(int streamId) { - StreamState state = activeStreams.get(streamId); - return state == null || state.isRemoteSideClosed(); - } - - void closeRemoteSide(int streamId, boolean remote) { - StreamState state = activeStreams.get(streamId); - if (state != null) { - state.closeRemoteSide(); - if (state.isLocalSideClosed()) { - removeActiveStream(streamId, remote); - } - } - } - - boolean isLocalSideClosed(int streamId) { - StreamState state = activeStreams.get(streamId); - return state == null || state.isLocalSideClosed(); - } - - void closeLocalSide(int streamId, boolean remote) { - StreamState state = activeStreams.get(streamId); - if (state != null) { - state.closeLocalSide(); - if (state.isRemoteSideClosed()) { - removeActiveStream(streamId, remote); - } - } - } - - /* - * hasReceivedReply and receivedReply are only called from channelRead() - * no need to synchronize access to the StreamState - */ - boolean hasReceivedReply(int streamId) { - StreamState state = activeStreams.get(streamId); - return state != null && state.hasReceivedReply(); - } - - void receivedReply(int streamId) { - StreamState state = activeStreams.get(streamId); - if (state != null) { - state.receivedReply(); - } - } - - int getSendWindowSize(int streamId) { - if (streamId == SPDY_SESSION_STREAM_ID) { - return sendWindowSize.get(); - } - - StreamState state = activeStreams.get(streamId); - return state != null ? 
state.getSendWindowSize() : -1; - } - - int updateSendWindowSize(int streamId, int deltaWindowSize) { - if (streamId == SPDY_SESSION_STREAM_ID) { - return sendWindowSize.addAndGet(deltaWindowSize); - } - - StreamState state = activeStreams.get(streamId); - return state != null ? state.updateSendWindowSize(deltaWindowSize) : -1; - } - - int updateReceiveWindowSize(int streamId, int deltaWindowSize) { - if (streamId == SPDY_SESSION_STREAM_ID) { - return receiveWindowSize.addAndGet(deltaWindowSize); - } - - StreamState state = activeStreams.get(streamId); - if (state == null) { - return -1; - } - if (deltaWindowSize > 0) { - state.setReceiveWindowSizeLowerBound(0); - } - return state.updateReceiveWindowSize(deltaWindowSize); - } - - int getReceiveWindowSizeLowerBound(int streamId) { - if (streamId == SPDY_SESSION_STREAM_ID) { - return 0; - } - - StreamState state = activeStreams.get(streamId); - return state != null ? state.getReceiveWindowSizeLowerBound() : 0; - } - - void updateAllSendWindowSizes(int deltaWindowSize) { - for (StreamState state: activeStreams.values()) { - state.updateSendWindowSize(deltaWindowSize); - } - } - - void updateAllReceiveWindowSizes(int deltaWindowSize) { - for (StreamState state: activeStreams.values()) { - state.updateReceiveWindowSize(deltaWindowSize); - if (deltaWindowSize < 0) { - state.setReceiveWindowSizeLowerBound(deltaWindowSize); - } - } - } - - boolean putPendingWrite(int streamId, PendingWrite pendingWrite) { - StreamState state = activeStreams.get(streamId); - return state != null && state.putPendingWrite(pendingWrite); - } - - PendingWrite getPendingWrite(int streamId) { - if (streamId == SPDY_SESSION_STREAM_ID) { - for (Map.Entry e: activeStreams().entrySet()) { - StreamState state = e.getValue(); - if (state.getSendWindowSize() > 0) { - PendingWrite pendingWrite = state.getPendingWrite(); - if (pendingWrite != null) { - return pendingWrite; - } - } - } - return null; - } - - StreamState state = activeStreams.get(streamId); 
- return state != null ? state.getPendingWrite() : null; - } - - PendingWrite removePendingWrite(int streamId) { - StreamState state = activeStreams.get(streamId); - return state != null ? state.removePendingWrite() : null; - } - - private static final class StreamState { - - private final byte priority; - private boolean remoteSideClosed; - private boolean localSideClosed; - private boolean receivedReply; - private final AtomicInteger sendWindowSize; - private final AtomicInteger receiveWindowSize; - private int receiveWindowSizeLowerBound; - private final Queue pendingWriteQueue = new ConcurrentLinkedQueue(); - - StreamState( - byte priority, boolean remoteSideClosed, boolean localSideClosed, - int sendWindowSize, int receiveWindowSize) { - this.priority = priority; - this.remoteSideClosed = remoteSideClosed; - this.localSideClosed = localSideClosed; - this.sendWindowSize = new AtomicInteger(sendWindowSize); - this.receiveWindowSize = new AtomicInteger(receiveWindowSize); - } - - byte getPriority() { - return priority; - } - - boolean isRemoteSideClosed() { - return remoteSideClosed; - } - - void closeRemoteSide() { - remoteSideClosed = true; - } - - boolean isLocalSideClosed() { - return localSideClosed; - } - - void closeLocalSide() { - localSideClosed = true; - } - - boolean hasReceivedReply() { - return receivedReply; - } - - void receivedReply() { - receivedReply = true; - } - - int getSendWindowSize() { - return sendWindowSize.get(); - } - - int updateSendWindowSize(int deltaWindowSize) { - return sendWindowSize.addAndGet(deltaWindowSize); - } - - int updateReceiveWindowSize(int deltaWindowSize) { - return receiveWindowSize.addAndGet(deltaWindowSize); - } - - int getReceiveWindowSizeLowerBound() { - return receiveWindowSizeLowerBound; - } - - void setReceiveWindowSizeLowerBound(int receiveWindowSizeLowerBound) { - this.receiveWindowSizeLowerBound = receiveWindowSizeLowerBound; - } - - boolean putPendingWrite(PendingWrite msg) { - return 
pendingWriteQueue.offer(msg); - } - - PendingWrite getPendingWrite() { - return pendingWriteQueue.peek(); - } - - PendingWrite removePendingWrite() { - return pendingWriteQueue.poll(); - } - - void clearPendingWrites(Throwable cause) { - for (;;) { - PendingWrite pendingWrite = pendingWriteQueue.poll(); - if (pendingWrite == null) { - break; - } - pendingWrite.fail(cause); - } - } - } - - private final class StreamComparator implements Comparator { - - StreamComparator() { } - - @Override - public int compare(Integer id1, Integer id2) { - StreamState state1 = activeStreams.get(id1); - StreamState state2 = activeStreams.get(id2); - - int result = state1.getPriority() - state2.getPriority(); - if (result != 0) { - return result; - } - - return id1 - id2; - } - } - - public static final class PendingWrite { - final SpdyDataFrame spdyDataFrame; - final ChannelPromise promise; - - PendingWrite(SpdyDataFrame spdyDataFrame, ChannelPromise promise) { - this.spdyDataFrame = spdyDataFrame; - this.promise = promise; - } - - void fail(Throwable cause) { - spdyDataFrame.release(); - promise.setFailure(cause); - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySessionHandler.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySessionHandler.java deleted file mode 100644 index 394f6c2e9a3..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySessionHandler.java +++ /dev/null @@ -1,858 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.channel.ChannelDuplexHandler; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; -import io.netty.util.internal.ThrowableUtil; - -import java.util.concurrent.atomic.AtomicInteger; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_SESSION_STREAM_ID; -import static io.netty.handler.codec.spdy.SpdyCodecUtil.isServerId; - -/** - * Manages streams within a SPDY session. - */ -public class SpdySessionHandler extends ChannelDuplexHandler { - - private static final SpdyProtocolException PROTOCOL_EXCEPTION = ThrowableUtil.unknownStackTrace( - new SpdyProtocolException(), SpdySessionHandler.class, "handleOutboundMessage(...)"); - private static final SpdyProtocolException STREAM_CLOSED = ThrowableUtil.unknownStackTrace( - new SpdyProtocolException("Stream closed"), SpdySessionHandler.class, "removeStream(...)"); - - private static final int DEFAULT_WINDOW_SIZE = 64 * 1024; // 64 KB default initial window size - private int initialSendWindowSize = DEFAULT_WINDOW_SIZE; - private int initialReceiveWindowSize = DEFAULT_WINDOW_SIZE; - private volatile int initialSessionReceiveWindowSize = DEFAULT_WINDOW_SIZE; - - private final SpdySession spdySession = new SpdySession(initialSendWindowSize, initialReceiveWindowSize); - private int lastGoodStreamId; - - private static final int DEFAULT_MAX_CONCURRENT_STREAMS = Integer.MAX_VALUE; - private int remoteConcurrentStreams = 
DEFAULT_MAX_CONCURRENT_STREAMS; - private int localConcurrentStreams = DEFAULT_MAX_CONCURRENT_STREAMS; - - private final AtomicInteger pings = new AtomicInteger(); - - private boolean sentGoAwayFrame; - private boolean receivedGoAwayFrame; - - private ChannelFutureListener closeSessionFutureListener; - - private final boolean server; - private final int minorVersion; - - /** - * Creates a new session handler. - * - * @param version the protocol version - * @param server {@code true} if and only if this session handler should - * handle the server endpoint of the connection. - * {@code false} if and only if this session handler should - * handle the client endpoint of the connection. - */ - public SpdySessionHandler(SpdyVersion version, boolean server) { - if (version == null) { - throw new NullPointerException("version"); - } - this.server = server; - minorVersion = version.getMinorVersion(); - } - - public void setSessionReceiveWindowSize(int sessionReceiveWindowSize) { - if (sessionReceiveWindowSize < 0) { - throw new IllegalArgumentException("sessionReceiveWindowSize"); - } - // This will not send a window update frame immediately. - // If this value increases the allowed receive window size, - // a WINDOW_UPDATE frame will be sent when only half of the - // session window size remains during data frame processing. - // If this value decreases the allowed receive window size, - // the window will be reduced as data frames are processed. - initialSessionReceiveWindowSize = sessionReceiveWindowSize; - } - - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - if (msg instanceof SpdyDataFrame) { - - /* - * SPDY Data frame processing requirements: - * - * If an endpoint receives a data frame for a Stream-ID which is not open - * and the endpoint has not sent a GOAWAY frame, it must issue a stream error - * with the error code INVALID_STREAM for the Stream-ID. 
- * - * If an endpoint which created the stream receives a data frame before receiving - * a SYN_REPLY on that stream, it is a protocol error, and the recipient must - * issue a stream error with the getStatus code PROTOCOL_ERROR for the Stream-ID. - * - * If an endpoint receives multiple data frames for invalid Stream-IDs, - * it may close the session. - * - * If an endpoint refuses a stream it must ignore any data frames for that stream. - * - * If an endpoint receives a data frame after the stream is half-closed from the - * sender, it must send a RST_STREAM frame with the getStatus STREAM_ALREADY_CLOSED. - * - * If an endpoint receives a data frame after the stream is closed, it must send - * a RST_STREAM frame with the getStatus PROTOCOL_ERROR. - */ - SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg; - int streamId = spdyDataFrame.streamId(); - - int deltaWindowSize = -1 * spdyDataFrame.content().readableBytes(); - int newSessionWindowSize = - spdySession.updateReceiveWindowSize(SPDY_SESSION_STREAM_ID, deltaWindowSize); - - // Check if session window size is reduced beyond allowable lower bound - if (newSessionWindowSize < 0) { - issueSessionError(ctx, SpdySessionStatus.PROTOCOL_ERROR); - return; - } - - // Send a WINDOW_UPDATE frame if less than half the session window size remains - if (newSessionWindowSize <= initialSessionReceiveWindowSize / 2) { - int sessionDeltaWindowSize = initialSessionReceiveWindowSize - newSessionWindowSize; - spdySession.updateReceiveWindowSize(SPDY_SESSION_STREAM_ID, sessionDeltaWindowSize); - SpdyWindowUpdateFrame spdyWindowUpdateFrame = - new DefaultSpdyWindowUpdateFrame(SPDY_SESSION_STREAM_ID, sessionDeltaWindowSize); - ctx.writeAndFlush(spdyWindowUpdateFrame); - } - - // Check if we received a data frame for a Stream-ID which is not open - - if (!spdySession.isActiveStream(streamId)) { - spdyDataFrame.release(); - if (streamId <= lastGoodStreamId) { - issueStreamError(ctx, streamId, SpdyStreamStatus.PROTOCOL_ERROR); - } else 
if (!sentGoAwayFrame) { - issueStreamError(ctx, streamId, SpdyStreamStatus.INVALID_STREAM); - } - return; - } - - // Check if we received a data frame for a stream which is half-closed - - if (spdySession.isRemoteSideClosed(streamId)) { - spdyDataFrame.release(); - issueStreamError(ctx, streamId, SpdyStreamStatus.STREAM_ALREADY_CLOSED); - return; - } - - // Check if we received a data frame before receiving a SYN_REPLY - if (!isRemoteInitiatedId(streamId) && !spdySession.hasReceivedReply(streamId)) { - spdyDataFrame.release(); - issueStreamError(ctx, streamId, SpdyStreamStatus.PROTOCOL_ERROR); - return; - } - - /* - * SPDY Data frame flow control processing requirements: - * - * Recipient should not send a WINDOW_UPDATE frame as it consumes the last data frame. - */ - - // Update receive window size - int newWindowSize = spdySession.updateReceiveWindowSize(streamId, deltaWindowSize); - - // Window size can become negative if we sent a SETTINGS frame that reduces the - // size of the transfer window after the peer has written data frames. - // The value is bounded by the length that SETTINGS frame decrease the window. - // This difference is stored for the session when writing the SETTINGS frame - // and is cleared once we send a WINDOW_UPDATE frame. 
- if (newWindowSize < spdySession.getReceiveWindowSizeLowerBound(streamId)) { - spdyDataFrame.release(); - issueStreamError(ctx, streamId, SpdyStreamStatus.FLOW_CONTROL_ERROR); - return; - } - - // Window size became negative due to sender writing frame before receiving SETTINGS - // Send data frames upstream in initialReceiveWindowSize chunks - if (newWindowSize < 0) { - while (spdyDataFrame.content().readableBytes() > initialReceiveWindowSize) { - SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame( - streamId, spdyDataFrame.content().readRetainedSlice(initialReceiveWindowSize)); - ctx.writeAndFlush(partialDataFrame); - } - } - - // Send a WINDOW_UPDATE frame if less than half the stream window size remains - if (newWindowSize <= initialReceiveWindowSize / 2 && !spdyDataFrame.isLast()) { - int streamDeltaWindowSize = initialReceiveWindowSize - newWindowSize; - spdySession.updateReceiveWindowSize(streamId, streamDeltaWindowSize); - SpdyWindowUpdateFrame spdyWindowUpdateFrame = - new DefaultSpdyWindowUpdateFrame(streamId, streamDeltaWindowSize); - ctx.writeAndFlush(spdyWindowUpdateFrame); - } - - // Close the remote side of the stream if this is the last frame - if (spdyDataFrame.isLast()) { - halfCloseStream(streamId, true, ctx.newSucceededFuture()); - } - - } else if (msg instanceof SpdySynStreamFrame) { - - /* - * SPDY SYN_STREAM frame processing requirements: - * - * If an endpoint receives a SYN_STREAM with a Stream-ID that is less than - * any previously received SYN_STREAM, it must issue a session error with - * the getStatus PROTOCOL_ERROR. - * - * If an endpoint receives multiple SYN_STREAM frames with the same active - * Stream-ID, it must issue a stream error with the getStatus code PROTOCOL_ERROR. - * - * The recipient can reject a stream by sending a stream error with the - * getStatus code REFUSED_STREAM. 
- */ - - SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg; - int streamId = spdySynStreamFrame.streamId(); - - // Check if we received a valid SYN_STREAM frame - if (spdySynStreamFrame.isInvalid() || - !isRemoteInitiatedId(streamId) || - spdySession.isActiveStream(streamId)) { - issueStreamError(ctx, streamId, SpdyStreamStatus.PROTOCOL_ERROR); - return; - } - - // Stream-IDs must be monotonically increasing - if (streamId <= lastGoodStreamId) { - issueSessionError(ctx, SpdySessionStatus.PROTOCOL_ERROR); - return; - } - - // Try to accept the stream - byte priority = spdySynStreamFrame.priority(); - boolean remoteSideClosed = spdySynStreamFrame.isLast(); - boolean localSideClosed = spdySynStreamFrame.isUnidirectional(); - if (!acceptStream(streamId, priority, remoteSideClosed, localSideClosed)) { - issueStreamError(ctx, streamId, SpdyStreamStatus.REFUSED_STREAM); - return; - } - - } else if (msg instanceof SpdySynReplyFrame) { - - /* - * SPDY SYN_REPLY frame processing requirements: - * - * If an endpoint receives multiple SYN_REPLY frames for the same active Stream-ID - * it must issue a stream error with the getStatus code STREAM_IN_USE. 
- */ - - SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg; - int streamId = spdySynReplyFrame.streamId(); - - // Check if we received a valid SYN_REPLY frame - if (spdySynReplyFrame.isInvalid() || - isRemoteInitiatedId(streamId) || - spdySession.isRemoteSideClosed(streamId)) { - issueStreamError(ctx, streamId, SpdyStreamStatus.INVALID_STREAM); - return; - } - - // Check if we have received multiple frames for the same Stream-ID - if (spdySession.hasReceivedReply(streamId)) { - issueStreamError(ctx, streamId, SpdyStreamStatus.STREAM_IN_USE); - return; - } - - spdySession.receivedReply(streamId); - - // Close the remote side of the stream if this is the last frame - if (spdySynReplyFrame.isLast()) { - halfCloseStream(streamId, true, ctx.newSucceededFuture()); - } - - } else if (msg instanceof SpdyRstStreamFrame) { - - /* - * SPDY RST_STREAM frame processing requirements: - * - * After receiving a RST_STREAM on a stream, the receiver must not send - * additional frames on that stream. - * - * An endpoint must not send a RST_STREAM in response to a RST_STREAM. - */ - - SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg; - removeStream(spdyRstStreamFrame.streamId(), ctx.newSucceededFuture()); - - } else if (msg instanceof SpdySettingsFrame) { - - SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg; - - int settingsMinorVersion = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MINOR_VERSION); - if (settingsMinorVersion >= 0 && settingsMinorVersion != minorVersion) { - // Settings frame had the wrong minor version - issueSessionError(ctx, SpdySessionStatus.PROTOCOL_ERROR); - return; - } - - int newConcurrentStreams = - spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS); - if (newConcurrentStreams >= 0) { - remoteConcurrentStreams = newConcurrentStreams; - } - - // Persistence flag are inconsistent with the use of SETTINGS to communicate - // the initial window size. 
Remove flags from the sender requesting that the - // value be persisted. Remove values that the sender indicates are persisted. - if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) { - spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE); - } - spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false); - - int newInitialWindowSize = - spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE); - if (newInitialWindowSize >= 0) { - updateInitialSendWindowSize(newInitialWindowSize); - } - - } else if (msg instanceof SpdyPingFrame) { - - /* - * SPDY PING frame processing requirements: - * - * Receivers of a PING frame should send an identical frame to the sender - * as soon as possible. - * - * Receivers of a PING frame must ignore frames that it did not initiate - */ - - SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg; - - if (isRemoteInitiatedId(spdyPingFrame.id())) { - ctx.writeAndFlush(spdyPingFrame); - return; - } - - // Note: only checks that there are outstanding pings since uniqueness is not enforced - if (pings.get() == 0) { - return; - } - pings.getAndDecrement(); - - } else if (msg instanceof SpdyGoAwayFrame) { - - receivedGoAwayFrame = true; - - } else if (msg instanceof SpdyHeadersFrame) { - - SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg; - int streamId = spdyHeadersFrame.streamId(); - - // Check if we received a valid HEADERS frame - if (spdyHeadersFrame.isInvalid()) { - issueStreamError(ctx, streamId, SpdyStreamStatus.PROTOCOL_ERROR); - return; - } - - if (spdySession.isRemoteSideClosed(streamId)) { - issueStreamError(ctx, streamId, SpdyStreamStatus.INVALID_STREAM); - return; - } - - // Close the remote side of the stream if this is the last frame - if (spdyHeadersFrame.isLast()) { - halfCloseStream(streamId, true, ctx.newSucceededFuture()); - } - - } else if (msg instanceof SpdyWindowUpdateFrame) { - - /* - * SPDY WINDOW_UPDATE frame 
processing requirements: - * - * Receivers of a WINDOW_UPDATE that cause the window size to exceed 2^31 - * must send a RST_STREAM with the getStatus code FLOW_CONTROL_ERROR. - * - * Sender should ignore all WINDOW_UPDATE frames associated with a stream - * after sending the last frame for the stream. - */ - - SpdyWindowUpdateFrame spdyWindowUpdateFrame = (SpdyWindowUpdateFrame) msg; - int streamId = spdyWindowUpdateFrame.streamId(); - int deltaWindowSize = spdyWindowUpdateFrame.deltaWindowSize(); - - // Ignore frames for half-closed streams - if (streamId != SPDY_SESSION_STREAM_ID && spdySession.isLocalSideClosed(streamId)) { - return; - } - - // Check for numerical overflow - if (spdySession.getSendWindowSize(streamId) > Integer.MAX_VALUE - deltaWindowSize) { - if (streamId == SPDY_SESSION_STREAM_ID) { - issueSessionError(ctx, SpdySessionStatus.PROTOCOL_ERROR); - } else { - issueStreamError(ctx, streamId, SpdyStreamStatus.FLOW_CONTROL_ERROR); - } - return; - } - - updateSendWindowSize(ctx, streamId, deltaWindowSize); - } - - ctx.fireChannelRead(msg); - } - - @Override - public void channelInactive(ChannelHandlerContext ctx) throws Exception { - for (Integer streamId: spdySession.activeStreams().keySet()) { - removeStream(streamId, ctx.newSucceededFuture()); - } - ctx.fireChannelInactive(); - } - - @Override - public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) throws Exception { - if (cause instanceof SpdyProtocolException) { - issueSessionError(ctx, SpdySessionStatus.PROTOCOL_ERROR); - } - - ctx.fireExceptionCaught(cause); - } - - @Override - public void close(ChannelHandlerContext ctx, ChannelPromise promise) throws Exception { - sendGoAwayFrame(ctx, promise); - } - - @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { - if (msg instanceof SpdyDataFrame || - msg instanceof SpdySynStreamFrame || - msg instanceof SpdySynReplyFrame || - msg instanceof SpdyRstStreamFrame || - msg 
instanceof SpdySettingsFrame || - msg instanceof SpdyPingFrame || - msg instanceof SpdyGoAwayFrame || - msg instanceof SpdyHeadersFrame || - msg instanceof SpdyWindowUpdateFrame) { - - handleOutboundMessage(ctx, msg, promise); - } else { - ctx.write(msg, promise); - } - } - - private void handleOutboundMessage(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { - if (msg instanceof SpdyDataFrame) { - - SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg; - int streamId = spdyDataFrame.streamId(); - - // Frames must not be sent on half-closed streams - if (spdySession.isLocalSideClosed(streamId)) { - spdyDataFrame.release(); - promise.setFailure(PROTOCOL_EXCEPTION); - return; - } - - /* - * SPDY Data frame flow control processing requirements: - * - * Sender must not send a data frame with data length greater - * than the transfer window size. - * - * After sending each data frame, the sender decrements its - * transfer window size by the amount of data transmitted. - * - * When the window size becomes less than or equal to 0, the - * sender must pause transmitting data frames. 
- */ - - int dataLength = spdyDataFrame.content().readableBytes(); - int sendWindowSize = spdySession.getSendWindowSize(streamId); - int sessionSendWindowSize = spdySession.getSendWindowSize(SPDY_SESSION_STREAM_ID); - sendWindowSize = Math.min(sendWindowSize, sessionSendWindowSize); - - if (sendWindowSize <= 0) { - // Stream is stalled -- enqueue Data frame and return - spdySession.putPendingWrite(streamId, new SpdySession.PendingWrite(spdyDataFrame, promise)); - return; - } else if (sendWindowSize < dataLength) { - // Stream is not stalled but we cannot send the entire frame - spdySession.updateSendWindowSize(streamId, -1 * sendWindowSize); - spdySession.updateSendWindowSize(SPDY_SESSION_STREAM_ID, -1 * sendWindowSize); - - // Create a partial data frame whose length is the current window size - SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame( - streamId, spdyDataFrame.content().readRetainedSlice(sendWindowSize)); - - // Enqueue the remaining data (will be the first frame queued) - spdySession.putPendingWrite(streamId, new SpdySession.PendingWrite(spdyDataFrame, promise)); - - // The transfer window size is pre-decremented when sending a data frame downstream. - // Close the session on write failures that leave the transfer window in a corrupt state. - final ChannelHandlerContext context = ctx; - ctx.write(partialDataFrame).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - issueSessionError(context, SpdySessionStatus.INTERNAL_ERROR); - } - } - }); - return; - } else { - // Window size is large enough to send entire data frame - spdySession.updateSendWindowSize(streamId, -1 * dataLength); - spdySession.updateSendWindowSize(SPDY_SESSION_STREAM_ID, -1 * dataLength); - - // The transfer window size is pre-decremented when sending a data frame downstream. - // Close the session on write failures that leave the transfer window in a corrupt state. 
- final ChannelHandlerContext context = ctx; - promise.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - issueSessionError(context, SpdySessionStatus.INTERNAL_ERROR); - } - } - }); - } - - // Close the local side of the stream if this is the last frame - if (spdyDataFrame.isLast()) { - halfCloseStream(streamId, false, promise); - } - - } else if (msg instanceof SpdySynStreamFrame) { - - SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg; - int streamId = spdySynStreamFrame.streamId(); - - if (isRemoteInitiatedId(streamId)) { - promise.setFailure(PROTOCOL_EXCEPTION); - return; - } - - byte priority = spdySynStreamFrame.priority(); - boolean remoteSideClosed = spdySynStreamFrame.isUnidirectional(); - boolean localSideClosed = spdySynStreamFrame.isLast(); - if (!acceptStream(streamId, priority, remoteSideClosed, localSideClosed)) { - promise.setFailure(PROTOCOL_EXCEPTION); - return; - } - - } else if (msg instanceof SpdySynReplyFrame) { - - SpdySynReplyFrame spdySynReplyFrame = (SpdySynReplyFrame) msg; - int streamId = spdySynReplyFrame.streamId(); - - // Frames must not be sent on half-closed streams - if (!isRemoteInitiatedId(streamId) || spdySession.isLocalSideClosed(streamId)) { - promise.setFailure(PROTOCOL_EXCEPTION); - return; - } - - // Close the local side of the stream if this is the last frame - if (spdySynReplyFrame.isLast()) { - halfCloseStream(streamId, false, promise); - } - - } else if (msg instanceof SpdyRstStreamFrame) { - - SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg; - removeStream(spdyRstStreamFrame.streamId(), promise); - - } else if (msg instanceof SpdySettingsFrame) { - - SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg; - - int settingsMinorVersion = spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MINOR_VERSION); - if (settingsMinorVersion >= 0 && settingsMinorVersion != minorVersion) 
{ - // Settings frame had the wrong minor version - promise.setFailure(PROTOCOL_EXCEPTION); - return; - } - - int newConcurrentStreams = - spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS); - if (newConcurrentStreams >= 0) { - localConcurrentStreams = newConcurrentStreams; - } - - // Persistence flag are inconsistent with the use of SETTINGS to communicate - // the initial window size. Remove flags from the sender requesting that the - // value be persisted. Remove values that the sender indicates are persisted. - if (spdySettingsFrame.isPersisted(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE)) { - spdySettingsFrame.removeValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE); - } - spdySettingsFrame.setPersistValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE, false); - - int newInitialWindowSize = - spdySettingsFrame.getValue(SpdySettingsFrame.SETTINGS_INITIAL_WINDOW_SIZE); - if (newInitialWindowSize >= 0) { - updateInitialReceiveWindowSize(newInitialWindowSize); - } - - } else if (msg instanceof SpdyPingFrame) { - - SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg; - if (isRemoteInitiatedId(spdyPingFrame.id())) { - ctx.fireExceptionCaught(new IllegalArgumentException( - "invalid PING ID: " + spdyPingFrame.id())); - return; - } - pings.getAndIncrement(); - - } else if (msg instanceof SpdyGoAwayFrame) { - - // Why is this being sent? Intercept it and fail the write. 
- // Should have sent a CLOSE ChannelStateEvent - promise.setFailure(PROTOCOL_EXCEPTION); - return; - - } else if (msg instanceof SpdyHeadersFrame) { - - SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg; - int streamId = spdyHeadersFrame.streamId(); - - // Frames must not be sent on half-closed streams - if (spdySession.isLocalSideClosed(streamId)) { - promise.setFailure(PROTOCOL_EXCEPTION); - return; - } - - // Close the local side of the stream if this is the last frame - if (spdyHeadersFrame.isLast()) { - halfCloseStream(streamId, false, promise); - } - - } else if (msg instanceof SpdyWindowUpdateFrame) { - - // Why is this being sent? Intercept it and fail the write. - promise.setFailure(PROTOCOL_EXCEPTION); - return; - } - - ctx.write(msg, promise); - } - - /* - * SPDY Session Error Handling: - * - * When a session error occurs, the endpoint encountering the error must first - * send a GOAWAY frame with the Stream-ID of the most recently received stream - * from the remote endpoint, and the error code for why the session is terminating. - * - * After sending the GOAWAY frame, the endpoint must close the TCP connection. - */ - private void issueSessionError( - ChannelHandlerContext ctx, SpdySessionStatus status) { - - sendGoAwayFrame(ctx, status).addListener(new ClosingChannelFutureListener(ctx, ctx.newPromise())); - } - - /* - * SPDY Stream Error Handling: - * - * Upon a stream error, the endpoint must send a RST_STREAM frame which contains - * the Stream-ID for the stream where the error occurred and the error getStatus which - * caused the error. - * - * After sending the RST_STREAM, the stream is closed to the sending endpoint. 
- * - * Note: this is only called by the worker thread - */ - private void issueStreamError(ChannelHandlerContext ctx, int streamId, SpdyStreamStatus status) { - boolean fireChannelRead = !spdySession.isRemoteSideClosed(streamId); - ChannelPromise promise = ctx.newPromise(); - removeStream(streamId, promise); - - SpdyRstStreamFrame spdyRstStreamFrame = new DefaultSpdyRstStreamFrame(streamId, status); - ctx.writeAndFlush(spdyRstStreamFrame, promise); - if (fireChannelRead) { - ctx.fireChannelRead(spdyRstStreamFrame); - } - } - - /* - * Helper functions - */ - - private boolean isRemoteInitiatedId(int id) { - boolean serverId = isServerId(id); - return server && !serverId || !server && serverId; - } - - // need to synchronize to prevent new streams from being created while updating active streams - private void updateInitialSendWindowSize(int newInitialWindowSize) { - int deltaWindowSize = newInitialWindowSize - initialSendWindowSize; - initialSendWindowSize = newInitialWindowSize; - spdySession.updateAllSendWindowSizes(deltaWindowSize); - } - - // need to synchronize to prevent new streams from being created while updating active streams - private void updateInitialReceiveWindowSize(int newInitialWindowSize) { - int deltaWindowSize = newInitialWindowSize - initialReceiveWindowSize; - initialReceiveWindowSize = newInitialWindowSize; - spdySession.updateAllReceiveWindowSizes(deltaWindowSize); - } - - // need to synchronize accesses to sentGoAwayFrame, lastGoodStreamId, and initial window sizes - private boolean acceptStream( - int streamId, byte priority, boolean remoteSideClosed, boolean localSideClosed) { - // Cannot initiate any new streams after receiving or sending GOAWAY - if (receivedGoAwayFrame || sentGoAwayFrame) { - return false; - } - - boolean remote = isRemoteInitiatedId(streamId); - int maxConcurrentStreams = remote ? 
localConcurrentStreams : remoteConcurrentStreams; - if (spdySession.numActiveStreams(remote) >= maxConcurrentStreams) { - return false; - } - spdySession.acceptStream( - streamId, priority, remoteSideClosed, localSideClosed, - initialSendWindowSize, initialReceiveWindowSize, remote); - if (remote) { - lastGoodStreamId = streamId; - } - return true; - } - - private void halfCloseStream(int streamId, boolean remote, ChannelFuture future) { - if (remote) { - spdySession.closeRemoteSide(streamId, isRemoteInitiatedId(streamId)); - } else { - spdySession.closeLocalSide(streamId, isRemoteInitiatedId(streamId)); - } - if (closeSessionFutureListener != null && spdySession.noActiveStreams()) { - future.addListener(closeSessionFutureListener); - } - } - - private void removeStream(int streamId, ChannelFuture future) { - spdySession.removeStream(streamId, STREAM_CLOSED, isRemoteInitiatedId(streamId)); - - if (closeSessionFutureListener != null && spdySession.noActiveStreams()) { - future.addListener(closeSessionFutureListener); - } - } - - private void updateSendWindowSize(final ChannelHandlerContext ctx, int streamId, int deltaWindowSize) { - spdySession.updateSendWindowSize(streamId, deltaWindowSize); - - while (true) { - // Check if we have unblocked a stalled stream - SpdySession.PendingWrite pendingWrite = spdySession.getPendingWrite(streamId); - if (pendingWrite == null) { - return; - } - - SpdyDataFrame spdyDataFrame = pendingWrite.spdyDataFrame; - int dataFrameSize = spdyDataFrame.content().readableBytes(); - int writeStreamId = spdyDataFrame.streamId(); - int sendWindowSize = spdySession.getSendWindowSize(writeStreamId); - int sessionSendWindowSize = spdySession.getSendWindowSize(SPDY_SESSION_STREAM_ID); - sendWindowSize = Math.min(sendWindowSize, sessionSendWindowSize); - - if (sendWindowSize <= 0) { - return; - } else if (sendWindowSize < dataFrameSize) { - // We can send a partial frame - spdySession.updateSendWindowSize(writeStreamId, -1 * sendWindowSize); - 
spdySession.updateSendWindowSize(SPDY_SESSION_STREAM_ID, -1 * sendWindowSize); - - // Create a partial data frame whose length is the current window size - SpdyDataFrame partialDataFrame = new DefaultSpdyDataFrame( - writeStreamId, spdyDataFrame.content().readRetainedSlice(sendWindowSize)); - - // The transfer window size is pre-decremented when sending a data frame downstream. - // Close the session on write failures that leave the transfer window in a corrupt state. - ctx.writeAndFlush(partialDataFrame).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - issueSessionError(ctx, SpdySessionStatus.INTERNAL_ERROR); - } - } - }); - } else { - // Window size is large enough to send entire data frame - spdySession.removePendingWrite(writeStreamId); - spdySession.updateSendWindowSize(writeStreamId, -1 * dataFrameSize); - spdySession.updateSendWindowSize(SPDY_SESSION_STREAM_ID, -1 * dataFrameSize); - - // Close the local side of the stream if this is the last frame - if (spdyDataFrame.isLast()) { - halfCloseStream(writeStreamId, false, pendingWrite.promise); - } - - // The transfer window size is pre-decremented when sending a data frame downstream. - // Close the session on write failures that leave the transfer window in a corrupt state. 
- ctx.writeAndFlush(spdyDataFrame, pendingWrite.promise).addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { - issueSessionError(ctx, SpdySessionStatus.INTERNAL_ERROR); - } - } - }); - } - } - } - - private void sendGoAwayFrame(ChannelHandlerContext ctx, ChannelPromise future) { - // Avoid NotYetConnectedException - if (!ctx.channel().isActive()) { - ctx.close(future); - return; - } - - ChannelFuture f = sendGoAwayFrame(ctx, SpdySessionStatus.OK); - if (spdySession.noActiveStreams()) { - f.addListener(new ClosingChannelFutureListener(ctx, future)); - } else { - closeSessionFutureListener = new ClosingChannelFutureListener(ctx, future); - } - // FIXME: Close the connection forcibly after timeout. - } - - private ChannelFuture sendGoAwayFrame( - ChannelHandlerContext ctx, SpdySessionStatus status) { - if (!sentGoAwayFrame) { - sentGoAwayFrame = true; - SpdyGoAwayFrame spdyGoAwayFrame = new DefaultSpdyGoAwayFrame(lastGoodStreamId, status); - return ctx.writeAndFlush(spdyGoAwayFrame); - } else { - return ctx.newSucceededFuture(); - } - } - - private static final class ClosingChannelFutureListener implements ChannelFutureListener { - private final ChannelHandlerContext ctx; - private final ChannelPromise promise; - - ClosingChannelFutureListener(ChannelHandlerContext ctx, ChannelPromise promise) { - this.ctx = ctx; - this.promise = promise; - } - - @Override - public void operationComplete(ChannelFuture sentGoAwayFuture) throws Exception { - ctx.close(promise); - } - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySessionStatus.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySessionStatus.java deleted file mode 100644 index fd79d1ed125..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySessionStatus.java +++ /dev/null @@ -1,113 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project 
licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * The SPDY session status code and its description. - */ -public class SpdySessionStatus implements Comparable { - - /** - * 0 OK - */ - public static final SpdySessionStatus OK = - new SpdySessionStatus(0, "OK"); - - /** - * 1 Protocol Error - */ - public static final SpdySessionStatus PROTOCOL_ERROR = - new SpdySessionStatus(1, "PROTOCOL_ERROR"); - - /** - * 2 Internal Error - */ - public static final SpdySessionStatus INTERNAL_ERROR = - new SpdySessionStatus(2, "INTERNAL_ERROR"); - - /** - * Returns the {@link SpdySessionStatus} represented by the specified code. - * If the specified code is a defined SPDY status code, a cached instance - * will be returned. Otherwise, a new instance will be returned. - */ - public static SpdySessionStatus valueOf(int code) { - switch (code) { - case 0: - return OK; - case 1: - return PROTOCOL_ERROR; - case 2: - return INTERNAL_ERROR; - } - - return new SpdySessionStatus(code, "UNKNOWN (" + code + ')'); - } - - private final int code; - - private final String statusPhrase; - - /** - * Creates a new instance with the specified {@code code} and its - * {@code statusPhrase}. 
- */ - public SpdySessionStatus(int code, String statusPhrase) { - if (statusPhrase == null) { - throw new NullPointerException("statusPhrase"); - } - - this.code = code; - this.statusPhrase = statusPhrase; - } - - /** - * Returns the code of this status. - */ - public int code() { - return code; - } - - /** - * Returns the status phrase of this status. - */ - public String statusPhrase() { - return statusPhrase; - } - - @Override - public int hashCode() { - return code(); - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof SpdySessionStatus)) { - return false; - } - - return code() == ((SpdySessionStatus) o).code(); - } - - @Override - public String toString() { - return statusPhrase(); - } - - @Override - public int compareTo(SpdySessionStatus o) { - return code() - o.code(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySettingsFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySettingsFrame.java deleted file mode 100644 index e24f41a0200..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySettingsFrame.java +++ /dev/null @@ -1,107 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import java.util.Set; - -/** - * A SPDY Protocol SETTINGS Frame - */ -public interface SpdySettingsFrame extends SpdyFrame { - - int SETTINGS_MINOR_VERSION = 0; - int SETTINGS_UPLOAD_BANDWIDTH = 1; - int SETTINGS_DOWNLOAD_BANDWIDTH = 2; - int SETTINGS_ROUND_TRIP_TIME = 3; - int SETTINGS_MAX_CONCURRENT_STREAMS = 4; - int SETTINGS_CURRENT_CWND = 5; - int SETTINGS_DOWNLOAD_RETRANS_RATE = 6; - int SETTINGS_INITIAL_WINDOW_SIZE = 7; - int SETTINGS_CLIENT_CERTIFICATE_VECTOR_SIZE = 8; - - /** - * Returns a {@code Set} of the setting IDs. - * The set's iterator will return the IDs in ascending order. - */ - Set ids(); - - /** - * Returns {@code true} if the setting ID has a value. - */ - boolean isSet(int id); - - /** - * Returns the value of the setting ID. - * Returns -1 if the setting ID is not set. - */ - int getValue(int id); - - /** - * Sets the value of the setting ID. - * The ID cannot be negative and cannot exceed 16777215. - */ - SpdySettingsFrame setValue(int id, int value); - - /** - * Sets the value of the setting ID. - * Sets if the setting should be persisted (should only be set by the server). - * Sets if the setting is persisted (should only be set by the client). - * The ID cannot be negative and cannot exceed 16777215. - */ - SpdySettingsFrame setValue(int id, int value, boolean persistVal, boolean persisted); - - /** - * Removes the value of the setting ID. - * Removes all persistence information for the setting. - */ - SpdySettingsFrame removeValue(int id); - - /** - * Returns {@code true} if this setting should be persisted. - * Returns {@code false} if this setting should not be persisted - * or if the setting ID has no value. - */ - boolean isPersistValue(int id); - - /** - * Sets if this setting should be persisted. - * Has no effect if the setting ID has no value. - */ - SpdySettingsFrame setPersistValue(int id, boolean persistValue); - - /** - * Returns {@code true} if this setting is persisted. 
- * Returns {@code false} if this setting should not be persisted - * or if the setting ID has no value. - */ - boolean isPersisted(int id); - - /** - * Sets if this setting is persisted. - * Has no effect if the setting ID has no value. - */ - SpdySettingsFrame setPersisted(int id, boolean persisted); - - /** - * Returns {@code true} if previously persisted settings should be cleared. - */ - boolean clearPreviouslyPersistedSettings(); - - /** - * Sets if previously persisted settings should be cleared. - */ - SpdySettingsFrame setClearPreviouslyPersistedSettings(boolean clear); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyStreamFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyStreamFrame.java deleted file mode 100644 index 09c9f2a1791..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyStreamFrame.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol Frame that is associated with an individual SPDY Stream - */ -public interface SpdyStreamFrame extends SpdyFrame { - - /** - * Returns the Stream-ID of this frame. - */ - int streamId(); - - /** - * Sets the Stream-ID of this frame. The Stream-ID must be positive. 
- */ - SpdyStreamFrame setStreamId(int streamID); - - /** - * Returns {@code true} if this frame is the last frame to be transmitted - * on the stream. - */ - boolean isLast(); - - /** - * Sets if this frame is the last frame to be transmitted on the stream. - */ - SpdyStreamFrame setLast(boolean last); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyStreamStatus.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyStreamStatus.java deleted file mode 100644 index 75ed740a2af..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyStreamStatus.java +++ /dev/null @@ -1,187 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * The SPDY stream status code and its description. 
- */ -public class SpdyStreamStatus implements Comparable { - - /** - * 1 Protocol Error - */ - public static final SpdyStreamStatus PROTOCOL_ERROR = - new SpdyStreamStatus(1, "PROTOCOL_ERROR"); - - /** - * 2 Invalid Stream - */ - public static final SpdyStreamStatus INVALID_STREAM = - new SpdyStreamStatus(2, "INVALID_STREAM"); - - /** - * 3 Refused Stream - */ - public static final SpdyStreamStatus REFUSED_STREAM = - new SpdyStreamStatus(3, "REFUSED_STREAM"); - - /** - * 4 Unsupported Version - */ - public static final SpdyStreamStatus UNSUPPORTED_VERSION = - new SpdyStreamStatus(4, "UNSUPPORTED_VERSION"); - - /** - * 5 Cancel - */ - public static final SpdyStreamStatus CANCEL = - new SpdyStreamStatus(5, "CANCEL"); - - /** - * 6 Internal Error - */ - public static final SpdyStreamStatus INTERNAL_ERROR = - new SpdyStreamStatus(6, "INTERNAL_ERROR"); - - /** - * 7 Flow Control Error - */ - public static final SpdyStreamStatus FLOW_CONTROL_ERROR = - new SpdyStreamStatus(7, "FLOW_CONTROL_ERROR"); - - /** - * 8 Stream In Use - */ - public static final SpdyStreamStatus STREAM_IN_USE = - new SpdyStreamStatus(8, "STREAM_IN_USE"); - - /** - * 9 Stream Already Closed - */ - public static final SpdyStreamStatus STREAM_ALREADY_CLOSED = - new SpdyStreamStatus(9, "STREAM_ALREADY_CLOSED"); - - /** - * 10 Invalid Credentials - */ - public static final SpdyStreamStatus INVALID_CREDENTIALS = - new SpdyStreamStatus(10, "INVALID_CREDENTIALS"); - - /** - * 11 Frame Too Large - */ - public static final SpdyStreamStatus FRAME_TOO_LARGE = - new SpdyStreamStatus(11, "FRAME_TOO_LARGE"); - - /** - * Returns the {@link SpdyStreamStatus} represented by the specified code. - * If the specified code is a defined SPDY status code, a cached instance - * will be returned. Otherwise, a new instance will be returned. 
- */ - public static SpdyStreamStatus valueOf(int code) { - if (code == 0) { - throw new IllegalArgumentException( - "0 is not a valid status code for a RST_STREAM"); - } - - switch (code) { - case 1: - return PROTOCOL_ERROR; - case 2: - return INVALID_STREAM; - case 3: - return REFUSED_STREAM; - case 4: - return UNSUPPORTED_VERSION; - case 5: - return CANCEL; - case 6: - return INTERNAL_ERROR; - case 7: - return FLOW_CONTROL_ERROR; - case 8: - return STREAM_IN_USE; - case 9: - return STREAM_ALREADY_CLOSED; - case 10: - return INVALID_CREDENTIALS; - case 11: - return FRAME_TOO_LARGE; - } - - return new SpdyStreamStatus(code, "UNKNOWN (" + code + ')'); - } - - private final int code; - - private final String statusPhrase; - - /** - * Creates a new instance with the specified {@code code} and its - * {@code statusPhrase}. - */ - public SpdyStreamStatus(int code, String statusPhrase) { - if (code == 0) { - throw new IllegalArgumentException( - "0 is not a valid status code for a RST_STREAM"); - } - - if (statusPhrase == null) { - throw new NullPointerException("statusPhrase"); - } - - this.code = code; - this.statusPhrase = statusPhrase; - } - - /** - * Returns the code of this status. - */ - public int code() { - return code; - } - - /** - * Returns the status phrase of this status. 
- */ - public String statusPhrase() { - return statusPhrase; - } - - @Override - public int hashCode() { - return code(); - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof SpdyStreamStatus)) { - return false; - } - - return code() == ((SpdyStreamStatus) o).code(); - } - - @Override - public String toString() { - return statusPhrase(); - } - - @Override - public int compareTo(SpdyStreamStatus o) { - return code() - o.code(); - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySynReplyFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySynReplyFrame.java deleted file mode 100644 index 4bce003620e..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySynReplyFrame.java +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol SYN_REPLY Frame - */ -public interface SpdySynReplyFrame extends SpdyHeadersFrame { - - @Override - SpdySynReplyFrame setStreamId(int streamID); - - @Override - SpdySynReplyFrame setLast(boolean last); - - @Override - SpdySynReplyFrame setInvalid(); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySynStreamFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySynStreamFrame.java deleted file mode 100644 index f2efb8cabe4..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdySynStreamFrame.java +++ /dev/null @@ -1,65 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol SYN_STREAM Frame - */ -public interface SpdySynStreamFrame extends SpdyHeadersFrame { - - /** - * Returns the Associated-To-Stream-ID of this frame. - */ - int associatedStreamId(); - - /** - * Sets the Associated-To-Stream-ID of this frame. - * The Associated-To-Stream-ID cannot be negative. - */ - SpdySynStreamFrame setAssociatedStreamId(int associatedStreamId); - - /** - * Returns the priority of the stream. - */ - byte priority(); - - /** - * Sets the priority of the stream. - * The priority must be between 0 and 7 inclusive. 
- */ - SpdySynStreamFrame setPriority(byte priority); - - /** - * Returns {@code true} if the stream created with this frame is to be - * considered half-closed to the receiver. - */ - boolean isUnidirectional(); - - /** - * Sets if the stream created with this frame is to be considered - * half-closed to the receiver. - */ - SpdySynStreamFrame setUnidirectional(boolean unidirectional); - - @Override - SpdySynStreamFrame setStreamId(int streamID); - - @Override - SpdySynStreamFrame setLast(boolean last); - - @Override - SpdySynStreamFrame setInvalid(); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyVersion.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyVersion.java deleted file mode 100644 index 030ee94ec10..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyVersion.java +++ /dev/null @@ -1,36 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -public enum SpdyVersion { - SPDY_3_1 (3, 1); - - private final int version; - private final int minorVersion; - - SpdyVersion(int version, int minorVersion) { - this.version = version; - this.minorVersion = minorVersion; - } - - int getVersion() { - return version; - } - - int getMinorVersion() { - return minorVersion; - } -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyWindowUpdateFrame.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyWindowUpdateFrame.java deleted file mode 100644 index bbd8730bd04..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/SpdyWindowUpdateFrame.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -/** - * A SPDY Protocol WINDOW_UPDATE Frame - */ -public interface SpdyWindowUpdateFrame extends SpdyFrame { - - /** - * Returns the Stream-ID of this frame. - */ - int streamId(); - - /** - * Sets the Stream-ID of this frame. The Stream-ID cannot be negative. - */ - SpdyWindowUpdateFrame setStreamId(int streamID); - - /** - * Returns the Delta-Window-Size of this frame. - */ - int deltaWindowSize(); - - /** - * Sets the Delta-Window-Size of this frame. - * The Delta-Window-Size must be positive. 
- */ - SpdyWindowUpdateFrame setDeltaWindowSize(int deltaWindowSize); -} diff --git a/codec-http/src/main/java/io/netty/handler/codec/spdy/package-info.java b/codec-http/src/main/java/io/netty/handler/codec/spdy/package-info.java deleted file mode 100644 index 30b4299d5d1..00000000000 --- a/codec-http/src/main/java/io/netty/handler/codec/spdy/package-info.java +++ /dev/null @@ -1,19 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -/** - * Encoder, decoder, session handler and their related message types for the SPDY protocol. - */ -package io.netty.handler.codec.spdy; diff --git a/codec-http/src/main/resources/META-INF/native-image/io.netty/codec-http/native-image.properties b/codec-http/src/main/resources/META-INF/native-image/io.netty/codec-http/native-image.properties new file mode 100644 index 00000000000..c20ae73746f --- /dev/null +++ b/codec-http/src/main/resources/META-INF/native-image/io.netty/codec-http/native-image.properties @@ -0,0 +1,16 @@ +# Copyright 2019 The Netty Project +# +# The Netty Project licenses this file to you under the Apache License, +# version 2.0 (the "License"); you may not use this file except in compliance +# with the License. 
You may obtain a copy of the License at: +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +Args = --initialize-at-build-time=io.netty \ + --initialize-at-run-time=io.netty.handler.codec.http.HttpObjectEncoder,io.netty.handler.codec.http.websocketx.WebSocket00FrameEncoder,io.netty.handler.codec.http.websocketx.extensions.compression.DeflateDecoder,io.netty.handler.codec.compression.BrotliDecoder diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/CombinedHttpHeadersTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/CombinedHttpHeadersTest.java index e0433efb97c..4aa78227f6a 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/CombinedHttpHeadersTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/CombinedHttpHeadersTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,16 +16,20 @@ package io.netty.handler.codec.http; import io.netty.handler.codec.http.HttpHeadersTestUtils.HeaderValue; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.Collections; import java.util.Iterator; +import static io.netty.handler.codec.http.HttpHeaderNames.SET_COOKIE; import static io.netty.util.AsciiString.contentEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.hasSize; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class CombinedHttpHeadersTest { private static final CharSequence HEADER_NAME = "testHeader"; @@ -52,7 +56,7 @@ public void addCombinedHeadersWhenEmpty() { otherHeaders.add(HEADER_NAME, "a"); otherHeaders.add(HEADER_NAME, "b"); headers.add(otherHeaders); - assertEquals("a,b", headers.get(HEADER_NAME).toString()); + assertEquals("a,b", headers.get(HEADER_NAME)); } @Test @@ -63,7 +67,29 @@ public void addCombinedHeadersWhenNotEmpty() { otherHeaders.add(HEADER_NAME, "b"); otherHeaders.add(HEADER_NAME, "c"); headers.add(otherHeaders); - assertEquals("a,b,c", headers.get(HEADER_NAME).toString()); + assertEquals("a,b,c", headers.get(HEADER_NAME)); + } + + @Test + public void dontCombineSetCookieHeaders() { + final CombinedHttpHeaders headers = newCombinedHttpHeaders(); + headers.add(SET_COOKIE, "a"); + final 
CombinedHttpHeaders otherHeaders = newCombinedHttpHeaders(); + otherHeaders.add(SET_COOKIE, "b"); + otherHeaders.add(SET_COOKIE, "c"); + headers.add(otherHeaders); + assertThat(headers.getAll(SET_COOKIE), hasSize(3)); + } + + @Test + public void dontCombineSetCookieHeadersRegardlessOfCase() { + final CombinedHttpHeaders headers = newCombinedHttpHeaders(); + headers.add("Set-Cookie", "a"); + final CombinedHttpHeaders otherHeaders = newCombinedHttpHeaders(); + otherHeaders.add("set-cookie", "b"); + otherHeaders.add("SET-COOKIE", "c"); + headers.add(otherHeaders); + assertThat(headers.getAll(SET_COOKIE), hasSize(3)); } @Test @@ -74,7 +100,7 @@ public void setCombinedHeadersWhenNotEmpty() { otherHeaders.add(HEADER_NAME, "b"); otherHeaders.add(HEADER_NAME, "c"); headers.set(otherHeaders); - assertEquals("b,c", headers.get(HEADER_NAME).toString()); + assertEquals("b,c", headers.get(HEADER_NAME)); } @Test @@ -85,7 +111,7 @@ public void addUncombinedHeaders() { otherHeaders.add(HEADER_NAME, "b"); otherHeaders.add(HEADER_NAME, "c"); headers.add(otherHeaders); - assertEquals("a,b,c", headers.get(HEADER_NAME).toString()); + assertEquals("a,b,c", headers.get(HEADER_NAME)); } @Test @@ -96,7 +122,7 @@ public void setUncombinedHeaders() { otherHeaders.add(HEADER_NAME, "b"); otherHeaders.add(HEADER_NAME, "c"); headers.set(otherHeaders); - assertEquals("b,c", headers.get(HEADER_NAME).toString()); + assertEquals("b,c", headers.get(HEADER_NAME)); } @Test @@ -115,11 +141,11 @@ public void addCharSequencesCsvWithValueContainingCommas() { assertEquals(HeaderValue.EIGHT.subset(6), headers.getAll(HEADER_NAME)); } - @Test (expected = NullPointerException.class) + @Test public void addCharSequencesCsvNullValue() { final CombinedHttpHeaders headers = newCombinedHttpHeaders(); final String value = null; - headers.add(HEADER_NAME, value); + assertThrows(NullPointerException.class, () -> headers.add(HEADER_NAME, value)); } @Test @@ -171,7 +197,7 @@ public void addIterableCsvSingleValue() { 
public void addIterableCsvEmpty() { final CombinedHttpHeaders headers = newCombinedHttpHeaders(); headers.add(HEADER_NAME, Collections.emptyList()); - assertEquals(Arrays.asList(""), headers.getAll(HEADER_NAME)); + assertEquals(Collections.singletonList(""), headers.getAll(HEADER_NAME)); } @Test @@ -269,9 +295,18 @@ public void testGetAll() { headers.set(HEADER_NAME, Arrays.asList("\"a\"", "\"b\"", "\"c\"")); assertEquals(Arrays.asList("a", "b", "c"), headers.getAll(HEADER_NAME)); headers.set(HEADER_NAME, "a,b,c"); - assertEquals(Arrays.asList("a,b,c"), headers.getAll(HEADER_NAME)); + assertEquals(Collections.singletonList("a,b,c"), headers.getAll(HEADER_NAME)); headers.set(HEADER_NAME, "\"a,b,c\""); - assertEquals(Arrays.asList("a,b,c"), headers.getAll(HEADER_NAME)); + assertEquals(Collections.singletonList("a,b,c"), headers.getAll(HEADER_NAME)); + } + + @Test + public void getAllDontCombineSetCookie() { + final CombinedHttpHeaders headers = newCombinedHttpHeaders(); + headers.add(SET_COOKIE, "a"); + headers.add(SET_COOKIE, "b"); + assertThat(headers.getAll(SET_COOKIE), hasSize(2)); + assertEquals(Arrays.asList("a", "b"), headers.getAll(SET_COOKIE)); } @Test @@ -314,6 +349,22 @@ public void valueIterator() { assertValueIterator(headers.valueCharSequenceIterator(HEADER_NAME)); } + @Test + public void nonCombinableHeaderIterator() { + final CombinedHttpHeaders headers = newCombinedHttpHeaders(); + headers.add(SET_COOKIE, "c"); + headers.add(SET_COOKIE, "b"); + headers.add(SET_COOKIE, "a"); + + final Iterator strItr = headers.valueStringIterator(SET_COOKIE); + assertTrue(strItr.hasNext()); + assertEquals("a", strItr.next()); + assertTrue(strItr.hasNext()); + assertEquals("b", strItr.next()); + assertTrue(strItr.hasNext()); + assertEquals("c", strItr.next()); + } + private static void assertValueIterator(Iterator strItr) { assertTrue(strItr.hasNext()); assertEquals("a", strItr.next()); diff --git 
a/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpHeadersTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpHeadersTest.java index 3f2a678ab52..37a8bb747c9 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpHeadersTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpHeadersTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,7 +18,7 @@ import io.netty.handler.codec.http.HttpHeadersTestUtils.HeaderValue; import io.netty.util.AsciiString; import io.netty.util.internal.StringUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.Iterator; @@ -29,19 +29,26 @@ import static io.netty.util.AsciiString.contentEquals; import static java.util.Arrays.asList; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class DefaultHttpHeadersTest { private static final CharSequence HEADER_NAME = "testHeader"; - @Test(expected = IllegalArgumentException.class) + @Test public void nullHeaderNameNotAllowed() { - new DefaultHttpHeaders().add(null, "foo"); + assertThrows(IllegalArgumentException.class, () -> new 
DefaultHttpHeaders().add(null, "foo")); } - @Test(expected = IllegalArgumentException.class) + @Test public void emptyHeaderNameNotAllowed() { - new DefaultHttpHeaders().add(StringUtil.EMPTY_STRING, "foo"); + assertThrows(IllegalArgumentException.class, + () -> new DefaultHttpHeaders().add(StringUtil.EMPTY_STRING, "foo")); } @Test @@ -145,16 +152,16 @@ public void testEqualsIgnoreCase() { assertThat(AsciiString.contentEqualsIgnoreCase("FoO", "fOo"), is(true)); } - @Test(expected = NullPointerException.class) + @Test public void testSetNullHeaderValueValidate() { HttpHeaders headers = new DefaultHttpHeaders(true); - headers.set(of("test"), (CharSequence) null); + assertThrows(NullPointerException.class, () -> headers.set(of("test"), (CharSequence) null)); } - @Test(expected = NullPointerException.class) + @Test public void testSetNullHeaderValueNotValidate() { HttpHeaders headers = new DefaultHttpHeaders(false); - headers.set(of("test"), (CharSequence) null); + assertThrows(NullPointerException.class, () -> headers.set(of("test"), (CharSequence) null)); } @Test @@ -232,7 +239,7 @@ public void providesHeaderNamesAsArray() throws Exception { .add(HttpHeaderNames.CONTENT_LENGTH, 10) .names(); - String[] namesArray = nettyHeaders.toArray(new String[nettyHeaders.size()]); + String[] namesArray = nettyHeaders.toArray(new String[0]); assertArrayEquals(namesArray, new String[] { HttpHeaderNames.CONTENT_LENGTH.toString() }); } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java index cf0fa92512b..9ddb597ae9c 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpRequestTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,11 +16,11 @@ package io.netty.handler.codec.http; import io.netty.util.AsciiString; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class DefaultHttpRequestTest { diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpResponseTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpResponseTest.java new file mode 100644 index 00000000000..a7130598643 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/DefaultHttpResponseTest.java @@ -0,0 +1,40 @@ +/* + * Copyright 2018 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; + +public class DefaultHttpResponseTest { + + @Test + public void testNotEquals() { + HttpResponse ok = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + HttpResponse notFound = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.NOT_FOUND); + assertNotEquals(ok, notFound); + assertNotEquals(ok.hashCode(), notFound.hashCode()); + } + + @Test + public void testEquals() { + HttpResponse ok = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + HttpResponse ok2 = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + assertEquals(ok, ok2); + assertEquals(ok.hashCode(), ok2.hashCode()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/EmptyHttpHeadersInitializationTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/EmptyHttpHeadersInitializationTest.java deleted file mode 100644 index a77e7ad8867..00000000000 --- a/codec-http/src/test/java/io/netty/handler/codec/http/EmptyHttpHeadersInitializationTest.java +++ /dev/null @@ -1,43 +0,0 @@ -/* - * Copyright 2017 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.http; - -import org.junit.Test; - -import static org.junit.Assert.*; - -/** - * A test to validate that either order of initialization of the {@link EmptyHttpHeaders#INSTANCE} and - * {@link HttpHeaders#EMPTY_HEADERS} field results in both fields being non-null. - * - * Since this is testing static initialization, the tests might not actually test anything, except - * when run in isolation. - */ -public class EmptyHttpHeadersInitializationTest { - - @Test - public void testEmptyHttpHeadersFirst() { - assertNotNull(EmptyHttpHeaders.INSTANCE); - assertNotNull(HttpHeaders.EMPTY_HEADERS); - } - - @Test - public void testHttpHeadersFirst() { - assertNotNull(HttpHeaders.EMPTY_HEADERS); - assertNotNull(EmptyHttpHeaders.INSTANCE); - } - -} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpChunkedInputTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpChunkedInputTest.java index 002c8d08268..c0904c15a43 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpChunkedInputTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpChunkedInputTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -25,7 +25,8 @@ import io.netty.handler.stream.ChunkedNioStream; import io.netty.handler.stream.ChunkedStream; import io.netty.handler.stream.ChunkedWriteHandler; -import org.junit.Test; +import io.netty.util.internal.PlatformDependent; +import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; import java.io.File; @@ -33,7 +34,10 @@ import java.io.IOException; import java.nio.channels.Channels; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpChunkedInputTest { private static final byte[] BYTES = new byte[1024 * 64]; @@ -46,7 +50,7 @@ public class HttpChunkedInputTest { FileOutputStream out = null; try { - TMP = File.createTempFile("netty-chunk-", ".tmp"); + TMP = PlatformDependent.createTempFile("netty-chunk-", ".tmp", null); TMP.deleteOnExit(); out = new FileOutputStream(TMP); out.write(BYTES); @@ -138,7 +142,7 @@ private static void check(ChunkedInput... inputs) { break; } if (lastHttpContent != null) { - assertTrue("Chunk must be DefaultHttpContent", lastHttpContent instanceof DefaultHttpContent); + assertTrue(lastHttpContent instanceof DefaultHttpContent, "Chunk must be DefaultHttpContent"); } ByteBuf buffer = httpContent.content(); @@ -156,7 +160,7 @@ private static void check(ChunkedInput... 
inputs) { } assertEquals(BYTES.length * inputs.length, read); - assertSame("Last chunk must be LastHttpContent.EMPTY_LAST_CONTENT", - LastHttpContent.EMPTY_LAST_CONTENT, lastHttpContent); + assertSame(LastHttpContent.EMPTY_LAST_CONTENT, lastHttpContent, + "Last chunk must be LastHttpContent.EMPTY_LAST_CONTENT"); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientCodecTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientCodecTest.java index 16a6eff29de..972f2c5a17b 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientCodecTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientCodecTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,14 +20,13 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.Channel; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelInitializer; import io.netty.channel.ChannelOption; +import io.netty.channel.MultithreadEventLoopGroup; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.channel.nio.NioEventLoopGroup; +import io.netty.channel.nio.NioHandler; import io.netty.channel.socket.SocketChannel; import io.netty.channel.socket.nio.NioServerSocketChannel; import io.netty.channel.socket.nio.NioSocketChannel; @@ -35,16 +34,26 @@ import io.netty.handler.codec.PrematureChannelClosureException; import io.netty.util.CharsetUtil; import io.netty.util.NetUtil; -import 
org.junit.Test; +import io.netty.util.concurrent.Future; +import org.hamcrest.CoreMatchers; +import org.junit.jupiter.api.Test; import java.net.InetSocketAddress; import java.util.concurrent.CountDownLatch; import static io.netty.util.ReferenceCountUtil.release; import static java.util.concurrent.TimeUnit.SECONDS; -import static org.hamcrest.CoreMatchers.*; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.CoreMatchers.sameInstance; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.not; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class HttpClientCodecTest { @@ -59,7 +68,7 @@ public class HttpClientCodecTest { @Test public void testConnectWithResponseContent() { - HttpClientCodec codec = new HttpClientCodec(4096, 8192, 8192, true); + HttpClientCodec codec = new HttpClientCodec(4096, 8192, true); EmbeddedChannel ch = new EmbeddedChannel(codec); sendRequestAndReadResponse(ch, HttpMethod.CONNECT, RESPONSE); @@ -68,7 +77,7 @@ public void testConnectWithResponseContent() { @Test public void testFailsNotOnRequestResponseChunked() { - HttpClientCodec codec = new HttpClientCodec(4096, 8192, 8192, true); + HttpClientCodec codec = new HttpClientCodec(4096, 8192, true); EmbeddedChannel ch = new EmbeddedChannel(codec); sendRequestAndReadResponse(ch, HttpMethod.GET, CHUNKED_RESPONSE); @@ -77,7 +86,7 @@ public void testFailsNotOnRequestResponseChunked() { @Test public void testFailsOnMissingResponse() { - HttpClientCodec codec = new HttpClientCodec(4096, 8192, 8192, true); + HttpClientCodec codec = new HttpClientCodec(4096, 
8192, true); EmbeddedChannel ch = new EmbeddedChannel(codec); assertTrue(ch.writeOutbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, @@ -95,7 +104,7 @@ public void testFailsOnMissingResponse() { @Test public void testFailsOnIncompleteChunkedResponse() { - HttpClientCodec codec = new HttpClientCodec(4096, 8192, 8192, true); + HttpClientCodec codec = new HttpClientCodec(4096, 8192, true); EmbeddedChannel ch = new EmbeddedChannel(codec); ch.writeOutbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http://localhost/")); @@ -118,79 +127,73 @@ public void testFailsOnIncompleteChunkedResponse() { } @Test - public void testServerCloseSocketInputProvidesData() throws InterruptedException { + public void testServerCloseSocketInputProvidesData() throws Exception { ServerBootstrap sb = new ServerBootstrap(); Bootstrap cb = new Bootstrap(); final CountDownLatch serverChannelLatch = new CountDownLatch(1); final CountDownLatch responseReceivedLatch = new CountDownLatch(1); try { - sb.group(new NioEventLoopGroup(2)); + sb.group(new MultithreadEventLoopGroup(2, NioHandler.newFactory())); sb.channel(NioServerSocketChannel.class); sb.childHandler(new ChannelInitializer() { @Override protected void initChannel(Channel ch) throws Exception { // Don't use the HttpServerCodec, because we don't want to have content-length or anything added. 
- ch.pipeline().addLast(new HttpRequestDecoder(4096, 8192, 8192, true)); + ch.pipeline().addLast(new HttpRequestDecoder(4096, 8192, true)); ch.pipeline().addLast(new HttpObjectAggregator(4096)); ch.pipeline().addLast(new SimpleChannelInboundHandler() { @Override - protected void channelRead0(ChannelHandlerContext ctx, FullHttpRequest msg) { + protected void messageReceived(ChannelHandlerContext ctx, FullHttpRequest msg) { // This is just a simple demo...don't block in IO assertTrue(ctx.channel() instanceof SocketChannel); final SocketChannel sChannel = (SocketChannel) ctx.channel(); - /** - * The point of this test is to not add any content-length or content-encoding headers - * and the client should still handle this. - * See RFC 7230, 3.3.3. + /* + The point of this test is to not add any content-length or content-encoding headers + and the client should still handle this. + See RFC 7230, 3.3.3: https://tools.ietf.org/html/rfc7230#section-3.3.3. */ sChannel.writeAndFlush(Unpooled.wrappedBuffer(("HTTP/1.0 200 OK\r\n" + "Date: Fri, 31 Dec 1999 23:59:59 GMT\r\n" + "Content-Type: text/html\r\n\r\n").getBytes(CharsetUtil.ISO_8859_1))) - .addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - assertTrue(future.isSuccess()); - sChannel.writeAndFlush(Unpooled.wrappedBuffer( - "hello half closed!\r\n" - .getBytes(CharsetUtil.ISO_8859_1))) - .addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - assertTrue(future.isSuccess()); - sChannel.shutdownOutput(); - } + .addListener(future -> { + assertTrue(future.isSuccess()); + sChannel.writeAndFlush(Unpooled.wrappedBuffer( + "hello half closed!\r\n" + .getBytes(CharsetUtil.ISO_8859_1))) + .addListener(future1 -> { + assertTrue(future1.isSuccess()); + sChannel.shutdownOutput(); + }); }); - } - }); } }); serverChannelLatch.countDown(); } }); - cb.group(new NioEventLoopGroup(1)); + 
cb.group(new MultithreadEventLoopGroup(1, NioHandler.newFactory())); cb.channel(NioSocketChannel.class); cb.option(ChannelOption.ALLOW_HALF_CLOSURE, true); cb.handler(new ChannelInitializer() { @Override protected void initChannel(Channel ch) throws Exception { - ch.pipeline().addLast(new HttpClientCodec(4096, 8192, 8192, true, true)); + ch.pipeline().addLast(new HttpClientCodec(4096, 8192, true, true)); ch.pipeline().addLast(new HttpObjectAggregator(4096)); ch.pipeline().addLast(new SimpleChannelInboundHandler() { @Override - protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) { + protected void messageReceived(ChannelHandlerContext ctx, FullHttpResponse msg) { responseReceivedLatch.countDown(); } }); } }); - Channel serverChannel = sb.bind(new InetSocketAddress(0)).sync().channel(); + Channel serverChannel = sb.bind(new InetSocketAddress(0)).get(); int port = ((InetSocketAddress) serverChannel.localAddress()).getPort(); - ChannelFuture ccf = cb.connect(new InetSocketAddress(NetUtil.LOCALHOST, port)); + Future ccf = cb.connect(new InetSocketAddress(NetUtil.LOCALHOST, port)); assertTrue(ccf.awaitUninterruptibly().isSuccess()); - Channel clientChannel = ccf.channel(); + Channel clientChannel = ccf.get(); assertTrue(serverChannelLatch.await(5, SECONDS)); clientChannel.writeAndFlush(new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")); assertTrue(responseReceivedLatch.await(5, SECONDS)); @@ -212,11 +215,11 @@ public void testPassThroughAfterConnect() throws Exception { } private static void testAfterConnect(final boolean parseAfterConnect) throws Exception { - EmbeddedChannel ch = new EmbeddedChannel(new HttpClientCodec(4096, 8192, 8192, true, true, parseAfterConnect)); + EmbeddedChannel ch = new EmbeddedChannel(new HttpClientCodec(4096, 8192, true, true, parseAfterConnect)); Consumer connectResponseConsumer = new Consumer(); sendRequestAndReadResponse(ch, HttpMethod.CONNECT, EMPTY_RESPONSE, connectResponseConsumer); - 
assertTrue("No connect response messages received.", connectResponseConsumer.getReceivedCount() > 0); + assertTrue(connectResponseConsumer.getReceivedCount() > 0, "No connect response messages received."); Consumer responseConsumer = new Consumer() { @Override void accept(Object object) { @@ -228,8 +231,8 @@ void accept(Object object) { } }; sendRequestAndReadResponse(ch, HttpMethod.GET, RESPONSE, responseConsumer); - assertTrue("No response messages received.", responseConsumer.getReceivedCount() > 0); - assertFalse("Channel finish failed.", ch.finish()); + assertTrue(responseConsumer.getReceivedCount() > 0, "No response messages received."); + assertFalse(ch.finish(), "Channel finish failed."); } private static void sendRequestAndReadResponse(EmbeddedChannel ch, HttpMethod httpMethod, String response) { @@ -238,10 +241,10 @@ private static void sendRequestAndReadResponse(EmbeddedChannel ch, HttpMethod ht private static void sendRequestAndReadResponse(EmbeddedChannel ch, HttpMethod httpMethod, String response, Consumer responseConsumer) { - assertTrue("Channel outbound write failed.", - ch.writeOutbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, httpMethod, "http://localhost/"))); - assertTrue("Channel inbound write failed.", - ch.writeInbound(Unpooled.copiedBuffer(response, CharsetUtil.ISO_8859_1))); + assertTrue(ch.writeOutbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, httpMethod, "http://localhost/")), + "Channel outbound write failed."); + assertTrue(ch.writeInbound(Unpooled.copiedBuffer(response, CharsetUtil.ISO_8859_1)), + "Channel inbound write failed."); for (;;) { Object msg = ch.readOutbound(); @@ -284,28 +287,28 @@ public void testDecodesFinalResponseAfterSwitchingProtocols() { "Connection: Upgrade\r\n" + "Upgrade: TLS/1.2, HTTP/1.1\r\n\r\n"; - HttpClientCodec codec = new HttpClientCodec(4096, 8192, 8192, true); + HttpClientCodec codec = new HttpClientCodec(4096, 8192, true); EmbeddedChannel ch = new EmbeddedChannel(codec, new 
HttpObjectAggregator(1024)); HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http://localhost/"); request.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE); request.headers().set(HttpHeaderNames.UPGRADE, "TLS/1.2"); - assertTrue("Channel outbound write failed.", ch.writeOutbound(request)); + assertTrue(ch.writeOutbound(request), "Channel outbound write failed."); - assertTrue("Channel inbound write failed.", - ch.writeInbound(Unpooled.copiedBuffer(SWITCHING_PROTOCOLS_RESPONSE, CharsetUtil.ISO_8859_1))); + assertTrue(ch.writeInbound(Unpooled.copiedBuffer(SWITCHING_PROTOCOLS_RESPONSE, CharsetUtil.ISO_8859_1)), + "Channel inbound write failed."); Object switchingProtocolsResponse = ch.readInbound(); - assertNotNull("No response received", switchingProtocolsResponse); + assertNotNull(switchingProtocolsResponse, "No response received"); assertThat("Response was not decoded", switchingProtocolsResponse, instanceOf(FullHttpResponse.class)); ((FullHttpResponse) switchingProtocolsResponse).release(); - assertTrue("Channel inbound write failed", - ch.writeInbound(Unpooled.copiedBuffer(RESPONSE, CharsetUtil.ISO_8859_1))); + assertTrue(ch.writeInbound(Unpooled.copiedBuffer(RESPONSE, CharsetUtil.ISO_8859_1)), + "Channel inbound write failed"); Object finalResponse = ch.readInbound(); - assertNotNull("No response received", finalResponse); + assertNotNull(finalResponse, "No response received"); assertThat("Response was not decoded", finalResponse, instanceOf(FullHttpResponse.class)); ((FullHttpResponse) finalResponse).release(); - assertTrue("Channel finish failed", ch.finishAndReleaseAll()); + assertTrue(ch.finishAndReleaseAll(), "Channel finish failed"); } @Test @@ -331,4 +334,90 @@ public void testWebSocket00Response() { assertThat(ch.readInbound(), is(nullValue())); } + + @Test + public void testWebDavResponse() { + byte[] data = ("HTTP/1.1 102 Processing\r\n" + + "Status-URI: Status-URI:http://status.com; 404\r\n" + 
+ "\r\n" + + "1234567812345678").getBytes(); + EmbeddedChannel ch = new EmbeddedChannel(new HttpClientCodec()); + assertTrue(ch.writeInbound(Unpooled.wrappedBuffer(data))); + + HttpResponse res = ch.readInbound(); + assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); + assertThat(res.status(), is(HttpResponseStatus.PROCESSING)); + HttpContent content = ch.readInbound(); + // HTTP 102 is not allowed to have content. + assertThat(content.content().readableBytes(), is(0)); + content.release(); + + assertThat(ch.finish(), is(false)); + } + + @Test + public void testInformationalResponseKeepsPairsInSync() { + byte[] data = ("HTTP/1.1 102 Processing\r\n" + + "Status-URI: Status-URI:http://status.com; 404\r\n" + + "\r\n").getBytes(); + byte[] data2 = ("HTTP/1.1 200 OK\r\n" + + "Content-Length: 8\r\n" + + "\r\n" + + "12345678").getBytes(); + EmbeddedChannel ch = new EmbeddedChannel(new HttpClientCodec()); + assertTrue(ch.writeOutbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.HEAD, "/"))); + ByteBuf buffer = ch.readOutbound(); + buffer.release(); + assertNull(ch.readOutbound()); + assertTrue(ch.writeInbound(Unpooled.wrappedBuffer(data))); + HttpResponse res = ch.readInbound(); + assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); + assertThat(res.status(), is(HttpResponseStatus.PROCESSING)); + HttpContent content = ch.readInbound(); + // HTTP 102 is not allowed to have content. 
+ assertThat(content.content().readableBytes(), is(0)); + assertThat(content, CoreMatchers.instanceOf(LastHttpContent.class)); + content.release(); + + assertTrue(ch.writeOutbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"))); + buffer = ch.readOutbound(); + buffer.release(); + assertNull(ch.readOutbound()); + assertTrue(ch.writeInbound(Unpooled.wrappedBuffer(data2))); + + res = ch.readInbound(); + assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); + assertThat(res.status(), is(HttpResponseStatus.OK)); + content = ch.readInbound(); + // HTTP 200 has content. + assertThat(content.content().readableBytes(), is(8)); + assertThat(content, CoreMatchers.instanceOf(LastHttpContent.class)); + content.release(); + + assertThat(ch.finish(), is(false)); + } + + @Test + public void testMultipleResponses() { + String response = "HTTP/1.1 200 OK\r\n" + + "Content-Length: 0\r\n\r\n"; + + HttpClientCodec codec = new HttpClientCodec(4096, 8192, true); + EmbeddedChannel ch = new EmbeddedChannel(codec, new HttpObjectAggregator(1024)); + + HttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "http://localhost/"); + assertTrue(ch.writeOutbound(request)); + + assertTrue(ch.writeInbound(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8))); + assertTrue(ch.writeInbound(Unpooled.copiedBuffer(response, CharsetUtil.UTF_8))); + FullHttpResponse resp = ch.readInbound(); + assertTrue(resp.decoderResult().isSuccess()); + resp.release(); + + resp = ch.readInbound(); + assertTrue(resp.decoderResult().isSuccess()); + resp.release(); + assertTrue(ch.finishAndReleaseAll()); + } + } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientUpgradeHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientUpgradeHandlerTest.java index e342ef73e28..dde84466cc5 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientUpgradeHandlerTest.java +++ 
b/codec-http/src/test/java/io/netty/handler/codec/http/HttpClientUpgradeHandlerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,19 +15,20 @@ */ package io.netty.handler.codec.http; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.embedded.EmbeddedChannel; import java.util.Collection; import java.util.Collections; +import java.util.List; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpClientUpgradeHandlerTest { @@ -57,7 +58,7 @@ public void upgradeTo(ChannelHandlerContext ctx, FullHttpResponse upgradeRespons } } - private static final class UserEventCatcher extends ChannelInboundHandlerAdapter { + private static final class UserEventCatcher implements ChannelHandler { private Object evt; public Object getUserEvent() { @@ -174,4 +175,25 @@ public void testEarlyBailout() { assertEquals(HttpResponseStatus.OK, response.status()); assertFalse(channel.finish()); } + + @Test + public void dontStripConnectionHeaders() { + HttpClientUpgradeHandler.SourceCodec sourceCodec = new FakeSourceCodec(); + HttpClientUpgradeHandler.UpgradeCodec upgradeCodec = new 
FakeUpgradeCodec(); + HttpClientUpgradeHandler handler = new HttpClientUpgradeHandler(sourceCodec, upgradeCodec, 1024); + UserEventCatcher catcher = new UserEventCatcher(); + EmbeddedChannel channel = new EmbeddedChannel(catcher); + channel.pipeline().addFirst("upgrade", handler); + + DefaultFullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "netty.io"); + request.headers().add("connection", "extra"); + request.headers().add("extra", "value"); + assertTrue(channel.writeOutbound(request)); + FullHttpRequest readRequest = channel.readOutbound(); + + List connectionHeaders = readRequest.headers().getAll("connection"); + assertTrue(connectionHeaders.contains("extra")); + assertTrue(readRequest.release()); + assertFalse(channel.finish()); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorOptionsTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorOptionsTest.java new file mode 100644 index 00000000000..285be33f50c --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorOptionsTest.java @@ -0,0 +1,121 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http; + +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.compression.StandardCompressionOptions; + +import org.junit.jupiter.api.Test; + +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.CoreMatchers.not; +import static org.hamcrest.CoreMatchers.nullValue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; + +class HttpContentCompressorOptionsTest { + + @Test + void testGetBrTargetContentEncoding() { + HttpContentCompressor compressor = new HttpContentCompressor( + StandardCompressionOptions.gzip(), + StandardCompressionOptions.deflate(), + StandardCompressionOptions.brotli(), + StandardCompressionOptions.zstd() + ); + + String[] tests = { + // Accept-Encoding -> Content-Encoding + "", null, + "*", "br", + "*;q=0.0", null, + "br", "br", + "compress, br;q=0.5", "br", + "br; q=0.5, identity", "br", + "br; q=0, deflate", "br", + }; + for (int i = 0; i < tests.length; i += 2) { + String acceptEncoding = tests[i]; + String contentEncoding = tests[i + 1]; + String targetEncoding = compressor.determineEncoding(acceptEncoding); + assertEquals(contentEncoding, targetEncoding); + } + } + + @Test + void testGetZstdTargetContentEncoding() { + HttpContentCompressor compressor = new HttpContentCompressor( + StandardCompressionOptions.gzip(), + StandardCompressionOptions.deflate(), + StandardCompressionOptions.brotli(), + StandardCompressionOptions.zstd() + ); + + String[] tests = { + // Accept-Encoding -> Content-Encoding + "", null, + "*;q=0.0", null, + "zstd", "zstd", + "compress, zstd;q=0.5", "zstd", + "zstd; q=0.5, identity", "zstd", + "zstd; q=0, deflate", "zstd", + }; + for (int i = 0; i < tests.length; i += 2) { + String acceptEncoding = tests[i]; + String contentEncoding = tests[i + 1]; + String 
targetEncoding = compressor.determineEncoding(acceptEncoding); + assertEquals(contentEncoding, targetEncoding); + } + } + + @Test + void testAcceptEncodingHttpRequest() { + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor(null)); + ch.writeInbound(newRequest()); + FullHttpRequest fullHttpRequest = ch.readInbound(); + fullHttpRequest.release(); + + HttpResponse res = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + ch.writeOutbound(res); + + assertEncodedResponse(ch); + + assertTrue(ch.close().isSuccess()); + } + + private static void assertEncodedResponse(EmbeddedChannel ch) { + Object o = ch.readOutbound(); + assertThat(o, is(instanceOf(HttpResponse.class))); + + assertEncodedResponse((HttpResponse) o); + } + + private static void assertEncodedResponse(HttpResponse res) { + assertThat(res, is(not(instanceOf(HttpContent.class)))); + assertThat(res.headers().get(HttpHeaderNames.TRANSFER_ENCODING), is("chunked")); + assertThat(res.headers().get(HttpHeaderNames.CONTENT_LENGTH), is(nullValue())); + assertThat(res.headers().get(HttpHeaderNames.CONTENT_ENCODING), is("br")); + } + + private static FullHttpRequest newRequest() { + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); + req.headers().set(HttpHeaderNames.ACCEPT_ENCODING, "br, zstd, gzip, deflate"); + return req; + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java index 6ab2ab88c62..be1a91ced04 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentCompressorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,21 +15,30 @@ */ package io.netty.handler.codec.http; +import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.DecoderResult; import io.netty.handler.codec.EncoderException; import io.netty.handler.codec.compression.ZlibWrapper; import io.netty.util.CharsetUtil; import io.netty.util.ReferenceCountUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import java.nio.charset.StandardCharsets; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class HttpContentCompressorTest { @@ -149,6 +158,106 @@ public void testChunkedContent() throws Exception { assertThat(ch.readOutbound(), is(nullValue())); } + @Test + public void testChunkedContentWithAssembledResponse() throws Exception { + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); + ch.writeInbound(newRequest()); + + HttpResponse res = new AssembledHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII)); + 
res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + ch.writeOutbound(res); + + assertAssembledEncodedResponse(ch); + + ch.writeOutbound(new DefaultHttpContent(Unpooled.copiedBuffer("o, w", CharsetUtil.US_ASCII))); + ch.writeOutbound(new DefaultLastHttpContent(Unpooled.copiedBuffer("orld", CharsetUtil.US_ASCII))); + + HttpContent chunk; + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("1f8b0800000000000000f248cdc901000000ffff")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("cad7512807000000ffff")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("ca2fca4901000000ffff")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(chunk.content()), is("0300c2a99ae70c000000")); + assertThat(chunk, is(instanceOf(HttpContent.class))); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().isReadable(), is(false)); + assertThat(chunk, is(instanceOf(LastHttpContent.class))); + chunk.release(); + + assertThat(ch.readOutbound(), is(nullValue())); + } + + @Test + public void testChunkedContentWithAssembledResponseIdentityEncoding() throws Exception { + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); + ch.writeInbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/")); + + HttpResponse res = new AssembledHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII)); + res.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + ch.writeOutbound(res); + + ch.writeOutbound(new DefaultHttpContent(Unpooled.copiedBuffer("o, w", CharsetUtil.US_ASCII))); + ch.writeOutbound(new DefaultLastHttpContent(Unpooled.copiedBuffer("orld", CharsetUtil.US_ASCII))); + + HttpContent chunk; + chunk = ch.readOutbound(); + 
assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("Hell")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("o, w")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("orld")); + assertThat(chunk, is(instanceOf(LastHttpContent.class))); + chunk.release(); + + assertThat(ch.readOutbound(), is(nullValue())); + } + + @Test + public void testContentWithAssembledResponseIdentityEncodingHttp10() throws Exception { + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); + ch.writeInbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, "/")); + + HttpResponse res = new AssembledHttpResponse(HttpVersion.HTTP_1_0, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Hell", CharsetUtil.US_ASCII)); + ch.writeOutbound(res); + + ch.writeOutbound(new DefaultHttpContent(Unpooled.copiedBuffer("o, w", CharsetUtil.US_ASCII))); + ch.writeOutbound(new DefaultLastHttpContent(Unpooled.copiedBuffer("orld", CharsetUtil.US_ASCII))); + + HttpContent chunk; + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("Hell")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("o, w")); + chunk.release(); + + chunk = ch.readOutbound(); + assertThat(chunk.content().toString(StandardCharsets.UTF_8), is("orld")); + assertThat(chunk, is(instanceOf(LastHttpContent.class))); + chunk.release(); + + assertThat(ch.readOutbound(), is(nullValue())); + } + @Test public void testChunkedContentWithTrailingHeader() throws Exception { EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); @@ -188,6 +297,7 @@ public void testChunkedContentWithTrailingHeader() throws Exception { assertThat(chunk.content().isReadable(), is(false)); assertThat(chunk, is(instanceOf(LastHttpContent.class))); assertEquals("Netty", 
((LastHttpContent) chunk).trailingHeaders().get(of("X-Test"))); + assertEquals(DecoderResult.SUCCESS, chunk.decoderResult()); chunk.release(); assertThat(ch.readOutbound(), is(nullValue())); @@ -270,7 +380,7 @@ public void testEmptySplitContent() throws Exception { assertEncodedResponse(ch); ch.writeOutbound(LastHttpContent.EMPTY_LAST_CONTENT); - HttpContent chunk = (HttpContent) ch.readOutbound(); + HttpContent chunk = ch.readOutbound(); assertThat(ByteBufUtil.hexDump(chunk.content()), is("1f8b080000000000000003000000000000000000")); assertThat(chunk, is(instanceOf(HttpContent.class))); chunk.release(); @@ -331,6 +441,7 @@ public void testEmptyFullContentWithTrailer() throws Exception { assertThat(res.content().readableBytes(), is(0)); assertThat(res.content().toString(CharsetUtil.US_ASCII), is("")); assertEquals("Netty", res.trailingHeaders().get(of("X-Test"))); + assertEquals(DecoderResult.SUCCESS, res.decoderResult()); assertThat(ch.readOutbound(), is(nullValue())); } @@ -370,6 +481,7 @@ public void test100Continue() throws Exception { assertThat(res.content().readableBytes(), is(0)); assertThat(res.content().toString(CharsetUtil.US_ASCII), is("")); assertEquals("Netty", res.trailingHeaders().get(of("X-Test"))); + assertEquals(DecoderResult.SUCCESS, res.decoderResult()); assertThat(ch.readOutbound(), is(nullValue())); } @@ -419,7 +531,7 @@ public void testIdentity() throws Exception { res.headers().set(HttpHeaderNames.CONTENT_ENCODING, HttpHeaderValues.IDENTITY); assertTrue(ch.writeOutbound(res)); - FullHttpResponse response = (FullHttpResponse) ch.readOutbound(); + FullHttpResponse response = ch.readOutbound(); assertEquals(String.valueOf(len), response.headers().get(HttpHeaderNames.CONTENT_LENGTH)); assertEquals(HttpHeaderValues.IDENTITY.toString(), response.headers().get(HttpHeaderNames.CONTENT_ENCODING)); assertEquals("Hello, World", response.content().toString(CharsetUtil.US_ASCII)); @@ -441,7 +553,7 @@ public void testCustomEncoding() throws Exception { 
res.headers().set(HttpHeaderNames.CONTENT_ENCODING, "ascii"); assertTrue(ch.writeOutbound(res)); - FullHttpResponse response = (FullHttpResponse) ch.readOutbound(); + FullHttpResponse response = ch.readOutbound(); assertEquals(String.valueOf(len), response.headers().get(HttpHeaderNames.CONTENT_LENGTH)); assertEquals("ascii", response.headers().get(HttpHeaderNames.CONTENT_ENCODING)); assertEquals("Hello, World", response.content().toString(CharsetUtil.US_ASCII)); @@ -496,6 +608,39 @@ public void testCompressThresholdNotCompress() throws Exception { assertTrue(ch.finishAndReleaseAll()); } + @Test + public void testMultipleAcceptEncodingHeaders() { + FullHttpRequest request = newRequest(); + request.headers().set(HttpHeaderNames.ACCEPT_ENCODING, "unknown; q=1.0") + .add(HttpHeaderNames.ACCEPT_ENCODING, "gzip; q=0.5") + .add(HttpHeaderNames.ACCEPT_ENCODING, "deflate; q=0"); + + EmbeddedChannel ch = new EmbeddedChannel(new HttpContentCompressor()); + + assertTrue(ch.writeInbound(request)); + + FullHttpResponse res = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, HttpResponseStatus.OK, + Unpooled.copiedBuffer("Gzip Win", CharsetUtil.US_ASCII)); + assertTrue(ch.writeOutbound(res)); + + assertEncodedResponse(ch); + HttpContent c = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(c.content()), is("1f8b080000000000000072afca2c5008cfcc03000000ffff")); + c.release(); + + c = ch.readOutbound(); + assertThat(ByteBufUtil.hexDump(c.content()), is("03001f2ebf0f08000000")); + c.release(); + + LastHttpContent last = ch.readOutbound(); + assertThat(last.content().readableBytes(), is(0)); + last.release(); + + assertThat(ch.readOutbound(), is(nullValue())); + assertTrue(ch.finishAndReleaseAll()); + } + private static FullHttpRequest newRequest() { FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"); req.headers().set(HttpHeaderNames.ACCEPT_ENCODING, "gzip"); @@ -512,4 +657,93 @@ private static void assertEncodedResponse(EmbeddedChannel 
ch) { assertThat(res.headers().get(HttpHeaderNames.CONTENT_LENGTH), is(nullValue())); assertThat(res.headers().get(HttpHeaderNames.CONTENT_ENCODING), is("gzip")); } + + private static void assertAssembledEncodedResponse(EmbeddedChannel ch) { + Object o = ch.readOutbound(); + assertThat(o, is(instanceOf(AssembledHttpResponse.class))); + + AssembledHttpResponse res = (AssembledHttpResponse) o; + try { + assertThat(res, is(instanceOf(HttpContent.class))); + assertThat(res.headers().get(HttpHeaderNames.TRANSFER_ENCODING), is("chunked")); + assertThat(res.headers().get(HttpHeaderNames.CONTENT_LENGTH), is(nullValue())); + assertThat(res.headers().get(HttpHeaderNames.CONTENT_ENCODING), is("gzip")); + } finally { + res.release(); + } + } + + static class AssembledHttpResponse extends DefaultHttpResponse implements HttpContent { + + private final ByteBuf content; + + AssembledHttpResponse(HttpVersion version, HttpResponseStatus status, ByteBuf content) { + super(version, status); + this.content = content; + } + + @Override + public HttpContent copy() { + throw new UnsupportedOperationException(); + } + + @Override + public HttpContent duplicate() { + throw new UnsupportedOperationException(); + } + + @Override + public HttpContent retainedDuplicate() { + throw new UnsupportedOperationException(); + } + + @Override + public HttpContent replace(ByteBuf content) { + throw new UnsupportedOperationException(); + } + + @Override + public AssembledHttpResponse retain() { + content.retain(); + return this; + } + + @Override + public AssembledHttpResponse retain(int increment) { + content.retain(increment); + return this; + } + + @Override + public ByteBuf content() { + return content; + } + + @Override + public int refCnt() { + return content.refCnt(); + } + + @Override + public boolean release() { + return content.release(); + } + + @Override + public boolean release(int decrement) { + return content.release(decrement); + } + + @Override + public AssembledHttpResponse touch() { + 
content.touch(); + return this; + } + + @Override + public AssembledHttpResponse touch(Object hint) { + content.touch(hint); + return this; + } + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java index 7a27a4c08ac..00e0fbfae5a 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,18 +18,21 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.CodecException; import io.netty.handler.codec.DecoderException; +import io.netty.handler.codec.compression.Brotli; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibDecoder; import io.netty.handler.codec.compression.ZlibEncoder; import io.netty.handler.codec.compression.ZlibWrapper; import io.netty.util.CharsetUtil; import io.netty.util.ReferenceCountUtil; -import org.junit.Test; +import io.netty.util.internal.PlatformDependent; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.condition.DisabledIf; import java.util.ArrayList; import java.util.List; @@ -39,7 +42,13 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; -import 
static org.junit.Assert.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class HttpContentDecoderTest { private static final String HELLO_WORLD = "hello, world"; @@ -47,6 +56,12 @@ public class HttpContentDecoderTest { 31, -117, 8, 8, 12, 3, -74, 84, 0, 3, 50, 0, -53, 72, -51, -55, -55, -41, 81, 40, -49, 47, -54, 73, 1, 0, 58, 114, -85, -1, 12, 0, 0, 0 }; + private static final String SAMPLE_STRING = "Hello, I am Meow!. A small kitten. :)" + + "I sleep all day, and meow all night."; + private static final byte[] SAMPLE_BZ_BYTES = new byte[]{27, 72, 0, 0, -60, -102, 91, -86, 103, 20, + -28, -23, 54, -101, 11, -106, -16, -32, -95, -61, -37, 94, -16, 97, -40, -93, -56, 18, 21, 86, + -110, 82, -41, 102, -89, 20, 11, 10, -68, -31, 96, -116, -55, -80, -31, -91, 96, -64, 83, 51, + -39, 13, -21, 92, -16, -119, 124, -31, 18, 78, -1, 91, 82, 105, -116, -95, -22, -11, -70, -45, 0}; @Test public void testBinaryDecompression() throws Exception { @@ -89,6 +104,48 @@ public void testRequestDecompression() { assertFalse(channel.finish()); // assert that no messages are left in channel } + @Test + public void testChunkedRequestDecompression() { + HttpResponseDecoder decoder = new HttpResponseDecoder(); + HttpContentDecoder decompressor = new HttpContentDecompressor(); + + EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor, null); + + String headers = "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "Trailer: My-Trailer\r\n" + + "Content-Encoding: gzip\r\n\r\n"; + + channel.writeInbound(Unpooled.copiedBuffer(headers.getBytes(CharsetUtil.US_ASCII))); + + String chunkLength = 
Integer.toHexString(GZ_HELLO_WORLD.length); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(chunkLength + "\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("\r\n".getBytes(CharsetUtil.US_ASCII)))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("0\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("My-Trailer: 42\r\n\r\n\r\n", CharsetUtil.US_ASCII))); + + Object ob1 = channel.readInbound(); + assertThat(ob1, is(instanceOf(DefaultHttpResponse.class))); + + Object ob2 = channel.readInbound(); + assertThat(ob2, is(instanceOf(HttpContent.class))); + HttpContent content = (HttpContent) ob2; + assertEquals(HELLO_WORLD, content.content().toString(CharsetUtil.US_ASCII)); + content.release(); + + Object ob3 = channel.readInbound(); + assertThat(ob3, is(instanceOf(LastHttpContent.class))); + LastHttpContent lastContent = (LastHttpContent) ob3; + assertNotNull(lastContent.decoderResult()); + assertTrue(lastContent.decoderResult().isSuccess()); + assertFalse(lastContent.trailingHeaders().isEmpty()); + assertEquals("42", lastContent.trailingHeaders().get("My-Trailer")); + assertHasInboundMessages(channel, false); + assertHasOutboundMessages(channel, false); + assertFalse(channel.finish()); + } + @Test public void testResponseDecompression() { // baseline test: response decoder, content decompressor && request aggregator work as expected @@ -116,6 +173,77 @@ public void testResponseDecompression() { assertFalse(channel.finish()); // assert that no messages are left in channel } + @DisabledIf(value = "isNotSupported", disabledReason = "Brotli is not supported on this platform") + @Test + public void testResponseBrotliDecompression() throws Throwable { + Brotli.ensureAvailability(); + + HttpResponseDecoder decoder = new HttpResponseDecoder(); + HttpContentDecoder decompressor = new HttpContentDecompressor(); + 
HttpObjectAggregator aggregator = new HttpObjectAggregator(Integer.MAX_VALUE); + EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor, aggregator); + + String headers = "HTTP/1.1 200 OK\r\n" + + "Content-Length: " + SAMPLE_BZ_BYTES.length + "\r\n" + + "Content-Encoding: br\r\n" + + "\r\n"; + ByteBuf buf = Unpooled.wrappedBuffer(headers.getBytes(CharsetUtil.US_ASCII), SAMPLE_BZ_BYTES); + assertTrue(channel.writeInbound(buf)); + + Object o = channel.readInbound(); + assertThat(o, is(instanceOf(FullHttpResponse.class))); + FullHttpResponse resp = (FullHttpResponse) o; + assertNull(resp.headers().get(HttpHeaderNames.CONTENT_ENCODING), "Content-Encoding header should be removed"); + assertEquals(SAMPLE_STRING, resp.content().toString(CharsetUtil.UTF_8), + "Response body should match uncompressed string"); + resp.release(); + + assertHasInboundMessages(channel, false); + assertHasOutboundMessages(channel, false); + assertFalse(channel.finish()); // assert that no messages are left in channel + } + + @DisabledIf(value = "isNotSupported", disabledReason = "Brotli is not supported on this platform") + @Test + public void testResponseChunksBrotliDecompression() throws Throwable { + Brotli.ensureAvailability(); + + HttpResponseDecoder decoder = new HttpResponseDecoder(); + HttpContentDecoder decompressor = new HttpContentDecompressor(); + HttpObjectAggregator aggregator = new HttpObjectAggregator(Integer.MAX_VALUE); + EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor, aggregator); + + String headers = "HTTP/1.1 200 OK\r\n" + + "Content-Length: " + SAMPLE_BZ_BYTES.length + "\r\n" + + "Content-Encoding: br\r\n" + + "\r\n"; + + assertFalse(channel.writeInbound(Unpooled.wrappedBuffer(headers.getBytes(CharsetUtil.US_ASCII)))); + + int offset = 0; + while (offset < SAMPLE_BZ_BYTES.length) { + int len = Math.min(1500, SAMPLE_BZ_BYTES.length - offset); + boolean available = channel.writeInbound(Unpooled.wrappedBuffer(SAMPLE_BZ_BYTES, offset, len)); + 
offset += 1500; + if (offset < SAMPLE_BZ_BYTES.length) { + assertFalse(available); + } else { + assertTrue(available); + } + } + + Object o = channel.readInbound(); + assertThat(o, is(instanceOf(FullHttpResponse.class))); + FullHttpResponse resp = (FullHttpResponse) o; + assertEquals(SAMPLE_STRING, resp.content().toString(CharsetUtil.UTF_8), + "Response body should match uncompressed string"); + resp.release(); + + assertHasInboundMessages(channel, false); + assertHasOutboundMessages(channel, false); + assertFalse(channel.finish()); // assert that no messages are left in channel + } + @Test public void testExpectContinueResponse1() { // request with header "Expect: 100-continue" must be replied with one "100 Continue" response @@ -136,7 +264,7 @@ public void testExpectContinueResponse1() { assertThat(o, is(instanceOf(FullHttpResponse.class))); FullHttpResponse r = (FullHttpResponse) o; assertEquals(100, r.status().code()); - assertTrue(channel.writeInbound(Unpooled.wrappedBuffer(GZ_HELLO_WORLD))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD))); r.release(); assertHasInboundMessages(channel, true); @@ -163,7 +291,7 @@ public void testExpectContinueResponse2() { FullHttpResponse r = (FullHttpResponse) o; assertEquals(100, r.status().code()); r.release(); - assertTrue(channel.writeInbound(Unpooled.wrappedBuffer(GZ_HELLO_WORLD))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD))); assertHasInboundMessages(channel, true); assertHasOutboundMessages(channel, false); @@ -190,7 +318,7 @@ public void testExpectContinueResponse3() { FullHttpResponse r = (FullHttpResponse) o; assertEquals(100, r.status().code()); r.release(); - assertTrue(channel.writeInbound(Unpooled.wrappedBuffer(GZ_HELLO_WORLD))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD))); assertHasInboundMessages(channel, true); assertHasOutboundMessages(channel, false); @@ -217,7 +345,7 @@ public void testExpectContinueResponse4() { 
FullHttpResponse r = (FullHttpResponse) o; assertEquals(100, r.status().code()); r.release(); - assertTrue(channel.writeInbound(Unpooled.wrappedBuffer(GZ_HELLO_WORLD))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD))); assertHasInboundMessages(channel, true); assertHasOutboundMessages(channel, false); @@ -231,8 +359,8 @@ public void testExpectContinueResetHttpObjectDecoder() { HttpRequestDecoder decoder = new HttpRequestDecoder(); final int maxBytes = 10; HttpObjectAggregator aggregator = new HttpObjectAggregator(maxBytes); - final AtomicReference secondRequestRef = new AtomicReference(); - EmbeddedChannel channel = new EmbeddedChannel(decoder, aggregator, new ChannelInboundHandlerAdapter() { + final AtomicReference secondRequestRef = new AtomicReference<>(); + EmbeddedChannel channel = new EmbeddedChannel(decoder, aggregator, new ChannelHandler() { @Override public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof FullHttpRequest) { @@ -284,7 +412,7 @@ public void testRequestContentLength1() { // or removes it completely (handlers down the chain must rely on LastHttpContent object) // force content to be in more than one chunk (5 bytes/chunk) - HttpRequestDecoder decoder = new HttpRequestDecoder(4096, 4096, 5); + HttpRequestDecoder decoder = new HttpRequestDecoder(4096, 4096); HttpContentDecoder decompressor = new HttpContentDecompressor(); EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor); String headers = "POST / HTTP/1.1\r\n" + @@ -313,7 +441,7 @@ public void testRequestContentLength2() { // case 2: if HttpObjectAggregator is down the chain, then correct Content-Length header must be set // force content to be in more than one chunk (5 bytes/chunk) - HttpRequestDecoder decoder = new HttpRequestDecoder(4096, 4096, 5); + HttpRequestDecoder decoder = new HttpRequestDecoder(4096, 4096); HttpContentDecoder decompressor = new HttpContentDecompressor(); HttpObjectAggregator 
aggregator = new HttpObjectAggregator(1024); EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor, aggregator); @@ -345,7 +473,7 @@ public void testResponseContentLength1() { // or removes it completely (handlers down the chain must rely on LastHttpContent object) // force content to be in more than one chunk (5 bytes/chunk) - HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096, 5); + HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096); HttpContentDecoder decompressor = new HttpContentDecompressor(); EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor); String headers = "HTTP/1.1 200 OK\r\n" + @@ -361,11 +489,11 @@ public void testResponseContentLength1() { assertThat(o, is(instanceOf(HttpResponse.class))); HttpResponse r = (HttpResponse) o; - assertFalse("Content-Length header not removed.", r.headers().contains(HttpHeaderNames.CONTENT_LENGTH)); + assertFalse(r.headers().contains(HttpHeaderNames.CONTENT_LENGTH), "Content-Length header not removed."); String transferEncoding = r.headers().get(HttpHeaderNames.TRANSFER_ENCODING); - assertNotNull("Content-length as well as transfer-encoding not set.", transferEncoding); - assertEquals("Unexpected transfer-encoding value.", HttpHeaderValues.CHUNKED.toString(), transferEncoding); + assertNotNull(transferEncoding, "Content-length as well as transfer-encoding not set."); + assertEquals(HttpHeaderValues.CHUNKED.toString(), transferEncoding, "Unexpected transfer-encoding value."); assertHasInboundMessages(channel, true); assertHasOutboundMessages(channel, false); @@ -377,7 +505,7 @@ public void testResponseContentLength2() { // case 2: if HttpObjectAggregator is down the chain, then correct Content-Length header must be set // force content to be in more than one chunk (5 bytes/chunk) - HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096, 5); + HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096); HttpContentDecoder decompressor = new 
HttpContentDecompressor(); HttpObjectAggregator aggregator = new HttpObjectAggregator(1024); EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor, aggregator); @@ -405,7 +533,7 @@ public void testResponseContentLength2() { @Test public void testFullHttpRequest() { // test that ContentDecoder can be used after the ObjectAggregator - HttpRequestDecoder decoder = new HttpRequestDecoder(4096, 4096, 5); + HttpRequestDecoder decoder = new HttpRequestDecoder(4096, 4096); HttpObjectAggregator aggregator = new HttpObjectAggregator(1024); HttpContentDecoder decompressor = new HttpContentDecompressor(); EmbeddedChannel channel = new EmbeddedChannel(decoder, aggregator, decompressor); @@ -432,7 +560,7 @@ public void testFullHttpRequest() { @Test public void testFullHttpResponse() { // test that ContentDecoder can be used after the ObjectAggregator - HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096, 5); + HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096); HttpObjectAggregator aggregator = new HttpObjectAggregator(1024); HttpContentDecoder decompressor = new HttpContentDecompressor(); EmbeddedChannel channel = new EmbeddedChannel(decoder, aggregator, decompressor); @@ -460,7 +588,7 @@ public void testFullHttpResponse() { @Test public void testFullHttpResponseEOF() { // test that ContentDecoder can be used after the ObjectAggregator - HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096, 5); + HttpResponseDecoder decoder = new HttpResponseDecoder(4096, 4096); HttpContentDecoder decompressor = new HttpContentDecompressor(); EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor); String headers = "HTTP/1.1 200 OK\r\n" + @@ -489,7 +617,7 @@ public void testCleanupThrows() { HttpContentDecoder decoder = new HttpContentDecoder() { @Override protected EmbeddedChannel newContentDecoder(String contentEncoding) throws Exception { - return new EmbeddedChannel(new ChannelInboundHandlerAdapter() { + return new 
EmbeddedChannel(new ChannelHandler() { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { ctx.fireExceptionCaught(new DecoderException()); @@ -500,11 +628,11 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { }; final AtomicBoolean channelInactiveCalled = new AtomicBoolean(); - EmbeddedChannel channel = new EmbeddedChannel(decoder, new ChannelInboundHandlerAdapter() { + EmbeddedChannel channel = new EmbeddedChannel(decoder, new ChannelHandler() { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { assertTrue(channelInactiveCalled.compareAndSet(false, true)); - super.channelInactive(ctx); + ctx.fireChannelInactive(); } }); assertTrue(channel.writeInbound(new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/"))); @@ -521,15 +649,87 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { assertEquals(0, content.refCnt()); } + @Test + public void testTransferCodingGZIP() { + String requestStr = "POST / HTTP/1.1\r\n" + + "Content-Length: " + GZ_HELLO_WORLD.length + "\r\n" + + "Transfer-Encoding: gzip\r\n" + + "\r\n"; + HttpRequestDecoder decoder = new HttpRequestDecoder(); + HttpContentDecoder decompressor = new HttpContentDecompressor(); + EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor); + + channel.writeInbound(Unpooled.copiedBuffer(requestStr.getBytes())); + channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD)); + + HttpRequest request = channel.readInbound(); + assertTrue(request.decoderResult().isSuccess()); + assertFalse(request.headers().contains(HttpHeaderNames.CONTENT_LENGTH)); + + HttpContent content = channel.readInbound(); + assertTrue(content.decoderResult().isSuccess()); + assertEquals(HELLO_WORLD, content.content().toString(CharsetUtil.US_ASCII)); + content.release(); + + LastHttpContent lastHttpContent = channel.readInbound(); + assertTrue(lastHttpContent.decoderResult().isSuccess()); + 
lastHttpContent.release(); + + assertHasInboundMessages(channel, false); + assertHasOutboundMessages(channel, false); + assertFalse(channel.finish()); + channel.releaseInbound(); + } + + @Test + public void testTransferCodingGZIPAndChunked() { + String requestStr = "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Content-Type: application/x-www-form-urlencoded\r\n" + + "Trailer: My-Trailer\r\n" + + "Transfer-Encoding: gzip, chunked\r\n" + + "\r\n"; + HttpRequestDecoder decoder = new HttpRequestDecoder(); + HttpContentDecoder decompressor = new HttpContentDecompressor(); + EmbeddedChannel channel = new EmbeddedChannel(decoder, decompressor); + + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + + String chunkLength = Integer.toHexString(GZ_HELLO_WORLD.length); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(chunkLength + "\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(GZ_HELLO_WORLD))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("\r\n".getBytes(CharsetUtil.US_ASCII)))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("0\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("My-Trailer: 42\r\n\r\n", CharsetUtil.US_ASCII))); + + HttpRequest request = channel.readInbound(); + assertTrue(request.decoderResult().isSuccess()); + assertTrue(request.headers().containsValue(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED, true)); + assertFalse(request.headers().contains(HttpHeaderNames.CONTENT_LENGTH)); + + HttpContent chunk1 = channel.readInbound(); + assertTrue(chunk1.decoderResult().isSuccess()); + assertEquals(HELLO_WORLD, chunk1.content().toString(CharsetUtil.US_ASCII)); + chunk1.release(); + + LastHttpContent chunk2 = channel.readInbound(); + assertTrue(chunk2.decoderResult().isSuccess()); + assertEquals("42", chunk2.trailingHeaders().get("My-Trailer")); + chunk2.release(); + + assertFalse(channel.finish()); 
+ channel.releaseInbound(); + } + private static byte[] gzDecompress(byte[] input) { ZlibDecoder decoder = ZlibCodecFactory.newZlibDecoder(ZlibWrapper.GZIP); EmbeddedChannel channel = new EmbeddedChannel(decoder); - assertTrue(channel.writeInbound(Unpooled.wrappedBuffer(input))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(input))); assertTrue(channel.finish()); // close the channel to indicate end-of-data int outputSize = 0; ByteBuf o; - List inbound = new ArrayList(); + List inbound = new ArrayList<>(); while ((o = channel.readInbound()) != null) { inbound.add(o); outputSize += o.readableBytes(); @@ -584,7 +784,7 @@ private static byte[] gzCompress(byte[] input) { int outputSize = 0; ByteBuf o; - List outbound = new ArrayList(); + List outbound = new ArrayList<>(); while ((o = channel.readOutbound()) != null) { outbound.add(o); outputSize += o.readableBytes(); @@ -635,4 +835,8 @@ private static void assertHasOutboundMessages(EmbeddedChannel channel, boolean h assertNull(o); } } + + static boolean isNotSupported() { + return PlatformDependent.isOsx() && "aarch_64".equals(PlatformDependent.normalizedArch()); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java new file mode 100644 index 00000000000..557ee17ba1e --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java @@ -0,0 +1,72 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http; + +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.embedded.EmbeddedChannel; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.atomic.AtomicInteger; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class HttpContentDecompressorTest { + + // See https://github.com/netty/netty/issues/8915. 
+ @Test + public void testInvokeReadWhenNotProduceMessage() { + final AtomicInteger readCalled = new AtomicInteger(); + EmbeddedChannel channel = new EmbeddedChannel(new ChannelHandler() { + @Override + public void read(ChannelHandlerContext ctx) { + readCalled.incrementAndGet(); + ctx.read(); + } + }, new HttpContentDecompressor(), new ChannelHandler() { + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + ctx.fireChannelRead(msg); + ctx.read(); + } + }); + + channel.config().setAutoRead(false); + + readCalled.set(0); + HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + response.headers().set(HttpHeaderNames.CONTENT_ENCODING, "gzip"); + response.headers().set(HttpHeaderNames.CONTENT_TYPE, "application/json;charset=UTF-8"); + response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + + assertTrue(channel.writeInbound(response)); + + // we triggered read explicitly + assertEquals(1, readCalled.get()); + + assertTrue(channel.readInbound() instanceof HttpResponse); + + assertFalse(channel.writeInbound(new DefaultHttpContent(Unpooled.EMPTY_BUFFER))); + + // read was triggered by the HttpContentDecompressor itself as it did not produce any message to the next + // inbound handler. + assertEquals(2, readCalled.get()); + assertFalse(channel.finishAndReleaseAll()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentEncoderTest.java index 9242cf87283..bba88b57559 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,14 +18,16 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.CodecException; +import io.netty.handler.codec.DecoderResult; import io.netty.handler.codec.EncoderException; import io.netty.handler.codec.MessageToByteEncoder; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; import java.util.concurrent.atomic.AtomicBoolean; @@ -34,13 +36,18 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpContentEncoderTest { private static final class TestEncoder extends HttpContentEncoder { @Override - protected Result beginEncode(HttpResponse headers, String acceptEncoding) { + protected Result beginEncode(HttpResponse httpResponse, String acceptEncoding) { return new Result("test", new EmbeddedChannel(new MessageToByteEncoder() { @Override protected void encode(ChannelHandlerContext ctx, ByteBuf in, ByteBuf out) throws Exception { @@ -156,6 +163,7 @@ public void 
testChunkedContentWithTrailingHeader() throws Exception { assertThat(chunk.content().isReadable(), is(false)); assertThat(chunk, is(instanceOf(LastHttpContent.class))); assertEquals("Netty", ((LastHttpContent) chunk).trailingHeaders().get(of("X-Test"))); + assertEquals(DecoderResult.SUCCESS, res.decoderResult()); chunk.release(); assertThat(ch.readOutbound(), is(nullValue())); @@ -285,6 +293,7 @@ public void testEmptyFullContentWithTrailer() throws Exception { assertThat(res.content().readableBytes(), is(0)); assertThat(res.content().toString(CharsetUtil.US_ASCII), is("")); assertEquals("Netty", res.trailingHeaders().get(of("X-Test"))); + assertEquals(DecoderResult.SUCCESS, res.decoderResult()); assertThat(ch.readOutbound(), is(nullValue())); } @@ -392,9 +401,9 @@ public void testHttp1_0() throws Exception { public void testCleanupThrows() { HttpContentEncoder encoder = new HttpContentEncoder() { @Override - protected Result beginEncode(HttpResponse headers, String acceptEncoding) throws Exception { + protected Result beginEncode(HttpResponse httpResponse, String acceptEncoding) throws Exception { return new Result("myencoding", new EmbeddedChannel( - new ChannelInboundHandlerAdapter() { + new ChannelHandler() { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { ctx.fireExceptionCaught(new EncoderException()); @@ -405,11 +414,11 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { }; final AtomicBoolean channelInactiveCalled = new AtomicBoolean(); - EmbeddedChannel channel = new EmbeddedChannel(encoder, new ChannelInboundHandlerAdapter() { + EmbeddedChannel channel = new EmbeddedChannel(encoder, new ChannelHandler() { @Override public void channelInactive(ChannelHandlerContext ctx) throws Exception { assertTrue(channelInactiveCalled.compareAndSet(false, true)); - super.channelInactive(ctx); + ctx.fireChannelInactive(); } }); assertTrue(channel.writeInbound(new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, 
HttpMethod.GET, "/"))); @@ -417,12 +426,13 @@ public void channelInactive(ChannelHandlerContext ctx) throws Exception { HttpContent content = new DefaultHttpContent(Unpooled.buffer().writeZero(10)); assertTrue(channel.writeOutbound(content)); assertEquals(1, content.refCnt()); - try { - channel.finishAndReleaseAll(); - fail(); - } catch (CodecException expected) { - // expected - } + assertThrows(CodecException.class, new Executable() { + @Override + public void execute() { + channel.finishAndReleaseAll(); + } + }); + assertTrue(channelInactiveCalled.get()); assertEquals(0, content.refCnt()); } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeaderDateFormatTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeaderDateFormatTest.java deleted file mode 100644 index 4f4a6e0fb39..00000000000 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeaderDateFormatTest.java +++ /dev/null @@ -1,67 +0,0 @@ -/* - * Copyright 2012 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.http; - -import org.junit.Test; - -import java.text.ParseException; -import java.util.Date; - -import static org.junit.Assert.*; - -public class HttpHeaderDateFormatTest { - /** - * This date is set at "06 Nov 1994 08:49:37 GMT" (same used in example in - * RFC documentation) - *

    - * http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html - */ - private static final Date DATE = new Date(784111777000L); - - @Test - public void testParse() throws ParseException { - HttpHeaderDateFormat format = HttpHeaderDateFormat.get(); - - final Date parsedDateWithSingleDigitDay = format.parse("Sun, 6 Nov 1994 08:49:37 GMT"); - assertNotNull(parsedDateWithSingleDigitDay); - assertEquals(DATE, parsedDateWithSingleDigitDay); - - final Date parsedDateWithDoubleDigitDay = format.parse("Sun, 06 Nov 1994 08:49:37 GMT"); - assertNotNull(parsedDateWithDoubleDigitDay); - assertEquals(DATE, parsedDateWithDoubleDigitDay); - - final Date parsedDateWithDashSeparatorSingleDigitDay = format.parse("Sunday, 06-Nov-94 08:49:37 GMT"); - assertNotNull(parsedDateWithDashSeparatorSingleDigitDay); - assertEquals(DATE, parsedDateWithDashSeparatorSingleDigitDay); - - final Date parsedDateWithSingleDoubleDigitDay = format.parse("Sunday, 6-Nov-94 08:49:37 GMT"); - assertNotNull(parsedDateWithSingleDoubleDigitDay); - assertEquals(DATE, parsedDateWithSingleDoubleDigitDay); - - final Date parsedDateWithoutGMT = format.parse("Sun Nov 6 08:49:37 1994"); - assertNotNull(parsedDateWithoutGMT); - assertEquals(DATE, parsedDateWithoutGMT); - } - - @Test - public void testFormat() { - HttpHeaderDateFormat format = HttpHeaderDateFormat.get(); - - final String formatted = format.format(DATE); - assertNotNull(formatted); - assertEquals("Sun, 06 Nov 1994 08:49:37 GMT", formatted); - } -} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTest.java index 2f44f6dd5b2..e7134d5a2ea 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,16 +16,17 @@ package io.netty.handler.codec.http; import io.netty.util.AsciiString; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.List; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpHeadersTest { @@ -61,22 +62,22 @@ public void testEqualsIgnoreCase() { assertThat(AsciiString.contentEqualsIgnoreCase("FoO", "fOo"), is(true)); } - @Test(expected = NullPointerException.class) + @Test public void testSetNullHeaderValueValidate() { HttpHeaders headers = new DefaultHttpHeaders(true); - headers.set(of("test"), (CharSequence) null); + assertThrows(NullPointerException.class, () -> headers.set(of("test"), (CharSequence) null)); } - @Test(expected = NullPointerException.class) + @Test public void testSetNullHeaderValueNotValidate() { HttpHeaders headers = new DefaultHttpHeaders(false); - headers.set(of("test"), (CharSequence) null); + assertThrows(NullPointerException.class, () -> headers.set(of("test"), (CharSequence) null)); } - @Test(expected = IllegalArgumentException.class) + @Test public void testAddSelf() { HttpHeaders headers = new DefaultHttpHeaders(false); - headers.add(headers); + 
assertThrows(IllegalArgumentException.class, () -> headers.add(headers)); } @Test diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTestUtils.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTestUtils.java index d5e453c39f8..84e614fef2e 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTestUtils.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpHeadersTestUtils.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -54,7 +54,7 @@ public String toString() { public List asList() { if (array == null) { - List list = new ArrayList(nr); + List list = new ArrayList<>(nr); for (int i = 1; i <= nr; i++) { list.add(of(i).toString()); } @@ -68,7 +68,7 @@ public List subset(int from) { --from; final int size = nr - from; final int end = from + size; - List list = new ArrayList(size); + List list = new ArrayList<>(size); List fullList = asList(); for (int i = from; i < end; ++i) { list.add(fullList.get(i)); @@ -117,7 +117,7 @@ private static boolean contains(CharSequence value, char c) { private static final Map MAP; static { - final Map map = new HashMap(); + final Map map = new HashMap<>(); for (HeaderValue v : values()) { final int nr = v.nr; map.put(Integer.valueOf(nr), v); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpInvalidMessageTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpInvalidMessageTest.java index 6275c27e855..d5e96eefa21 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpInvalidMessageTest.java +++ 
b/codec-http/src/test/java/io/netty/handler/codec/http/HttpInvalidMessageTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,15 +20,15 @@ import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.DecoderResult; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Random; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpInvalidMessageTest { diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpObjectAggregatorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpObjectAggregatorTest.java index 7dc0ac5a519..085effd2f60 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpObjectAggregatorTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpObjectAggregatorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -23,8 +23,12 @@ import io.netty.handler.codec.DecoderResult; import io.netty.handler.codec.DecoderResultProvider; import io.netty.handler.codec.TooLongFrameException; +import io.netty.util.AsciiString; import io.netty.util.CharsetUtil; -import org.junit.Test; +import io.netty.util.ReferenceCountUtil; + +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; import org.mockito.Mockito; import java.nio.channels.ClosedChannelException; @@ -33,13 +37,15 @@ import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpObjectAggregatorTest { @@ -114,11 +120,11 @@ public void testAggregateWithTrailer() { @Test public void testOversizedRequest() { - EmbeddedChannel embedder = new EmbeddedChannel(new 
HttpObjectAggregator(4)); + final EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(4)); HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "http://localhost"); HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII)); HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII)); - HttpContent chunk3 = LastHttpContent.EMPTY_LAST_CONTENT; + final HttpContent chunk3 = LastHttpContent.EMPTY_LAST_CONTENT; assertFalse(embedder.writeInbound(message)); assertFalse(embedder.writeInbound(chunk1)); @@ -129,19 +135,70 @@ public void testOversizedRequest() { assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH)); assertFalse(embedder.isOpen()); - try { - assertFalse(embedder.writeInbound(chunk3)); - fail(); - } catch (Exception e) { - assertTrue(e instanceof ClosedChannelException); - } + assertThrows(ClosedChannelException.class, new Executable() { + @Override + public void execute() { + embedder.writeInbound(chunk3); + } + }); + + assertFalse(embedder.finish()); + } + + @Test + public void testOversizedRequestWithContentLengthAndDecoder() { + EmbeddedChannel embedder = new EmbeddedChannel(new HttpRequestDecoder(), new HttpObjectAggregator(4, false)); + assertFalse(embedder.writeInbound(Unpooled.copiedBuffer( + "PUT /upload HTTP/1.1\r\n" + + "Content-Length: 5\r\n\r\n", CharsetUtil.US_ASCII))); + + assertNull(embedder.readInbound()); + + FullHttpResponse response = embedder.readOutbound(); + assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status()); + assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH)); + + assertTrue(embedder.isOpen()); + + assertFalse(embedder.writeInbound(Unpooled.wrappedBuffer(new byte[] { 1, 2, 3, 4 }))); + assertFalse(embedder.writeInbound(Unpooled.wrappedBuffer(new byte[] { 5 }))); + + assertNull(embedder.readOutbound()); + + 
assertFalse(embedder.writeInbound(Unpooled.copiedBuffer( + "PUT /upload HTTP/1.1\r\n" + + "Content-Length: 2\r\n\r\n", CharsetUtil.US_ASCII))); + + assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status()); + assertEquals("0", response.headers().get(HttpHeaderNames.CONTENT_LENGTH)); + + assertThat(response, instanceOf(LastHttpContent.class)); + ReferenceCountUtil.release(response); + + assertTrue(embedder.isOpen()); + + assertFalse(embedder.writeInbound(Unpooled.copiedBuffer(new byte[] { 1 }))); + assertNull(embedder.readOutbound()); + assertTrue(embedder.writeInbound(Unpooled.copiedBuffer(new byte[] { 2 }))); + assertNull(embedder.readOutbound()); + + FullHttpRequest request = embedder.readInbound(); + assertEquals(HttpVersion.HTTP_1_1, request.protocolVersion()); + assertEquals(HttpMethod.PUT, request.method()); + assertEquals("/upload", request.uri()); + assertEquals(2, HttpUtil.getContentLength(request)); + + byte[] actual = new byte[request.content().readableBytes()]; + request.content().readBytes(actual); + assertArrayEquals(new byte[] { 1, 2 }, actual); + request.release(); assertFalse(embedder.finish()); } @Test public void testOversizedRequestWithoutKeepAlive() { - // send a HTTP/1.0 request with no keep-alive header + // send an HTTP/1.0 request with no keep-alive header HttpRequest message = new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.PUT, "http://localhost"); HttpUtil.setContentLength(message, 5); checkOversizedRequest(message); @@ -155,18 +212,55 @@ public void testOversizedRequestWithContentLength() { } private static void checkOversizedRequest(HttpRequest message) { - EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(4)); + final EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(4)); assertFalse(embedder.writeInbound(message)); HttpResponse response = embedder.readOutbound(); assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, response.status()); assertEquals("0", 
response.headers().get(HttpHeaderNames.CONTENT_LENGTH)); + assertThat(response, instanceOf(LastHttpContent.class)); + ReferenceCountUtil.release(response); + if (serverShouldCloseConnection(message, response)) { assertFalse(embedder.isOpen()); + + assertThrows(ClosedChannelException.class, new Executable() { + @Override + public void execute() { + embedder.writeInbound(new DefaultHttpContent(Unpooled.EMPTY_BUFFER)); + } + }); + assertFalse(embedder.finish()); } else { assertTrue(embedder.isOpen()); + assertFalse(embedder.writeInbound(new DefaultHttpContent(Unpooled.copiedBuffer(new byte[8])))); + assertFalse(embedder.writeInbound(new DefaultHttpContent(Unpooled.copiedBuffer(new byte[8])))); + + // Now start a new message and ensure we will not reject it again. + HttpRequest message2 = new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.PUT, "http://localhost"); + HttpUtil.setContentLength(message, 2); + + assertFalse(embedder.writeInbound(message2)); + assertNull(embedder.readOutbound()); + assertFalse(embedder.writeInbound(new DefaultHttpContent(Unpooled.copiedBuffer(new byte[] { 1 })))); + assertNull(embedder.readOutbound()); + assertTrue(embedder.writeInbound(new DefaultLastHttpContent(Unpooled.copiedBuffer(new byte[] { 2 })))); + assertNull(embedder.readOutbound()); + + FullHttpRequest request = embedder.readInbound(); + assertEquals(message2.protocolVersion(), request.protocolVersion()); + assertEquals(message2.method(), request.method()); + assertEquals(message2.uri(), request.uri()); + assertEquals(2, HttpUtil.getContentLength(request)); + + byte[] actual = new byte[request.content().readableBytes()]; + request.content().readBytes(actual); + assertArrayEquals(new byte[] { 1, 2 }, actual); + request.release(); + + assertFalse(embedder.finish()); } } @@ -188,43 +282,58 @@ private static boolean serverShouldCloseConnection(HttpRequest message, HttpResp @Test public void testOversizedResponse() { - EmbeddedChannel embedder = new EmbeddedChannel(new 
HttpObjectAggregator(4)); + final EmbeddedChannel embedder = new EmbeddedChannel(new HttpObjectAggregator(4)); HttpResponse message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); HttpContent chunk1 = new DefaultHttpContent(Unpooled.copiedBuffer("test", CharsetUtil.US_ASCII)); - HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII)); + final HttpContent chunk2 = new DefaultHttpContent(Unpooled.copiedBuffer("test2", CharsetUtil.US_ASCII)); assertFalse(embedder.writeInbound(message)); assertFalse(embedder.writeInbound(chunk1)); - try { - embedder.writeInbound(chunk2); - fail(); - } catch (TooLongFrameException expected) { - // Expected - } + assertThrows(TooLongFrameException.class, new Executable() { + @Override + public void execute() { + embedder.writeInbound(chunk2); + } + }); assertFalse(embedder.isOpen()); assertFalse(embedder.finish()); } - @Test(expected = IllegalArgumentException.class) + @Test public void testInvalidConstructorUsage() { - new HttpObjectAggregator(-1); + assertThrows(IllegalArgumentException.class, new Executable() { + @Override + public void execute() { + new HttpObjectAggregator(-1); + } + }); } - @Test(expected = IllegalArgumentException.class) + @Test public void testInvalidMaxCumulationBufferComponents() { - HttpObjectAggregator aggr = new HttpObjectAggregator(Integer.MAX_VALUE); - aggr.setMaxCumulationBufferComponents(1); + final HttpObjectAggregator aggr = new HttpObjectAggregator(Integer.MAX_VALUE); + assertThrows(IllegalArgumentException.class, new Executable() { + @Override + public void execute() { + aggr.setMaxCumulationBufferComponents(1); + } + }); } - @Test(expected = IllegalStateException.class) + @Test public void testSetMaxCumulationBufferComponentsAfterInit() throws Exception { - HttpObjectAggregator aggr = new HttpObjectAggregator(Integer.MAX_VALUE); + final HttpObjectAggregator aggr = new HttpObjectAggregator(Integer.MAX_VALUE); ChannelHandlerContext ctx 
= Mockito.mock(ChannelHandlerContext.class); aggr.handlerAdded(ctx); Mockito.verifyNoMoreInteractions(ctx); - aggr.setMaxCumulationBufferComponents(10); + assertThrows(IllegalStateException.class, new Executable() { + @Override + public void execute() { + aggr.setMaxCumulationBufferComponents(10); + } + }); } @Test @@ -517,4 +626,123 @@ public void testReplaceAggregatedResponse() { aggregatedRep.release(); replacedRep.release(); } + + @Test + public void testSelectiveRequestAggregation() { + HttpObjectAggregator myPostAggregator = new HttpObjectAggregator(1024 * 1024) { + @Override + protected boolean isStartMessage(HttpObject msg) throws Exception { + if (msg instanceof HttpRequest) { + HttpRequest request = (HttpRequest) msg; + HttpMethod method = request.method(); + + if (method.equals(HttpMethod.POST)) { + return true; + } + } + + return false; + } + }; + + EmbeddedChannel channel = new EmbeddedChannel(myPostAggregator); + + try { + // Aggregate: POST + HttpRequest request1 = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + HttpContent content1 = new DefaultHttpContent(Unpooled.copiedBuffer("Hello, World!", CharsetUtil.UTF_8)); + request1.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.TEXT_PLAIN); + + assertTrue(channel.writeInbound(request1, content1, LastHttpContent.EMPTY_LAST_CONTENT)); + + // Getting an aggregated response out + Object msg1 = channel.readInbound(); + try { + assertTrue(msg1 instanceof FullHttpRequest); + } finally { + ReferenceCountUtil.release(msg1); + } + + // Don't aggregate: non-POST + HttpRequest request2 = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.PUT, "/"); + HttpContent content2 = new DefaultHttpContent(Unpooled.copiedBuffer("Hello, World!", CharsetUtil.UTF_8)); + request2.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.TEXT_PLAIN); + + try { + assertTrue(channel.writeInbound(request2, content2, LastHttpContent.EMPTY_LAST_CONTENT)); + + // Getting the same response 
objects out + assertSame(request2, channel.readInbound()); + assertSame(content2, channel.readInbound()); + assertSame(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound()); + } finally { + ReferenceCountUtil.release(request2); + ReferenceCountUtil.release(content2); + } + + assertFalse(channel.finish()); + } finally { + channel.close(); + } + } + + @Test + public void testSelectiveResponseAggregation() { + HttpObjectAggregator myTextAggregator = new HttpObjectAggregator(1024 * 1024) { + @Override + protected boolean isStartMessage(HttpObject msg) throws Exception { + if (msg instanceof HttpResponse) { + HttpResponse response = (HttpResponse) msg; + HttpHeaders headers = response.headers(); + + String contentType = headers.get(HttpHeaderNames.CONTENT_TYPE); + if (AsciiString.contentEqualsIgnoreCase(contentType, HttpHeaderValues.TEXT_PLAIN)) { + return true; + } + } + + return false; + } + }; + + EmbeddedChannel channel = new EmbeddedChannel(myTextAggregator); + + try { + // Aggregate: text/plain + HttpResponse response1 = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + HttpContent content1 = new DefaultHttpContent(Unpooled.copiedBuffer("Hello, World!", CharsetUtil.UTF_8)); + response1.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.TEXT_PLAIN); + + assertTrue(channel.writeInbound(response1, content1, LastHttpContent.EMPTY_LAST_CONTENT)); + + // Getting an aggregated response out + Object msg1 = channel.readInbound(); + try { + assertTrue(msg1 instanceof FullHttpResponse); + } finally { + ReferenceCountUtil.release(msg1); + } + + // Don't aggregate: application/json + HttpResponse response2 = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + HttpContent content2 = new DefaultHttpContent(Unpooled.copiedBuffer("{key: 'value'}", CharsetUtil.UTF_8)); + response2.headers().set(HttpHeaderNames.CONTENT_TYPE, HttpHeaderValues.APPLICATION_JSON); + + try { + assertTrue(channel.writeInbound(response2, content2, 
LastHttpContent.EMPTY_LAST_CONTENT)); + + // Getting the same response objects out + assertSame(response2, channel.readInbound()); + assertSame(content2, channel.readInbound()); + assertSame(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound()); + } finally { + ReferenceCountUtil.release(response2); + ReferenceCountUtil.release(content2); + } + + assertFalse(channel.finish()); + } finally { + channel.close(); + } + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java index 45720631c40..53d9778f2a7 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,7 +20,7 @@ import io.netty.handler.codec.TooLongFrameException; import io.netty.util.AsciiString; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.List; @@ -29,12 +29,12 @@ import static org.hamcrest.CoreMatchers.instanceOf; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.nullValue; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import 
static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpRequestDecoderTest { private static final byte[] CONTENT_CRLF_DELIMITERS = createContent("\r\n"); @@ -81,7 +81,7 @@ public void testDecodeWholeRequestAtOnceMixedDelimiters() { private static void testDecodeWholeRequestAtOnce(byte[] content) { EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); - assertTrue(channel.writeInbound(Unpooled.wrappedBuffer(content))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(content))); HttpRequest req = channel.readInbound(); assertNotNull(req); checkHeaders(req.headers()); @@ -145,14 +145,14 @@ private static void testDecodeWholeRequestInMultipleSteps(byte[] content, int fr amount = headerLength - a; } - // if header is done it should produce a HttpRequest - channel.writeInbound(Unpooled.wrappedBuffer(content, a, amount)); + // if header is done it should produce an HttpRequest + channel.writeInbound(Unpooled.copiedBuffer(content, a, amount)); a += amount; } for (int i = CONTENT_LENGTH; i > 0; i --) { // Should produce HttpContent - channel.writeInbound(Unpooled.wrappedBuffer(content, content.length - i, 1)); + channel.writeInbound(Unpooled.copiedBuffer(content, content.length - i, 1)); } HttpRequest req = channel.readInbound(); @@ -297,7 +297,7 @@ public void testMessagesSplitBetweenMultipleBuffers() { @Test public void testTooLargeInitialLine() { - EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder(10, 1024, 1024)); + EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder(10, 1024)); String requestStr = "GET /some/path HTTP/1.1\r\n" + "Host: localhost1\r\n\r\n"; @@ -308,9 +308,45 @@ public void testTooLargeInitialLine() { assertFalse(channel.finish()); } + @Test + public void testTooLargeInitialLineWithWSOnly() { + 
testTooLargeInitialLineWithControlCharsOnly(" "); + } + + @Test + public void testTooLargeInitialLineWithCRLFOnly() { + testTooLargeInitialLineWithControlCharsOnly("\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n"); + } + + private static void testTooLargeInitialLineWithControlCharsOnly(String controlChars) { + EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder(15, 1024)); + String requestStr = controlChars + "GET / HTTP/1.1\r\n" + + "Host: localhost1\r\n\r\n"; + + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + HttpRequest request = channel.readInbound(); + assertTrue(request.decoderResult().isFailure()); + assertTrue(request.decoderResult().cause() instanceof TooLongFrameException); + assertFalse(channel.finish()); + } + + @Test + public void testInitialLineWithLeadingControlChars() { + EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); + String crlf = "\r\n"; + String request = crlf + "GET /some/path HTTP/1.1" + crlf + + "Host: localhost" + crlf + crlf; + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(request, CharsetUtil.US_ASCII))); + HttpRequest req = channel.readInbound(); + assertEquals(HttpMethod.GET, req.method()); + assertEquals("/some/path", req.uri()); + assertEquals(HttpVersion.HTTP_1_1, req.protocolVersion()); + assertTrue(channel.finishAndReleaseAll()); + } + @Test public void testTooLargeHeaders() { - EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder(1024, 10, 1024)); + EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder(1024, 10)); String requestStr = "GET /some/path HTTP/1.1\r\n" + "Host: localhost1\r\n\r\n"; @@ -320,4 +356,154 @@ public void testTooLargeHeaders() { assertTrue(request.decoderResult().cause() instanceof TooLongFrameException); assertFalse(channel.finish()); } + + @Test + public void testWhitespace() { + String requestStr = "GET /some/path HTTP/1.1\r\n" + + "Transfer-Encoding : chunked\r\n" + + "Host: netty.io\r\n\r\n"; 
+ testInvalidHeaders0(requestStr); + } + + @Test + public void testWhitespaceBeforeTransferEncoding01() { + String requestStr = "GET /some/path HTTP/1.1\r\n" + + " Transfer-Encoding : chunked\r\n" + + "Content-Length: 1\r\n" + + "Host: netty.io\r\n\r\n" + + "a"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testWhitespaceBeforeTransferEncoding02() { + String requestStr = "POST / HTTP/1.1" + + " Transfer-Encoding : chunked\r\n" + + "Host: target.com" + + "Content-Length: 65\r\n\r\n" + + "0\r\n\r\n" + + "GET /maliciousRequest HTTP/1.1\r\n" + + "Host: evilServer.com\r\n" + + "Foo: x"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testHeaderWithNoValueAndMissingColon() { + String requestStr = "GET /some/path HTTP/1.1\r\n" + + "Content-Length: 0\r\n" + + "Host:\r\n" + + "netty.io\r\n\r\n"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testMultipleContentLengthHeaders() { + String requestStr = "GET /some/path HTTP/1.1\r\n" + + "Content-Length: 1\r\n" + + "Content-Length: 0\r\n\r\n" + + "b"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testMultipleContentLengthHeaders2() { + String requestStr = "GET /some/path HTTP/1.1\r\n" + + "Content-Length: 1\r\n" + + "Connection: close\r\n" + + "Content-Length: 0\r\n\r\n" + + "b"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testContentLengthHeaderWithCommaValue() { + String requestStr = "GET /some/path HTTP/1.1\r\n" + + "Content-Length: 1,1\r\n\r\n" + + "b"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testMultipleContentLengthHeadersWithFolding() { + String requestStr = "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Connection: close\r\n" + + "Content-Length: 5\r\n" + + "Content-Length:\r\n" + + "\t6\r\n\r\n" + + "123456"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testContentLengthAndTransferEncodingHeadersWithVerticalTab() { + testContentLengthAndTransferEncodingHeadersWithInvalidSeparator((char) 
0x0b, false); + testContentLengthAndTransferEncodingHeadersWithInvalidSeparator((char) 0x0b, true); + } + + @Test + public void testContentLengthAndTransferEncodingHeadersWithCR() { + testContentLengthAndTransferEncodingHeadersWithInvalidSeparator((char) 0x0d, false); + testContentLengthAndTransferEncodingHeadersWithInvalidSeparator((char) 0x0d, true); + } + + private static void testContentLengthAndTransferEncodingHeadersWithInvalidSeparator( + char separator, boolean extraLine) { + String requestStr = "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Connection: close\r\n" + + "Content-Length: 9\r\n" + + "Transfer-Encoding:" + separator + "chunked\r\n\r\n" + + (extraLine ? "0\r\n\r\n" : "") + + "something\r\n\r\n"; + testInvalidHeaders0(requestStr); + } + + @Test + public void testContentLengthHeaderAndChunked() { + String requestStr = "POST / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Connection: close\r\n" + + "Content-Length: 5\r\n" + + "Transfer-Encoding: chunked\r\n\r\n" + + "0\r\n\r\n"; + EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + HttpRequest request = channel.readInbound(); + assertFalse(request.decoderResult().isFailure()); + assertTrue(request.headers().contains("Transfer-Encoding", "chunked", false)); + assertFalse(request.headers().contains("Content-Length")); + LastHttpContent c = channel.readInbound(); + assertFalse(channel.finish()); + } + + @Test + public void testHttpMessageDecoderResult() { + String requestStr = "PUT /some/path HTTP/1.1\r\n" + + "Content-Length: 11\r\n" + + "Connection: close\r\n\r\n" + + "Lorem ipsum"; + EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + HttpRequest request = channel.readInbound(); + assertTrue(request.decoderResult().isSuccess()); + 
assertThat(request.decoderResult(), instanceOf(HttpMessageDecoderResult.class)); + HttpMessageDecoderResult decoderResult = (HttpMessageDecoderResult) request.decoderResult(); + assertThat(decoderResult.initialLineLength(), is(23)); + assertThat(decoderResult.headerSize(), is(35)); + assertThat(decoderResult.totalSize(), is(58)); + HttpContent c = channel.readInbound(); + c.release(); + assertFalse(channel.finish()); + } + + private static void testInvalidHeaders0(String requestStr) { + EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder()); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + HttpRequest request = channel.readInbound(); + assertTrue(request.decoderResult().isFailure()); + assertTrue(request.decoderResult().cause() instanceof IllegalArgumentException); + assertFalse(channel.finish()); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java index 2f866f719d2..d4c32b650e7 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,16 +21,21 @@ import io.netty.handler.codec.DecoderResult; import io.netty.util.CharsetUtil; import io.netty.util.IllegalReferenceCountException; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; import java.nio.ByteBuffer; import java.nio.ByteOrder; import java.nio.charset.Charset; import java.util.concurrent.ExecutionException; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.is; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; /** */ @@ -41,8 +46,8 @@ private static ByteBuf[] getBuffers() { return new ByteBuf[]{ Unpooled.buffer(128).order(ByteOrder.BIG_ENDIAN), Unpooled.buffer(128).order(ByteOrder.LITTLE_ENDIAN), - Unpooled.wrappedBuffer(ByteBuffer.allocate(128).order(ByteOrder.BIG_ENDIAN)).resetWriterIndex(), - Unpooled.wrappedBuffer(ByteBuffer.allocate(128).order(ByteOrder.LITTLE_ENDIAN)).resetWriterIndex() + Unpooled.wrappedBuffer(ByteBuffer.allocate(128).order(ByteOrder.BIG_ENDIAN)).writerIndex(0), + Unpooled.wrappedBuffer(ByteBuffer.allocate(128).order(ByteOrder.LITTLE_ENDIAN)).writerIndex(0) }; } @@ -133,15 +138,17 @@ public void testQueryStringPath() throws Exception { @Test public void testEmptyReleasedBufferShouldNotWriteEmptyBufferToChannel() throws Exception { HttpRequestEncoder encoder = new HttpRequestEncoder(); - EmbeddedChannel channel = new EmbeddedChannel(encoder); - ByteBuf buf = 
Unpooled.buffer(); + final EmbeddedChannel channel = new EmbeddedChannel(encoder); + final ByteBuf buf = Unpooled.buffer(); buf.release(); - try { - channel.writeAndFlush(buf).get(); - fail(); - } catch (ExecutionException e) { - assertThat(e.getCause().getCause(), is(instanceOf(IllegalReferenceCountException.class))); - } + ExecutionException e = assertThrows(ExecutionException.class, new Executable() { + @Override + public void execute() throws Throwable { + channel.writeAndFlush(buf).get(); + } + }); + assertThat(e.getCause().getCause(), is(instanceOf(IllegalReferenceCountException.class))); + channel.finishAndReleaseAll(); } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java index 017dbd5ff94..8ad40665832 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ import io.netty.handler.codec.PrematureChannelClosureException; import io.netty.handler.codec.TooLongFrameException; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.List; @@ -32,12 +32,12 @@ import static org.hamcrest.CoreMatchers.not; import static org.hamcrest.CoreMatchers.nullValue; import static org.hamcrest.CoreMatchers.sameInstance; -import static org.junit.Assert.assertArrayEquals; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpResponseDecoderTest { @@ -49,8 +49,8 @@ public class HttpResponseDecoderTest { public void testMaxHeaderSize1() { final int maxHeaderSize = 8192; - final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize, 8192)); - final char[] bytes = new char[maxHeaderSize / 2 - 2]; + final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize)); + final char[] bytes = new char[maxHeaderSize / 2 - 4]; Arrays.fill(bytes, 'a'); ch.writeInbound(Unpooled.copiedBuffer("HTTP/1.1 200 OK\r\n", CharsetUtil.US_ASCII)); @@ -81,7 +81,7 @@ public void 
testMaxHeaderSize1() { public void testMaxHeaderSize2() { final int maxHeaderSize = 8192; - final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize, 8192)); + final EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, maxHeaderSize)); final char[] bytes = new char[maxHeaderSize / 2 - 2]; Arrays.fill(bytes, 'a'); @@ -122,7 +122,7 @@ public void testResponseChunked() { for (int i = 0; i < 10; i++) { assertFalse(ch.writeInbound(Unpooled.copiedBuffer(Integer.toHexString(data.length) + "\r\n", CharsetUtil.US_ASCII))); - assertTrue(ch.writeInbound(Unpooled.wrappedBuffer(data))); + assertTrue(ch.writeInbound(Unpooled.copiedBuffer(data))); HttpContent content = ch.readInbound(); assertEquals(data.length, content.content().readableBytes()); @@ -146,55 +146,6 @@ public void testResponseChunked() { assertNull(ch.readInbound()); } - @Test - public void testResponseChunkedExceedMaxChunkSize() { - EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder(4096, 8192, 32)); - ch.writeInbound( - Unpooled.copiedBuffer("HTTP/1.1 200 OK\r\nTransfer-Encoding: chunked\r\n\r\n", CharsetUtil.US_ASCII)); - - HttpResponse res = ch.readInbound(); - assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); - assertThat(res.status(), is(HttpResponseStatus.OK)); - - byte[] data = new byte[64]; - for (int i = 0; i < data.length; i++) { - data[i] = (byte) i; - } - - for (int i = 0; i < 10; i++) { - assertFalse(ch.writeInbound(Unpooled.copiedBuffer(Integer.toHexString(data.length) + "\r\n", - CharsetUtil.US_ASCII))); - assertTrue(ch.writeInbound(Unpooled.wrappedBuffer(data))); - - byte[] decodedData = new byte[data.length]; - HttpContent content = ch.readInbound(); - assertEquals(32, content.content().readableBytes()); - content.content().readBytes(decodedData, 0, 32); - content.release(); - - content = ch.readInbound(); - assertEquals(32, content.content().readableBytes()); - - content.content().readBytes(decodedData, 32, 
32); - - assertArrayEquals(data, decodedData); - content.release(); - - assertFalse(ch.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII))); - } - - // Write the last chunk. - ch.writeInbound(Unpooled.copiedBuffer("0\r\n\r\n", CharsetUtil.US_ASCII)); - - // Ensure the last chunk was decoded. - LastHttpContent content = ch.readInbound(); - assertFalse(content.content().isReadable()); - content.release(); - - ch.finish(); - assertNull(ch.readInbound()); - } - @Test public void testClosureWithoutContentLength1() throws Exception { EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder()); @@ -446,13 +397,13 @@ private static void testLastResponseWithTrailingHeaderFragmented(byte[] content, amount = headerLength - a; } - // if header is done it should produce a HttpRequest + // if header is done it should produce an HttpResponse boolean headerDone = a + amount == headerLength; - assertEquals(headerDone, ch.writeInbound(Unpooled.wrappedBuffer(content, a, amount))); + assertEquals(headerDone, ch.writeInbound(Unpooled.copiedBuffer(content, a, amount))); a += amount; } - ch.writeInbound(Unpooled.wrappedBuffer(content, headerLength, content.length - headerLength)); + ch.writeInbound(Unpooled.copiedBuffer(content, headerLength, content.length - headerLength)); HttpResponse res = ch.readInbound(); assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); assertThat(res.status(), is(HttpResponseStatus.OK)); @@ -483,8 +434,8 @@ public void testResponseWithContentLength() { for (int i = 0; i < data.length; i++) { data[i] = (byte) i; } - ch.writeInbound(Unpooled.wrappedBuffer(data, 0, data.length / 2)); - ch.writeInbound(Unpooled.wrappedBuffer(data, 5, data.length / 2)); + ch.writeInbound(Unpooled.copiedBuffer(data, 0, data.length / 2)); + ch.writeInbound(Unpooled.copiedBuffer(data, 5, data.length / 2)); HttpResponse res = ch.readInbound(); assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); @@ -492,12 +443,12 @@ public void 
testResponseWithContentLength() { HttpContent firstContent = ch.readInbound(); assertThat(firstContent.content().readableBytes(), is(5)); - assertEquals(Unpooled.wrappedBuffer(data, 0, 5), firstContent.content()); + assertEquals(Unpooled.copiedBuffer(data, 0, 5), firstContent.content()); firstContent.release(); LastHttpContent lastContent = ch.readInbound(); assertEquals(5, lastContent.content().readableBytes()); - assertEquals(Unpooled.wrappedBuffer(data, 5, 5), lastContent.content()); + assertEquals(Unpooled.copiedBuffer(data, 5, 5), lastContent.content()); lastContent.release(); assertThat(ch.finish(), is(false)); @@ -524,15 +475,15 @@ private static void testResponseWithContentLengthFragmented(byte[] header, int f amount = header.length - a; } - ch.writeInbound(Unpooled.wrappedBuffer(header, a, amount)); + ch.writeInbound(Unpooled.copiedBuffer(header, a, amount)); a += amount; } byte[] data = new byte[10]; for (int i = 0; i < data.length; i++) { data[i] = (byte) i; } - ch.writeInbound(Unpooled.wrappedBuffer(data, 0, data.length / 2)); - ch.writeInbound(Unpooled.wrappedBuffer(data, 5, data.length / 2)); + ch.writeInbound(Unpooled.copiedBuffer(data, 0, data.length / 2)); + ch.writeInbound(Unpooled.copiedBuffer(data, 5, data.length / 2)); HttpResponse res = ch.readInbound(); assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); @@ -589,7 +540,7 @@ public void testWebSocketResponseWithDataFollowing() { byte[] otherData = {1, 2, 3, 4}; EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder()); - ch.writeInbound(Unpooled.wrappedBuffer(data, otherData)); + ch.writeInbound(Unpooled.copiedBuffer(data, otherData)); HttpResponse res = ch.readInbound(); assertThat(res.protocolVersion(), sameInstance(HttpVersion.HTTP_1_1)); @@ -625,7 +576,7 @@ public void testGarbageHeaders() { EmbeddedChannel ch = new EmbeddedChannel(new HttpResponseDecoder()); - ch.writeInbound(Unpooled.wrappedBuffer(data)); + ch.writeInbound(Unpooled.copiedBuffer(data)); // 
Garbage input should generate the 999 Unknown response. HttpResponse res = ch.readInbound(); @@ -636,7 +587,7 @@ public void testGarbageHeaders() { assertThat(ch.readInbound(), is(nullValue())); // More garbage should not generate anything (i.e. the decoder discards anything beyond this point.) - ch.writeInbound(Unpooled.wrappedBuffer(data)); + ch.writeInbound(Unpooled.copiedBuffer(data)); assertThat(ch.readInbound(), is(nullValue())); // Closing the connection should not generate anything since the protocol has been violated. @@ -683,4 +634,68 @@ public void testConnectionClosedBeforeHeadersReceived() { assertThat(message.decoderResult().cause(), instanceOf(PrematureChannelClosureException.class)); assertNull(channel.readInbound()); } + + @Test + public void testTrailerWithEmptyLineInSeparateBuffer() { + HttpResponseDecoder decoder = new HttpResponseDecoder(); + EmbeddedChannel channel = new EmbeddedChannel(decoder); + + String headers = "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding: chunked\r\n" + + "Trailer: My-Trailer\r\n"; + assertFalse(channel.writeInbound(Unpooled.copiedBuffer(headers.getBytes(CharsetUtil.US_ASCII)))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("\r\n".getBytes(CharsetUtil.US_ASCII)))); + + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("0\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("My-Trailer: 42\r\n", CharsetUtil.US_ASCII))); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer("\r\n", CharsetUtil.US_ASCII))); + + HttpResponse response = channel.readInbound(); + assertEquals(2, response.headers().size()); + assertEquals("chunked", response.headers().get(HttpHeaderNames.TRANSFER_ENCODING)); + assertEquals("My-Trailer", response.headers().get(HttpHeaderNames.TRAILER)); + + LastHttpContent lastContent = channel.readInbound(); + assertEquals(1, lastContent.trailingHeaders().size()); + assertEquals("42", lastContent.trailingHeaders().get("My-Trailer")); + assertEquals(0, 
lastContent.content().readableBytes()); + lastContent.release(); + + assertFalse(channel.finish()); + } + + @Test + public void testWhitespace() { + EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder()); + String requestStr = "HTTP/1.1 200 OK\r\n" + + "Transfer-Encoding : chunked\r\n" + + "Host: netty.io\n\r\n"; + + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII))); + HttpResponse response = channel.readInbound(); + assertFalse(response.decoderResult().isFailure()); + assertEquals(HttpHeaderValues.CHUNKED.toString(), response.headers().get(HttpHeaderNames.TRANSFER_ENCODING)); + assertEquals("netty.io", response.headers().get(HttpHeaderNames.HOST)); + assertFalse(channel.finish()); + } + + @Test + public void testHttpMessageDecoderResult() { + String responseStr = "HTTP/1.1 200 OK\r\n" + + "Content-Length: 11\r\n" + + "Connection: close\r\n\r\n" + + "Lorem ipsum"; + EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder()); + assertTrue(channel.writeInbound(Unpooled.copiedBuffer(responseStr, CharsetUtil.US_ASCII))); + HttpResponse response = channel.readInbound(); + assertTrue(response.decoderResult().isSuccess()); + assertThat(response.decoderResult(), instanceOf(HttpMessageDecoderResult.class)); + HttpMessageDecoderResult decoderResult = (HttpMessageDecoderResult) response.decoderResult(); + assertThat(decoderResult.initialLineLength(), is(15)); + assertThat(decoderResult.headerSize(), is(35)); + assertThat(decoderResult.totalSize(), is(50)); + HttpContent c = channel.readInbound(); + c.release(); + assertFalse(channel.finish()); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseEncoderTest.java index 218f6bdf748..3ecd918a909 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseEncoderTest.java +++ 
b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * -* http://www.apache.org/licenses/LICENSE-2.0 +* https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -20,13 +20,17 @@ import io.netty.channel.FileRegion; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.IOException; import java.nio.channels.WritableByteChannel; +import static org.hamcrest.MatcherAssert.assertThat; import static org.hamcrest.Matchers.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpResponseEncoderTest { private static final long INTEGER_OVERFLOW = (long) Integer.MAX_VALUE + 1; @@ -282,14 +286,14 @@ private static void assertEmptyResponse(EmbeddedChannel channel, HttpResponseSta ByteBuf buffer = channel.readOutbound(); StringBuilder responseText = new StringBuilder(); - responseText.append(HttpVersion.HTTP_1_1.toString()).append(' ').append(status.toString()).append("\r\n"); + responseText.append(HttpVersion.HTTP_1_1).append(' ').append(status.toString()).append("\r\n"); if (!headerStripped && headerName != null) { responseText.append(headerName).append(": "); if (HttpHeaderNames.CONTENT_LENGTH.contentEquals(headerName)) { responseText.append('0'); } else { - responseText.append(HttpHeaderValues.CHUNKED.toString()); + responseText.append(HttpHeaderValues.CHUNKED); } responseText.append("\r\n"); } @@ -353,4 +357,48 @@ 
private void testEmptyContents(boolean chunked, boolean trailers) throws Excepti lastContent.release(); assertFalse(channel.finish()); } + + @Test + public void testStatusResetContentTransferContentLength() { + testStatusResetContentTransferContentLength0(HttpHeaderNames.CONTENT_LENGTH, Unpooled.buffer().writeLong(8)); + } + + @Test + public void testStatusResetContentTransferEncoding() { + testStatusResetContentTransferContentLength0(HttpHeaderNames.TRANSFER_ENCODING, Unpooled.buffer().writeLong(8)); + } + + private static void testStatusResetContentTransferContentLength0(CharSequence headerName, ByteBuf content) { + EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseEncoder()); + + HttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.RESET_CONTENT); + if (HttpHeaderNames.CONTENT_LENGTH.contentEqualsIgnoreCase(headerName)) { + response.headers().set(HttpHeaderNames.CONTENT_LENGTH, content.readableBytes()); + } else { + response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + } + + assertTrue(channel.writeOutbound(response)); + assertTrue(channel.writeOutbound(new DefaultHttpContent(content))); + assertTrue(channel.writeOutbound(LastHttpContent.EMPTY_LAST_CONTENT)); + + StringBuilder responseText = new StringBuilder(); + responseText.append(HttpVersion.HTTP_1_1).append(' ') + .append(HttpResponseStatus.RESET_CONTENT).append("\r\n"); + responseText.append(HttpHeaderNames.CONTENT_LENGTH).append(": 0\r\n"); + responseText.append("\r\n"); + + StringBuilder written = new StringBuilder(); + for (;;) { + ByteBuf buffer = channel.readOutbound(); + if (buffer == null) { + break; + } + written.append(buffer.toString(CharsetUtil.US_ASCII)); + buffer.release(); + } + + assertEquals(responseText.toString(), written.toString()); + assertFalse(channel.finish()); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseStatusTest.java 
b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseStatusTest.java index 2bc83bac8fc..f190dbd807b 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseStatusTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseStatusTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,11 +16,12 @@ package io.netty.handler.codec.http; import io.netty.util.AsciiString; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.handler.codec.http.HttpResponseStatus.parseLine; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertSame; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertSame; +import static org.junit.jupiter.api.Assertions.assertThrows; public class HttpResponseStatusTest { @Test @@ -46,14 +47,14 @@ public void parseLineStringCustomCodeAndPhrase() { assertEquals("FOO", customStatus.reasonPhrase()); } - @Test(expected = IllegalArgumentException.class) + @Test public void parseLineStringMalformedCode() { - parseLine("200a"); + assertThrows(IllegalArgumentException.class, () -> parseLine("200a")); } - @Test(expected = IllegalArgumentException.class) + @Test public void parseLineStringMalformedCodeWithPhrase() { - parseLine("200a foo"); + assertThrows(IllegalArgumentException.class, () -> parseLine("200a foo")); } @Test @@ -79,13 +80,13 @@ public void parseLineAsciiStringCustomCodeAndPhrase() { assertEquals("FOO", customStatus.reasonPhrase()); } - @Test(expected = IllegalArgumentException.class) + @Test public void 
parseLineAsciiStringMalformedCode() { - parseLine(new AsciiString("200a")); + assertThrows(IllegalArgumentException.class, () -> parseLine(new AsciiString("200a"))); } - @Test(expected = IllegalArgumentException.class) + @Test public void parseLineAsciiStringMalformedCodeWithPhrase() { - parseLine(new AsciiString("200a foo")); + assertThrows(IllegalArgumentException.class, () -> parseLine(new AsciiString("200a foo"))); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerCodecTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerCodecTest.java index 17570b758f9..0ffdc9b7446 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerCodecTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerCodecTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,10 +19,14 @@ import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.hamcrest.CoreMatchers.*; -import static org.junit.Assert.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpServerCodecTest { @@ -33,7 +37,7 @@ public class HttpServerCodecTest { public void testUnfinishedChunkedHttpRequestIsLastFlag() throws Exception { int maxChunkSize = 2000; - HttpServerCodec 
httpServerCodec = new HttpServerCodec(1000, 1000, maxChunkSize); + HttpServerCodec httpServerCodec = new HttpServerCodec(1000, 1000); EmbeddedChannel decoderEmbedder = new EmbeddedChannel(httpServerCodec); int totalContentLength = maxChunkSize * 5; diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerExpectContinueHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerExpectContinueHandlerTest.java index e3288cefc1a..d908c8d7dde 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerExpectContinueHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerExpectContinueHandlerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,10 +17,12 @@ import io.netty.channel.embedded.EmbeddedChannel; import io.netty.util.ReferenceCountUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static org.hamcrest.CoreMatchers.*; -import static org.junit.Assert.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; public class HttpServerExpectContinueHandlerTest { diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerKeepAliveHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerKeepAliveHandlerTest.java index 1332a2b8a89..66462916f0e 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerKeepAliveHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerKeepAliveHandlerTest.java @@ -5,7 +5,7 
@@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,13 +16,11 @@ package io.netty.handler.codec.http; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.util.AsciiString; import io.netty.util.ReferenceCountUtil; -import io.netty.util.internal.StringUtil; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.Parameterized; -import org.junit.runners.Parameterized.Parameters; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; import java.util.Arrays; import java.util.Collection; @@ -37,11 +35,10 @@ import static io.netty.handler.codec.http.HttpUtil.setContentLength; import static io.netty.handler.codec.http.HttpUtil.setKeepAlive; import static io.netty.handler.codec.http.HttpUtil.setTransferEncodingChunked; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; -@RunWith(Parameterized.class) public class HttpServerKeepAliveHandlerTest { private static final String REQUEST_KEEP_ALIVE = "REQUEST_KEEP_ALIVE"; private static final int NOT_SELF_DEFINED_MSG_LENGTH = 0; @@ -49,16 +46,14 @@ public class HttpServerKeepAliveHandlerTest { private static final int SET_MULTIPART = 2; private static final int SET_CHUNKED = 4; - private final boolean isKeepAliveResponseExpected; - private final HttpVersion httpVersion; - 
private final HttpResponseStatus responseStatus; - private final String sendKeepAlive; - private final int setSelfDefinedMessageLength; - private final String setResponseConnection; private EmbeddedChannel channel; - @Parameters - public static Collection keepAliveProvider() { + @BeforeEach + public void setUp() { + channel = new EmbeddedChannel(new HttpServerKeepAliveHandler()); + } + + static Collection keepAliveProvider() { return Arrays.asList(new Object[][] { { true, HttpVersion.HTTP_1_0, OK, REQUEST_KEEP_ALIVE, SET_RESPONSE_LENGTH, KEEP_ALIVE }, // 0 { true, HttpVersion.HTTP_1_0, OK, REQUEST_KEEP_ALIVE, SET_MULTIPART, KEEP_ALIVE }, // 1 @@ -78,31 +73,19 @@ public static Collection keepAliveProvider() { }); } - public HttpServerKeepAliveHandlerTest(boolean isKeepAliveResponseExpected, HttpVersion httpVersion, - HttpResponseStatus responseStatus, String sendKeepAlive, - int setSelfDefinedMessageLength, CharSequence setResponseConnection) { - this.isKeepAliveResponseExpected = isKeepAliveResponseExpected; - this.httpVersion = httpVersion; - this.responseStatus = responseStatus; - this.sendKeepAlive = sendKeepAlive; - this.setSelfDefinedMessageLength = setSelfDefinedMessageLength; - this.setResponseConnection = setResponseConnection == null? 
null : setResponseConnection.toString(); - } - - @Before - public void setUp() { - channel = new EmbeddedChannel(new HttpServerKeepAliveHandler()); - } - - @Test - public void test_KeepAlive() throws Exception { + @ParameterizedTest + @MethodSource("keepAliveProvider") + public void test_KeepAlive(boolean isKeepAliveResponseExpected, HttpVersion httpVersion, + HttpResponseStatus responseStatus, + String sendKeepAlive, int setSelfDefinedMessageLength, + AsciiString setResponseConnection) throws Exception { FullHttpRequest request = new DefaultFullHttpRequest(httpVersion, HttpMethod.GET, "/v1/foo/bar"); setKeepAlive(request, REQUEST_KEEP_ALIVE.equals(sendKeepAlive)); HttpResponse response = new DefaultFullHttpResponse(httpVersion, responseStatus); - if (!StringUtil.isNullOrEmpty(setResponseConnection)) { + if (setResponseConnection != null) { response.headers().set(HttpHeaderNames.CONNECTION, setResponseConnection); } - setupMessageLength(response); + setupMessageLength(response, setSelfDefinedMessageLength); assertTrue(channel.writeInbound(request)); Object requestForwarded = channel.readInbound(); @@ -111,33 +94,35 @@ public void test_KeepAlive() throws Exception { channel.writeAndFlush(response); HttpResponse writtenResponse = channel.readOutbound(); - assertEquals("channel.isOpen", isKeepAliveResponseExpected, channel.isOpen()); - assertEquals("response keep-alive", isKeepAliveResponseExpected, isKeepAlive(writtenResponse)); + assertEquals(isKeepAliveResponseExpected, channel.isOpen(), "channel.isOpen"); + assertEquals(isKeepAliveResponseExpected, isKeepAlive(writtenResponse), "response keep-alive"); ReferenceCountUtil.release(writtenResponse); assertFalse(channel.finishAndReleaseAll()); } - @Test - public void testConnectionCloseHeaderHandledCorrectly() throws Exception { - HttpResponse response = new DefaultFullHttpResponse(httpVersion, responseStatus); - response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); - setupMessageLength(response); 
- - channel.writeAndFlush(response); - HttpResponse writtenResponse = channel.readOutbound(); - - assertFalse(channel.isOpen()); - ReferenceCountUtil.release(writtenResponse); - assertFalse(channel.finishAndReleaseAll()); + static Collection connectionCloseProvider() { + return Arrays.asList(new Object[][] { + { HttpVersion.HTTP_1_0, OK, SET_RESPONSE_LENGTH }, + { HttpVersion.HTTP_1_0, OK, SET_MULTIPART }, + { HttpVersion.HTTP_1_0, OK, NOT_SELF_DEFINED_MSG_LENGTH }, + { HttpVersion.HTTP_1_0, NO_CONTENT, NOT_SELF_DEFINED_MSG_LENGTH }, + { HttpVersion.HTTP_1_1, OK, SET_RESPONSE_LENGTH }, + { HttpVersion.HTTP_1_1, OK, SET_MULTIPART }, + { HttpVersion.HTTP_1_1, OK, NOT_SELF_DEFINED_MSG_LENGTH }, + { HttpVersion.HTTP_1_1, OK, SET_CHUNKED }, + { HttpVersion.HTTP_1_1, NO_CONTENT, NOT_SELF_DEFINED_MSG_LENGTH } + }); } - @Test - public void testConnectionCloseHeaderHandledCorrectlyForVoidPromise() throws Exception { + @ParameterizedTest + @MethodSource("connectionCloseProvider") + public void testConnectionCloseHeaderHandledCorrectly( + HttpVersion httpVersion, HttpResponseStatus responseStatus, int setSelfDefinedMessageLength) { HttpResponse response = new DefaultFullHttpResponse(httpVersion, responseStatus); response.headers().set(HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE); - setupMessageLength(response); + setupMessageLength(response, setSelfDefinedMessageLength); - channel.writeAndFlush(response, channel.voidPromise()); + channel.writeAndFlush(response); HttpResponse writtenResponse = channel.readOutbound(); assertFalse(channel.isOpen()); @@ -145,8 +130,12 @@ public void testConnectionCloseHeaderHandledCorrectlyForVoidPromise() throws Exc assertFalse(channel.finishAndReleaseAll()); } - @Test - public void test_PipelineKeepAlive() { + @ParameterizedTest + @MethodSource("keepAliveProvider") + public void testPipelineKeepAlive(boolean isKeepAliveResponseExpected, HttpVersion httpVersion, + HttpResponseStatus responseStatus, + String sendKeepAlive, int 
setSelfDefinedMessageLength, + AsciiString setResponseConnection) { FullHttpRequest firstRequest = new DefaultFullHttpRequest(httpVersion, HttpMethod.GET, "/v1/foo/bar"); setKeepAlive(firstRequest, true); FullHttpRequest secondRequest = new DefaultFullHttpRequest(httpVersion, HttpMethod.GET, "/v1/foo/bar"); @@ -167,8 +156,8 @@ public void test_PipelineKeepAlive() { channel.writeAndFlush(response.retainedDuplicate()); HttpResponse firstResponse = channel.readOutbound(); - assertTrue("channel.isOpen", channel.isOpen()); - assertTrue("response keep-alive", isKeepAlive(firstResponse)); + assertTrue(channel.isOpen(), "channel.isOpen"); + assertTrue(isKeepAlive(firstResponse), "response keep-alive"); ReferenceCountUtil.release(firstResponse); requestForwarded = channel.readInbound(); @@ -177,20 +166,20 @@ public void test_PipelineKeepAlive() { channel.writeAndFlush(informationalResp); HttpResponse writtenInfoResp = channel.readOutbound(); - assertTrue("channel.isOpen", channel.isOpen()); - assertTrue("response keep-alive", isKeepAlive(writtenInfoResp)); + assertTrue(channel.isOpen(), "channel.isOpen"); + assertTrue(isKeepAlive(writtenInfoResp), "response keep-alive"); ReferenceCountUtil.release(writtenInfoResp); - if (!StringUtil.isNullOrEmpty(setResponseConnection)) { + if (setResponseConnection != null) { response.headers().set(HttpHeaderNames.CONNECTION, setResponseConnection); } else { response.headers().remove(HttpHeaderNames.CONNECTION); } - setupMessageLength(response); + setupMessageLength(response, setSelfDefinedMessageLength); channel.writeAndFlush(response.retainedDuplicate()); HttpResponse secondResponse = channel.readOutbound(); - assertEquals("channel.isOpen", isKeepAliveResponseExpected, channel.isOpen()); - assertEquals("response keep-alive", isKeepAliveResponseExpected, isKeepAlive(secondResponse)); + assertEquals(isKeepAliveResponseExpected, channel.isOpen(), "channel.isOpen"); + assertEquals(isKeepAliveResponseExpected, isKeepAlive(secondResponse), 
"response keep-alive"); ReferenceCountUtil.release(secondResponse); requestForwarded = channel.readInbound(); @@ -200,14 +189,14 @@ public void test_PipelineKeepAlive() { if (isKeepAliveResponseExpected) { channel.writeAndFlush(response); HttpResponse finalResponse = channel.readOutbound(); - assertFalse("channel.isOpen", channel.isOpen()); - assertFalse("response keep-alive", isKeepAlive(finalResponse)); + assertFalse(channel.isOpen(), "channel.isOpen"); + assertFalse(isKeepAlive(finalResponse), "response keep-alive"); } ReferenceCountUtil.release(response); assertFalse(channel.finishAndReleaseAll()); } - private void setupMessageLength(HttpResponse response) { + private static void setupMessageLength(HttpResponse response, int setSelfDefinedMessageLength) { switch (setSelfDefinedMessageLength) { case NOT_SELF_DEFINED_MSG_LENGTH: if (isContentLengthSet(response)) { diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java new file mode 100644 index 00000000000..507e05c8c9b --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpServerUpgradeHandlerTest.java @@ -0,0 +1,215 @@ +/* + * Copyright 2018 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodec; +import io.netty.handler.codec.http.HttpServerUpgradeHandler.UpgradeCodecFactory; +import io.netty.util.CharsetUtil; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import org.junit.jupiter.api.Test; + +import java.util.Collection; +import java.util.Collections; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public class HttpServerUpgradeHandlerTest { + + private static class TestUpgradeCodec implements UpgradeCodec { + @Override + public Collection requiredUpgradeHeaders() { + return Collections.emptyList(); + } + + @Override + public boolean prepareUpgradeResponse(ChannelHandlerContext ctx, FullHttpRequest upgradeRequest, + HttpHeaders upgradeHeaders) { + return true; + } + + @Override + public void upgradeTo(ChannelHandlerContext ctx, FullHttpRequest upgradeRequest) { + // Ensure that the HttpServerUpgradeHandler is still installed when this is called + assertEquals(ctx.pipeline().context(HttpServerUpgradeHandler.class), ctx); + assertNotNull(ctx.pipeline().get(HttpServerUpgradeHandler.class)); + + // Add a marker handler to signal that the upgrade has happened + ctx.pipeline().addAfter(ctx.name(), "marker", new ChannelHandler() { }); + } + } + + @Test + public void upgradesPipelineInSameMethodInvocation() { + final HttpServerCodec httpServerCodec = new 
HttpServerCodec(); + final UpgradeCodecFactory factory = protocol -> new TestUpgradeCodec(); + + ChannelHandler testInStackFrame = new ChannelHandler() { + // marker boolean to signal that we're in the `channelRead` method + private boolean inReadCall; + private boolean writeUpgradeMessage; + private boolean writeFlushed; + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + assertFalse(inReadCall); + assertFalse(writeUpgradeMessage); + + inReadCall = true; + try { + ctx.fireChannelRead(msg); + // All in the same call stack, the upgrade codec should receive the message, + // written the upgrade response, and upgraded the pipeline. + assertTrue(writeUpgradeMessage); + assertFalse(writeFlushed); + assertNull(ctx.pipeline().get(HttpServerCodec.class)); + assertNotNull(ctx.pipeline().get("marker")); + } finally { + inReadCall = false; + } + } + + @Override + public Future write(final ChannelHandlerContext ctx, final Object msg) { + // We ensure that we're in the read call and defer the write so we can + // make sure the pipeline was reformed irrespective of the flush completing. 
+ assertTrue(inReadCall); + writeUpgradeMessage = true; + Promise promise = ctx.newPromise(); + ctx.channel().executor().execute(() -> ctx.write(msg).cascadeTo(promise)); + promise.addListener(f -> writeFlushed = true); + return promise; + } + }; + + HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(httpServerCodec, factory); + + EmbeddedChannel channel = new EmbeddedChannel(testInStackFrame, httpServerCodec, upgradeHandler); + + String upgradeString = "GET / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Connection: Upgrade, HTTP2-Settings\r\n" + + "Upgrade: nextprotocol\r\n" + + "HTTP2-Settings: AAMAAABkAAQAAP__\r\n\r\n"; + ByteBuf upgrade = Unpooled.copiedBuffer(upgradeString, CharsetUtil.US_ASCII); + + assertFalse(channel.writeInbound(upgrade)); + assertNull(channel.pipeline().get(HttpServerCodec.class)); + assertNotNull(channel.pipeline().get("marker")); + + channel.flushOutbound(); + ByteBuf upgradeMessage = channel.readOutbound(); + String expectedHttpResponse = "HTTP/1.1 101 Switching Protocols\r\n" + + "connection: upgrade\r\n" + + "upgrade: nextprotocol\r\n\r\n"; + assertEquals(expectedHttpResponse, upgradeMessage.toString(CharsetUtil.US_ASCII)); + assertTrue(upgradeMessage.release()); + assertFalse(channel.finishAndReleaseAll()); + } + + @Test + public void skippedUpgrade() { + final HttpServerCodec httpServerCodec = new HttpServerCodec(); + final UpgradeCodecFactory factory = new UpgradeCodecFactory() { + @Override + public UpgradeCodec newUpgradeCodec(CharSequence protocol) { + fail("Should never be invoked"); + return null; + } + }; + + HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(httpServerCodec, factory) { + @Override + protected boolean shouldHandleUpgradeRequest(HttpRequest req) { + return !req.headers().contains(HttpHeaderNames.UPGRADE, "do-not-upgrade", false); + } + }; + + EmbeddedChannel channel = new EmbeddedChannel(httpServerCodec, upgradeHandler); + + String upgradeString = "GET / HTTP/1.1\r\n" 
+ + "Host: example.com\r\n" + + "Connection: Upgrade\r\n" + + "Upgrade: do-not-upgrade\r\n\r\n"; + ByteBuf upgrade = Unpooled.copiedBuffer(upgradeString, CharsetUtil.US_ASCII); + + // The upgrade request should not be passed to the next handler without any processing. + assertTrue(channel.writeInbound(upgrade)); + assertNotNull(channel.pipeline().get(HttpServerCodec.class)); + assertNull(channel.pipeline().get("marker")); + + HttpRequest req = channel.readInbound(); + assertFalse(req instanceof FullHttpRequest); // Should not be aggregated. + assertTrue(req.headers().contains(HttpHeaderNames.CONNECTION, "Upgrade", false)); + assertTrue(req.headers().contains(HttpHeaderNames.UPGRADE, "do-not-upgrade", false)); + assertTrue(channel.readInbound() instanceof LastHttpContent); + assertNull(channel.readInbound()); + + // No response should be written because we're just passing through. + channel.flushOutbound(); + assertNull(channel.readOutbound()); + assertFalse(channel.finishAndReleaseAll()); + } + + @Test + public void upgradeFail() { + final HttpServerCodec httpServerCodec = new HttpServerCodec(); + final UpgradeCodecFactory factory = new UpgradeCodecFactory() { + @Override + public UpgradeCodec newUpgradeCodec(CharSequence protocol) { + return new TestUpgradeCodec(); + } + }; + + HttpServerUpgradeHandler upgradeHandler = new HttpServerUpgradeHandler(httpServerCodec, factory); + + EmbeddedChannel channel = new EmbeddedChannel(httpServerCodec, upgradeHandler); + + // Build a h2c upgrade request, but without connection header. + String upgradeString = "GET / HTTP/1.1\r\n" + + "Host: example.com\r\n" + + "Upgrade: h2c\r\n\r\n"; + ByteBuf upgrade = Unpooled.copiedBuffer(upgradeString, CharsetUtil.US_ASCII); + + assertTrue(channel.writeInbound(upgrade)); + assertNotNull(channel.pipeline().get(HttpServerCodec.class)); + assertNotNull(channel.pipeline().get(HttpServerUpgradeHandler.class)); // Should not be removed. 
+ assertNull(channel.pipeline().get("marker")); + + HttpRequest req = channel.readInbound(); + assertEquals(HttpVersion.HTTP_1_1, req.protocolVersion()); + assertTrue(req.headers().contains(HttpHeaderNames.UPGRADE, "h2c", false)); + assertFalse(req.headers().contains(HttpHeaderNames.CONNECTION)); + ReferenceCountUtil.release(req); + assertNull(channel.readInbound()); + + // No response should be written because we're just passing through. + channel.flushOutbound(); + assertNull(channel.readOutbound()); + assertFalse(channel.finishAndReleaseAll()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java index 0a2de487958..21871a3be16 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpUtilTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,22 +17,24 @@ import io.netty.util.CharsetUtil; import io.netty.util.ReferenceCountUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.function.Executable; +import java.net.InetAddress; +import java.net.InetSocketAddress; import java.nio.charset.StandardCharsets; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; -import static org.hamcrest.Matchers.containsString; -import static org.hamcrest.Matchers.hasToString; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertThat; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static io.netty.handler.codec.http.HttpUtil.normalizeAndGetContentLength; +import static java.util.Collections.singletonList; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class HttpUtilTest { @@ -77,23 +79,87 @@ public void testGetCharsetAsRawCharSequence() { @Test public void testGetCharset() { - String NORMAL_CONTENT_TYPE = "text/html; charset=utf-8"; - String UPPER_CASE_NORMAL_CONTENT_TYPE = "TEXT/HTML; CHARSET=UTF-8"; + testGetCharsetUtf8("text/html; charset=utf-8"); + } + + @Test + public void testGetCharsetNoSpace() { + 
testGetCharsetUtf8("text/html;charset=utf-8"); + } + + @Test + public void testGetCharsetQuoted() { + testGetCharsetUtf8("text/html; charset=\"utf-8\""); + } + + @Test + public void testGetCharsetNoSpaceQuoted() { + testGetCharsetUtf8("text/html;charset=\"utf-8\""); + } + + private void testGetCharsetUtf8(String contentType) { + String UPPER_CASE_NORMAL_CONTENT_TYPE = contentType.toUpperCase(); HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); - message.headers().set(HttpHeaderNames.CONTENT_TYPE, NORMAL_CONTENT_TYPE); + message.headers().set(HttpHeaderNames.CONTENT_TYPE, contentType); assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(message)); - assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(NORMAL_CONTENT_TYPE)); + assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(contentType)); message.headers().set(HttpHeaderNames.CONTENT_TYPE, UPPER_CASE_NORMAL_CONTENT_TYPE); assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(message)); assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(UPPER_CASE_NORMAL_CONTENT_TYPE)); } + @Test + public void testGetCharsetNoLeadingQuotes() { + testGetCharsetInvalidQuotes("text/html;charset=utf-8\""); + } + + @Test + public void testGetCharsetNoTrailingQuotes() { + testGetCharsetInvalidQuotes("text/html;charset=\"utf-8"); + } + + @Test + public void testGetCharsetOnlyQuotes() { + testGetCharsetInvalidQuotes("text/html;charset=\"\""); + } + + private static void testGetCharsetInvalidQuotes(String contentType) { + String UPPER_CASE_NORMAL_CONTENT_TYPE = contentType.toUpperCase(); + + HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); + message.headers().set(HttpHeaderNames.CONTENT_TYPE, contentType); + assertEquals(CharsetUtil.ISO_8859_1, HttpUtil.getCharset(message, CharsetUtil.ISO_8859_1)); + assertEquals(CharsetUtil.ISO_8859_1, HttpUtil.getCharset(contentType, CharsetUtil.ISO_8859_1)); + + message.headers().set(HttpHeaderNames.CONTENT_TYPE, 
UPPER_CASE_NORMAL_CONTENT_TYPE); + assertEquals(CharsetUtil.ISO_8859_1, HttpUtil.getCharset(message, CharsetUtil.ISO_8859_1)); + assertEquals(CharsetUtil.ISO_8859_1, HttpUtil.getCharset(UPPER_CASE_NORMAL_CONTENT_TYPE, + CharsetUtil.ISO_8859_1)); + } + + @Test + public void testGetCharsetIfNotLastParameter() { + String NORMAL_CONTENT_TYPE_WITH_PARAMETERS = "application/soap-xml; charset=utf-8; " + + "action=\"http://www.soap-service.by/foo/add\""; + + HttpMessage message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, + "http://localhost:7788/foo"); + message.headers().set(HttpHeaderNames.CONTENT_TYPE, NORMAL_CONTENT_TYPE_WITH_PARAMETERS); + + assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(message)); + assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(NORMAL_CONTENT_TYPE_WITH_PARAMETERS)); + + assertEquals("utf-8", HttpUtil.getCharsetAsSequence(message)); + assertEquals("utf-8", HttpUtil.getCharsetAsSequence(NORMAL_CONTENT_TYPE_WITH_PARAMETERS)); + } + @Test public void testGetCharset_defaultValue() { final String SIMPLE_CONTENT_TYPE = "text/html"; final String CONTENT_TYPE_WITH_INCORRECT_CHARSET = "text/html; charset=UTFFF"; + final String CONTENT_TYPE_WITH_ILLEGAL_CHARSET_NAME = "text/html; charset=!illegal!"; HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); message.headers().set(HttpHeaderNames.CONTENT_TYPE, SIMPLE_CONTENT_TYPE); @@ -112,6 +178,15 @@ public void testGetCharset_defaultValue() { assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(message, StandardCharsets.UTF_8)); assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(CONTENT_TYPE_WITH_INCORRECT_CHARSET, StandardCharsets.UTF_8)); + + message.headers().set(HttpHeaderNames.CONTENT_TYPE, CONTENT_TYPE_WITH_ILLEGAL_CHARSET_NAME); + assertEquals(CharsetUtil.ISO_8859_1, HttpUtil.getCharset(message)); + assertEquals(CharsetUtil.ISO_8859_1, HttpUtil.getCharset(CONTENT_TYPE_WITH_ILLEGAL_CHARSET_NAME)); + + 
message.headers().set(HttpHeaderNames.CONTENT_TYPE, CONTENT_TYPE_WITH_ILLEGAL_CHARSET_NAME); + assertEquals(CharsetUtil.UTF_8, HttpUtil.getCharset(message, StandardCharsets.UTF_8)); + assertEquals(CharsetUtil.UTF_8, + HttpUtil.getCharset(CONTENT_TYPE_WITH_ILLEGAL_CHARSET_NAME, StandardCharsets.UTF_8)); } @Test @@ -174,12 +249,12 @@ public void testDoubleChunkedHeader() { HttpMessage message = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK); message.headers().add(HttpHeaderNames.TRANSFER_ENCODING, "chunked"); HttpUtil.setTransferEncodingChunked(message, true); - List expected = Collections.singletonList("chunked"); + List expected = singletonList("chunked"); assertEquals(expected, message.headers().getAll(HttpHeaderNames.TRANSFER_ENCODING)); } private static List allPossibleCasesOfContinue() { - final List cases = new ArrayList(); + final List cases = new ArrayList<>(); final String c = "continue"; for (int i = 0; i < Math.pow(2, c.length()); i++) { final StringBuilder sb = new StringBuilder(c.length()); @@ -256,4 +331,95 @@ private static void runUnsupportedExpectationTest(final HttpMessage message, fin ReferenceCountUtil.release(message); } + @Test + public void testFormatHostnameForHttpFromResolvedAddressWithHostname() throws Exception { + InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getByName("localhost"), 8080); + assertEquals("localhost", HttpUtil.formatHostnameForHttp(socketAddress)); + } + + @Test + public void testFormatHostnameForHttpFromUnesolvedAddressWithHostname() { + InetSocketAddress socketAddress = InetSocketAddress.createUnresolved("localhost", 80); + assertEquals("localhost", HttpUtil.formatHostnameForHttp(socketAddress)); + } + + @Test + public void testIpv6() throws Exception { + InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getByName("::1"), 8080); + assertEquals("[::1]", HttpUtil.formatHostnameForHttp(socketAddress)); + } + + @Test + public void testIpv6Unresolved() { + 
InetSocketAddress socketAddress = InetSocketAddress.createUnresolved("::1", 8080); + assertEquals("[::1]", HttpUtil.formatHostnameForHttp(socketAddress)); + } + + @Test + public void testIpv4() throws Exception { + InetSocketAddress socketAddress = new InetSocketAddress(InetAddress.getByName("10.0.0.1"), 8080); + assertEquals("10.0.0.1", HttpUtil.formatHostnameForHttp(socketAddress)); + } + + @Test + public void testIpv4Unresolved() { + InetSocketAddress socketAddress = InetSocketAddress.createUnresolved("10.0.0.1", 8080); + assertEquals("10.0.0.1", HttpUtil.formatHostnameForHttp(socketAddress)); + } + + @Test + public void testKeepAliveIfConnectionHeaderAbsent() { + HttpMessage http11Message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, + "http:localhost/http_1_1"); + assertTrue(HttpUtil.isKeepAlive(http11Message)); + + HttpMessage http10Message = new DefaultHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.GET, + "http:localhost/http_1_0"); + assertFalse(HttpUtil.isKeepAlive(http10Message)); + } + + @Test + public void testKeepAliveIfConnectionHeaderMultipleValues() { + HttpMessage http11Message = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, + "http:localhost/http_1_1"); + http11Message.headers().set( + HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE + ", " + HttpHeaderValues.CLOSE); + assertFalse(HttpUtil.isKeepAlive(http11Message)); + + http11Message.headers().set( + HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE + ", Close"); + assertFalse(HttpUtil.isKeepAlive(http11Message)); + + http11Message.headers().set( + HttpHeaderNames.CONNECTION, HttpHeaderValues.CLOSE + ", " + HttpHeaderValues.UPGRADE); + assertFalse(HttpUtil.isKeepAlive(http11Message)); + + http11Message.headers().set( + HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE + ", " + HttpHeaderValues.KEEP_ALIVE); + assertTrue(HttpUtil.isKeepAlive(http11Message)); + } + + @Test + public void normalizeAndGetContentLengthEmpty() { + 
testNormalizeAndGetContentLengthInvalidContentLength(""); + } + + @Test + public void normalizeAndGetContentLengthNotANumber() { + testNormalizeAndGetContentLengthInvalidContentLength("foo"); + } + + @Test + public void normalizeAndGetContentLengthNegative() { + testNormalizeAndGetContentLengthInvalidContentLength("-1"); + } + + private static void testNormalizeAndGetContentLengthInvalidContentLength(final String contentLengthField) { + assertThrows(IllegalArgumentException.class, new Executable() { + @Override + public void execute() { + normalizeAndGetContentLength(singletonList(contentLengthField), false, false); + } + }); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/MultipleContentLengthHeadersTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/MultipleContentLengthHeadersTest.java new file mode 100644 index 00000000000..54aadd0fd03 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/MultipleContentLengthHeadersTest.java @@ -0,0 +1,128 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http; + +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.util.CharsetUtil; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.params.ParameterizedTest; +import org.junit.jupiter.params.provider.MethodSource; + +import java.util.Arrays; +import java.util.Collection; +import java.util.List; + +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_INITIAL_BUFFER_SIZE; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_HEADER_SIZE; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_MAX_INITIAL_LINE_LENGTH; +import static io.netty.handler.codec.http.HttpObjectDecoder.DEFAULT_VALIDATE_HEADERS; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.Matchers.contains; +import static org.hamcrest.Matchers.containsString; +import static org.hamcrest.Matchers.is; +import static org.hamcrest.core.IsInstanceOf.instanceOf; + +public class MultipleContentLengthHeadersTest { + + static Collection parameters() { + return Arrays.asList(new Object[][] { + { false, false, false }, + { false, false, true }, + { false, true, false }, + { false, true, true }, + { true, false, false }, + { true, false, true }, + { true, true, false }, + { true, true, true } + }); + } + + private static EmbeddedChannel newChannel(boolean allowDuplicateContentLengths) { + HttpRequestDecoder decoder = new HttpRequestDecoder( + DEFAULT_MAX_INITIAL_LINE_LENGTH, + DEFAULT_MAX_HEADER_SIZE, + DEFAULT_VALIDATE_HEADERS, + DEFAULT_INITIAL_BUFFER_SIZE, + allowDuplicateContentLengths); + return new EmbeddedChannel(decoder); + } + + @ParameterizedTest + @MethodSource("parameters") + public void testMultipleContentLengthHeadersBehavior(boolean allowDuplicateContentLengths, + boolean sameValue, boolean singleField) { + EmbeddedChannel channel = newChannel(allowDuplicateContentLengths); + String requestStr = setupRequestString(sameValue, 
singleField); + assertThat(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)), is(true)); + HttpRequest request = channel.readInbound(); + + if (allowDuplicateContentLengths) { + if (sameValue) { + assertValid(request); + List contentLengths = request.headers().getAll(HttpHeaderNames.CONTENT_LENGTH); + assertThat(contentLengths, contains("1")); + LastHttpContent body = channel.readInbound(); + assertThat(body.content().readableBytes(), is(1)); + assertThat(body.content().readCharSequence(1, CharsetUtil.US_ASCII).toString(), is("a")); + } else { + assertInvalid(request); + } + } else { + assertInvalid(request); + } + assertThat(channel.finish(), is(false)); + } + + private static String setupRequestString(boolean sameValue, boolean singleField) { + String firstValue = "1"; + String secondValue = sameValue ? firstValue : "2"; + String contentLength; + if (singleField) { + contentLength = "Content-Length: " + firstValue + ", " + secondValue + "\r\n\r\n"; + } else { + contentLength = "Content-Length: " + firstValue + "\r\n" + + "Content-Length: " + secondValue + "\r\n\r\n"; + } + return "PUT /some/path HTTP/1.1\r\n" + + contentLength + + "ab"; + } + + @Test + public void testDanglingComma() { + EmbeddedChannel channel = newChannel(false); + String requestStr = "GET /some/path HTTP/1.1\r\n" + + "Content-Length: 1,\r\n" + + "Connection: close\n\n" + + "ab"; + assertThat(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)), is(true)); + HttpRequest request = channel.readInbound(); + assertInvalid(request); + assertThat(channel.finish(), is(false)); + } + + private static void assertValid(HttpRequest request) { + assertThat(request.decoderResult().isFailure(), is(false)); + } + + private static void assertInvalid(HttpRequest request) { + assertThat(request.decoderResult().isFailure(), is(true)); + assertThat(request.decoderResult().cause(), instanceOf(IllegalArgumentException.class)); + 
assertThat(request.decoderResult().cause().getMessage(), + containsString("Multiple Content-Length values found")); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringDecoderTest.java index a0071c4a70f..b6adc4e2ffd 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,8 +16,7 @@ package io.netty.handler.codec.http; import io.netty.util.CharsetUtil; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.net.URI; import java.net.URISyntaxException; @@ -26,12 +25,17 @@ import java.util.Map; import java.util.Map.Entry; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + public class QueryStringDecoderTest { @Test public void testBasicUris() throws URISyntaxException { QueryStringDecoder d = new QueryStringDecoder(new URI("http://localhost/path")); - Assert.assertEquals(0, d.parameters().size()); + assertEquals(0, d.parameters().size()); } @Test @@ -39,68 +43,71 @@ public void testBasic() { QueryStringDecoder d; d = new QueryStringDecoder("/foo"); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(0, d.parameters().size()); + assertEquals("/foo", d.path()); + assertEquals(0, 
d.parameters().size()); d = new QueryStringDecoder("/foo%20bar"); - Assert.assertEquals("/foo bar", d.path()); - Assert.assertEquals(0, d.parameters().size()); + assertEquals("/foo bar", d.path()); + assertEquals(0, d.parameters().size()); d = new QueryStringDecoder("/foo?a=b=c"); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(1, d.parameters().get("a").size()); - Assert.assertEquals("b=c", d.parameters().get("a").get(0)); + assertEquals("/foo", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(1, d.parameters().get("a").size()); + assertEquals("b=c", d.parameters().get("a").get(0)); d = new QueryStringDecoder("/foo?a=1&a=2"); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(2, d.parameters().get("a").size()); - Assert.assertEquals("1", d.parameters().get("a").get(0)); - Assert.assertEquals("2", d.parameters().get("a").get(1)); + assertEquals("/foo", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(2, d.parameters().get("a").size()); + assertEquals("1", d.parameters().get("a").get(0)); + assertEquals("2", d.parameters().get("a").get(1)); d = new QueryStringDecoder("/foo%20bar?a=1&a=2"); - Assert.assertEquals("/foo bar", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(2, d.parameters().get("a").size()); - Assert.assertEquals("1", d.parameters().get("a").get(0)); - Assert.assertEquals("2", d.parameters().get("a").get(1)); + assertEquals("/foo bar", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(2, d.parameters().get("a").size()); + assertEquals("1", d.parameters().get("a").get(0)); + assertEquals("2", d.parameters().get("a").get(1)); d = new QueryStringDecoder("/foo?a=&a=2"); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(2, d.parameters().get("a").size()); - Assert.assertEquals("", 
d.parameters().get("a").get(0)); - Assert.assertEquals("2", d.parameters().get("a").get(1)); + assertEquals("/foo", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(2, d.parameters().get("a").size()); + assertEquals("", d.parameters().get("a").get(0)); + assertEquals("2", d.parameters().get("a").get(1)); d = new QueryStringDecoder("/foo?a=1&a="); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(2, d.parameters().get("a").size()); - Assert.assertEquals("1", d.parameters().get("a").get(0)); - Assert.assertEquals("", d.parameters().get("a").get(1)); + assertEquals("/foo", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(2, d.parameters().get("a").size()); + assertEquals("1", d.parameters().get("a").get(0)); + assertEquals("", d.parameters().get("a").get(1)); d = new QueryStringDecoder("/foo?a=1&a=&a="); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(3, d.parameters().get("a").size()); - Assert.assertEquals("1", d.parameters().get("a").get(0)); - Assert.assertEquals("", d.parameters().get("a").get(1)); - Assert.assertEquals("", d.parameters().get("a").get(2)); + assertEquals("/foo", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(3, d.parameters().get("a").size()); + assertEquals("1", d.parameters().get("a").get(0)); + assertEquals("", d.parameters().get("a").get(1)); + assertEquals("", d.parameters().get("a").get(2)); d = new QueryStringDecoder("/foo?a=1=&a==2"); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(2, d.parameters().get("a").size()); - Assert.assertEquals("1=", d.parameters().get("a").get(0)); - Assert.assertEquals("=2", d.parameters().get("a").get(1)); + assertEquals("/foo", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(2, d.parameters().get("a").size()); + assertEquals("1=", 
d.parameters().get("a").get(0)); + assertEquals("=2", d.parameters().get("a").get(1)); d = new QueryStringDecoder("/foo?abc=1%2023&abc=124%20"); - Assert.assertEquals("/foo", d.path()); - Assert.assertEquals(1, d.parameters().size()); - Assert.assertEquals(2, d.parameters().get("abc").size()); - Assert.assertEquals("1 23", d.parameters().get("abc").get(0)); - Assert.assertEquals("124 ", d.parameters().get("abc").get(1)); + assertEquals("/foo", d.path()); + assertEquals(1, d.parameters().size()); + assertEquals(2, d.parameters().get("abc").size()); + assertEquals("1 23", d.parameters().get("abc").get(0)); + assertEquals("124 ", d.parameters().get("abc").get(1)); + + d = new QueryStringDecoder("/foo?abc=%7E"); + assertEquals("~", d.parameters().get("abc").get(0)); } @Test @@ -129,35 +136,42 @@ public void testExotic() { assertQueryString("/foo?a=1&a=&a=", "/foo?a=1&a&a="); } + @Test + public void testSemicolon() { + assertQueryString("/foo?a=1;2", "/foo?a=1;2", false); + // ";" should be treated as a normal character, see #8855 + assertQueryString("/foo?a=1;2", "/foo?a=1%3B2", true); + } + @Test public void testPathSpecific() { // decode escaped characters - Assert.assertEquals("/foo bar/", new QueryStringDecoder("/foo%20bar/?").path()); - Assert.assertEquals("/foo\r\n\\bar/", new QueryStringDecoder("/foo%0D%0A\\bar/?").path()); + assertEquals("/foo bar/", new QueryStringDecoder("/foo%20bar/?").path()); + assertEquals("/foo\r\n\\bar/", new QueryStringDecoder("/foo%0D%0A\\bar/?").path()); // a 'fragment' after '#' should be cuted (see RFC 3986) - Assert.assertEquals("", new QueryStringDecoder("#123").path()); - Assert.assertEquals("foo", new QueryStringDecoder("foo?bar#anchor").path()); - Assert.assertEquals("/foo-bar", new QueryStringDecoder("/foo-bar#anchor").path()); - Assert.assertEquals("/foo-bar", new QueryStringDecoder("/foo-bar#a#b?c=d").path()); + assertEquals("", new QueryStringDecoder("#123").path()); + assertEquals("foo", new 
QueryStringDecoder("foo?bar#anchor").path()); + assertEquals("/foo-bar", new QueryStringDecoder("/foo-bar#anchor").path()); + assertEquals("/foo-bar", new QueryStringDecoder("/foo-bar#a#b?c=d").path()); // '+' is not escape ' ' for the path - Assert.assertEquals("+", new QueryStringDecoder("+").path()); - Assert.assertEquals("/foo+bar/", new QueryStringDecoder("/foo+bar/?").path()); - Assert.assertEquals("/foo++", new QueryStringDecoder("/foo++?index.php").path()); - Assert.assertEquals("/foo +", new QueryStringDecoder("/foo%20+?index.php").path()); - Assert.assertEquals("/foo+ ", new QueryStringDecoder("/foo+%20").path()); + assertEquals("+", new QueryStringDecoder("+").path()); + assertEquals("/foo+bar/", new QueryStringDecoder("/foo+bar/?").path()); + assertEquals("/foo++", new QueryStringDecoder("/foo++?index.php").path()); + assertEquals("/foo +", new QueryStringDecoder("/foo%20+?index.php").path()); + assertEquals("/foo+ ", new QueryStringDecoder("/foo+%20").path()); } @Test public void testExcludeFragment() { // a 'fragment' after '#' should be cuted (see RFC 3986) - Assert.assertEquals("a", new QueryStringDecoder("?a#anchor").parameters().keySet().iterator().next()); - Assert.assertEquals("b", new QueryStringDecoder("?a=b#anchor").parameters().get("a").get(0)); - Assert.assertTrue(new QueryStringDecoder("?#").parameters().isEmpty()); - Assert.assertTrue(new QueryStringDecoder("?#anchor").parameters().isEmpty()); - Assert.assertTrue(new QueryStringDecoder("#?a=b#anchor").parameters().isEmpty()); - Assert.assertTrue(new QueryStringDecoder("?#a=b#anchor").parameters().isEmpty()); + assertEquals("a", new QueryStringDecoder("?a#anchor").parameters().keySet().iterator().next()); + assertEquals("b", new QueryStringDecoder("?a=b#anchor").parameters().get("a").get(0)); + assertTrue(new QueryStringDecoder("?#").parameters().isEmpty()); + assertTrue(new QueryStringDecoder("?#anchor").parameters().isEmpty()); + assertTrue(new 
QueryStringDecoder("#?a=b#anchor").parameters().isEmpty()); + assertTrue(new QueryStringDecoder("?#a=b#anchor").parameters().isEmpty()); } @Test @@ -171,20 +185,20 @@ public void testHashDos() { buf.append(i); buf.append('&'); } - Assert.assertEquals(1024, new QueryStringDecoder(buf.toString()).parameters().size()); + assertEquals(1024, new QueryStringDecoder(buf.toString()).parameters().size()); } @Test public void testHasPath() { QueryStringDecoder decoder = new QueryStringDecoder("1=2", false); - Assert.assertEquals("", decoder.path()); + assertEquals("", decoder.path()); Map> params = decoder.parameters(); - Assert.assertEquals(1, params.size()); - Assert.assertTrue(params.containsKey("1")); + assertEquals(1, params.size()); + assertTrue(params.containsKey("1")); List param = params.get("1"); - Assert.assertNotNull(param); - Assert.assertEquals(1, param.size()); - Assert.assertEquals("2", param.get(0)); + assertNotNull(param); + assertEquals(1, param.size()); + assertEquals("2", param.get(0)); } @Test @@ -217,18 +231,24 @@ public void testUrlDecoding() throws Exception { final String expected = tests[i + 1]; try { final String decoded = QueryStringDecoder.decodeComponent(encoded); - Assert.assertEquals(expected, decoded); + assertEquals(expected, decoded); } catch (IllegalArgumentException e) { - Assert.assertEquals(expected, e.getMessage()); + assertEquals(expected, e.getMessage()); } } } private static void assertQueryString(String expected, String actual) { - QueryStringDecoder ed = new QueryStringDecoder(expected, CharsetUtil.UTF_8); - QueryStringDecoder ad = new QueryStringDecoder(actual, CharsetUtil.UTF_8); - Assert.assertEquals(ed.path(), ad.path()); - Assert.assertEquals(ed.parameters(), ad.parameters()); + assertQueryString(expected, actual, false); + } + + private static void assertQueryString(String expected, String actual, boolean semicolonIsNormalChar) { + QueryStringDecoder ed = new QueryStringDecoder(expected, CharsetUtil.UTF_8, true, + 1024, 
semicolonIsNormalChar); + QueryStringDecoder ad = new QueryStringDecoder(actual, CharsetUtil.UTF_8, true, + 1024, semicolonIsNormalChar); + assertEquals(ed.path(), ad.path()); + assertEquals(ed.parameters(), ad.parameters()); } // See #189 @@ -236,29 +256,29 @@ private static void assertQueryString(String expected, String actual) { public void testURI() { URI uri = URI.create("http://localhost:8080/foo?param1=value1¶m2=value2¶m3=value3"); QueryStringDecoder decoder = new QueryStringDecoder(uri); - Assert.assertEquals("/foo", decoder.path()); - Assert.assertEquals("/foo", decoder.rawPath()); - Assert.assertEquals("param1=value1¶m2=value2¶m3=value3", decoder.rawQuery()); + assertEquals("/foo", decoder.path()); + assertEquals("/foo", decoder.rawPath()); + assertEquals("param1=value1¶m2=value2¶m3=value3", decoder.rawQuery()); Map> params = decoder.parameters(); - Assert.assertEquals(3, params.size()); + assertEquals(3, params.size()); Iterator>> entries = params.entrySet().iterator(); Entry> entry = entries.next(); - Assert.assertEquals("param1", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value1", entry.getValue().get(0)); + assertEquals("param1", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value1", entry.getValue().get(0)); entry = entries.next(); - Assert.assertEquals("param2", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value2", entry.getValue().get(0)); + assertEquals("param2", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value2", entry.getValue().get(0)); entry = entries.next(); - Assert.assertEquals("param3", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value3", entry.getValue().get(0)); + assertEquals("param3", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value3", entry.getValue().get(0)); - 
Assert.assertFalse(entries.hasNext()); + assertFalse(entries.hasNext()); } // See #189 @@ -266,30 +286,30 @@ public void testURI() { public void testURISlashPath() { URI uri = URI.create("http://localhost:8080/?param1=value1¶m2=value2¶m3=value3"); QueryStringDecoder decoder = new QueryStringDecoder(uri); - Assert.assertEquals("/", decoder.path()); - Assert.assertEquals("/", decoder.rawPath()); - Assert.assertEquals("param1=value1¶m2=value2¶m3=value3", decoder.rawQuery()); + assertEquals("/", decoder.path()); + assertEquals("/", decoder.rawPath()); + assertEquals("param1=value1¶m2=value2¶m3=value3", decoder.rawQuery()); Map> params = decoder.parameters(); - Assert.assertEquals(3, params.size()); + assertEquals(3, params.size()); Iterator>> entries = params.entrySet().iterator(); Entry> entry = entries.next(); - Assert.assertEquals("param1", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value1", entry.getValue().get(0)); + assertEquals("param1", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value1", entry.getValue().get(0)); entry = entries.next(); - Assert.assertEquals("param2", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value2", entry.getValue().get(0)); + assertEquals("param2", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value2", entry.getValue().get(0)); entry = entries.next(); - Assert.assertEquals("param3", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value3", entry.getValue().get(0)); + assertEquals("param3", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value3", entry.getValue().get(0)); - Assert.assertFalse(entries.hasNext()); + assertFalse(entries.hasNext()); } // See #189 @@ -297,30 +317,30 @@ public void testURISlashPath() { public void testURINoPath() { URI uri = 
URI.create("http://localhost:8080?param1=value1¶m2=value2¶m3=value3"); QueryStringDecoder decoder = new QueryStringDecoder(uri); - Assert.assertEquals("", decoder.path()); - Assert.assertEquals("", decoder.rawPath()); - Assert.assertEquals("param1=value1¶m2=value2¶m3=value3", decoder.rawQuery()); + assertEquals("", decoder.path()); + assertEquals("", decoder.rawPath()); + assertEquals("param1=value1¶m2=value2¶m3=value3", decoder.rawQuery()); Map> params = decoder.parameters(); - Assert.assertEquals(3, params.size()); + assertEquals(3, params.size()); Iterator>> entries = params.entrySet().iterator(); Entry> entry = entries.next(); - Assert.assertEquals("param1", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value1", entry.getValue().get(0)); + assertEquals("param1", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value1", entry.getValue().get(0)); entry = entries.next(); - Assert.assertEquals("param2", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value2", entry.getValue().get(0)); + assertEquals("param2", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value2", entry.getValue().get(0)); entry = entries.next(); - Assert.assertEquals("param3", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("value3", entry.getValue().get(0)); + assertEquals("param3", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("value3", entry.getValue().get(0)); - Assert.assertFalse(entries.hasNext()); + assertFalse(entries.hasNext()); } // See https://github.com/netty/netty/issues/1833 @@ -328,37 +348,37 @@ public void testURINoPath() { public void testURI2() { URI uri = URI.create("http://foo.com/images;num=10?query=name;value=123"); QueryStringDecoder decoder = new QueryStringDecoder(uri); - Assert.assertEquals("/images;num=10", decoder.path()); - 
Assert.assertEquals("/images;num=10", decoder.rawPath()); - Assert.assertEquals("query=name;value=123", decoder.rawQuery()); + assertEquals("/images;num=10", decoder.path()); + assertEquals("/images;num=10", decoder.rawPath()); + assertEquals("query=name;value=123", decoder.rawQuery()); Map> params = decoder.parameters(); - Assert.assertEquals(2, params.size()); + assertEquals(2, params.size()); Iterator>> entries = params.entrySet().iterator(); Entry> entry = entries.next(); - Assert.assertEquals("query", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("name", entry.getValue().get(0)); + assertEquals("query", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("name", entry.getValue().get(0)); entry = entries.next(); - Assert.assertEquals("value", entry.getKey()); - Assert.assertEquals(1, entry.getValue().size()); - Assert.assertEquals("123", entry.getValue().get(0)); + assertEquals("value", entry.getKey()); + assertEquals(1, entry.getValue().size()); + assertEquals("123", entry.getValue().get(0)); - Assert.assertFalse(entries.hasNext()); + assertFalse(entries.hasNext()); } @Test public void testEmptyStrings() { QueryStringDecoder pathSlash = new QueryStringDecoder("path/"); - Assert.assertEquals("path/", pathSlash.rawPath()); - Assert.assertEquals("", pathSlash.rawQuery()); + assertEquals("path/", pathSlash.rawPath()); + assertEquals("", pathSlash.rawQuery()); QueryStringDecoder pathQuestion = new QueryStringDecoder("path?"); - Assert.assertEquals("path", pathQuestion.rawPath()); - Assert.assertEquals("", pathQuestion.rawQuery()); + assertEquals("path", pathQuestion.rawPath()); + assertEquals("", pathQuestion.rawQuery()); QueryStringDecoder empty = new QueryStringDecoder(""); - Assert.assertEquals("", empty.rawPath()); - Assert.assertEquals("", empty.rawQuery()); + assertEquals("", empty.rawPath()); + assertEquals("", empty.rawQuery()); } } diff --git 
a/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringEncoderTest.java index a9f6f90155e..e30459a06b7 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/QueryStringEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,11 +15,12 @@ */ package io.netty.handler.codec.http; +import org.junit.jupiter.api.Test; + import java.net.URI; import java.nio.charset.Charset; -import org.junit.Assert; -import org.junit.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; public class QueryStringEncoderTest { @@ -29,42 +30,52 @@ public void testDefaultEncoding() throws Exception { e = new QueryStringEncoder("/foo"); e.addParam("a", "b=c"); - Assert.assertEquals("/foo?a=b%3Dc", e.toString()); - Assert.assertEquals(new URI("/foo?a=b%3Dc"), e.toUri()); + assertEquals("/foo?a=b%3Dc", e.toString()); + assertEquals(new URI("/foo?a=b%3Dc"), e.toUri()); e = new QueryStringEncoder("/foo/\u00A5"); e.addParam("a", "\u00A5"); - Assert.assertEquals("/foo/\u00A5?a=%C2%A5", e.toString()); - Assert.assertEquals(new URI("/foo/\u00A5?a=%C2%A5"), e.toUri()); + assertEquals("/foo/\u00A5?a=%C2%A5", e.toString()); + assertEquals(new URI("/foo/\u00A5?a=%C2%A5"), e.toUri()); + + e = new QueryStringEncoder("/foo/\u00A5"); + e.addParam("a", "abc\u00A5"); + assertEquals("/foo/\u00A5?a=abc%C2%A5", e.toString()); + assertEquals(new URI("/foo/\u00A5?a=abc%C2%A5"), e.toUri()); e = new QueryStringEncoder("/foo"); e.addParam("a", "1"); 
e.addParam("b", "2"); - Assert.assertEquals("/foo?a=1&b=2", e.toString()); - Assert.assertEquals(new URI("/foo?a=1&b=2"), e.toUri()); + assertEquals("/foo?a=1&b=2", e.toString()); + assertEquals(new URI("/foo?a=1&b=2"), e.toUri()); e = new QueryStringEncoder("/foo"); e.addParam("a", "1"); e.addParam("b", ""); e.addParam("c", null); e.addParam("d", null); - Assert.assertEquals("/foo?a=1&b=&c&d", e.toString()); - Assert.assertEquals(new URI("/foo?a=1&b=&c&d"), e.toUri()); + assertEquals("/foo?a=1&b=&c&d", e.toString()); + assertEquals(new URI("/foo?a=1&b=&c&d"), e.toUri()); + + e = new QueryStringEncoder("/foo"); + e.addParam("test", "a~b"); + assertEquals("/foo?test=a~b", e.toString()); + assertEquals(new URI("/foo?test=a~b"), e.toUri()); } @Test public void testNonDefaultEncoding() throws Exception { QueryStringEncoder e = new QueryStringEncoder("/foo/\u00A5", Charset.forName("UTF-16")); e.addParam("a", "\u00A5"); - Assert.assertEquals("/foo/\u00A5?a=%FE%FF%00%A5", e.toString()); - Assert.assertEquals(new URI("/foo/\u00A5?a=%FE%FF%00%A5"), e.toUri()); + assertEquals("/foo/\u00A5?a=%FE%FF%00%A5", e.toString()); + assertEquals(new URI("/foo/\u00A5?a=%FE%FF%00%A5"), e.toUri()); } @Test public void testWhitespaceEncoding() throws Exception { QueryStringEncoder e = new QueryStringEncoder("/foo"); e.addParam("a", "b c"); - Assert.assertEquals("/foo?a=b%20c", e.toString()); - Assert.assertEquals(new URI("/foo?a=b%20c"), e.toUri()); + assertEquals("/foo?a=b%20c", e.toString()); + assertEquals(new URI("/foo?a=b%20c"), e.toUri()); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/ReadOnlyHttpHeadersTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/ReadOnlyHttpHeadersTest.java index a0a41916d0f..4d32eb4d99b 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/ReadOnlyHttpHeadersTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/ReadOnlyHttpHeadersTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may 
not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,7 +16,7 @@ package io.netty.handler.codec.http; import io.netty.util.AsciiString; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.util.Iterator; import java.util.List; @@ -30,10 +30,11 @@ import static io.netty.handler.codec.http.HttpHeaderValues.APPLICATION_OCTET_STREAM; import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE; import static io.netty.handler.codec.http.HttpHeaderValues.ZERO; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class ReadOnlyHttpHeadersTest { @Test @@ -113,10 +114,22 @@ public void getAll() { assertTrue(APPLICATION_OCTET_STREAM.contentEqualsIgnoreCase(names.get(1))); } - @Test(expected = IllegalArgumentException.class) + @Test public void validateNamesFail() { - new ReadOnlyHttpHeaders(true, - ACCEPT, APPLICATION_JSON, AsciiString.cached(" ")); + assertThrows(IllegalArgumentException.class, () -> new ReadOnlyHttpHeaders(true, + ACCEPT, APPLICATION_JSON, AsciiString.cached(" "))); + } + + @Test + public void emptyHeaderName() { + assertThrows(IllegalArgumentException.class, () -> new ReadOnlyHttpHeaders(true, + ACCEPT, APPLICATION_JSON, AsciiString.cached(" "), ZERO)); + } + + @Test + public void headerWithoutValue() { + 
assertThrows(IllegalArgumentException.class, () -> new ReadOnlyHttpHeaders(false, + ACCEPT, APPLICATION_JSON, CONTENT_LENGTH)); } private static void assert3ParisEquals(Iterator> itr) { diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieDecoderTest.java index 60b85899c6b..ca7c7419f3d 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,7 +16,8 @@ package io.netty.handler.codec.http.cookie; import io.netty.handler.codec.DateFormatter; -import org.junit.Test; +import io.netty.handler.codec.http.cookie.CookieHeaderNames.SameSite; +import org.junit.jupiter.api.Test; import java.util.ArrayList; import java.util.Calendar; @@ -25,25 +26,36 @@ import java.util.Iterator; import java.util.TimeZone; -import static org.junit.Assert.*; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.hamcrest.CoreMatchers.is; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class ClientCookieDecoderTest { @Test public void 
testDecodingSingleCookieV0() { String cookieString = "myCookie=myValue;expires=" + DateFormatter.format(new Date(System.currentTimeMillis() + 50000)) - + ";path=/apathsomewhere;domain=.adomainsomewhere;secure;"; + + ";path=/apathsomewhere;domain=.adomainsomewhere;secure;SameSite=None"; Cookie cookie = ClientCookieDecoder.STRICT.decode(cookieString); assertNotNull(cookie); assertEquals("myValue", cookie.value()); assertEquals(".adomainsomewhere", cookie.domain()); - assertNotEquals("maxAge should be defined when parsing cookie " + cookieString, - Long.MIN_VALUE, cookie.maxAge()); - assertTrue("maxAge should be about 50ms when parsing cookie " + cookieString, - cookie.maxAge() >= 40 && cookie.maxAge() <= 60); + assertNotEquals(Long.MIN_VALUE, cookie.maxAge(), + "maxAge should be defined when parsing cookie " + cookieString); + assertTrue(cookie.maxAge() >= 40 && cookie.maxAge() <= 60, + "maxAge should be about 50ms when parsing cookie " + cookieString); assertEquals("/apathsomewhere", cookie.path()); assertTrue(cookie.isSecure()); + + assertThat(cookie, is(instanceOf(DefaultCookie.class))); + assertEquals(SameSite.None, ((DefaultCookie) cookie).sameSite()); } @Test @@ -118,11 +130,11 @@ public void testDecodingComplexCookie() { @Test public void testDecodingQuotedCookie() { - Collection sources = new ArrayList(); + Collection sources = new ArrayList<>(); sources.add("a=\"\","); sources.add("b=\"1\","); - Collection cookies = new ArrayList(); + Collection cookies = new ArrayList<>(); for (String source : sources) { cookies.add(ClientCookieDecoder.STRICT.decode(source)); } @@ -203,6 +215,13 @@ public void testDecodingValuesWithCommasAndEqualsFails() { assertNull(cookie); } + @Test + public void testDecodingInvalidValuesWithCommaAtStart() { + assertNull(ClientCookieDecoder.STRICT.decode(",")); + assertNull(ClientCookieDecoder.STRICT.decode(",a")); + assertNull(ClientCookieDecoder.STRICT.decode(",a=a")); + } + @Test public void testDecodingLongValue() { String longValue 
= @@ -252,7 +271,7 @@ public void testDecodingLongValue() { "'=KqtH"; Cookie cookie = ClientCookieDecoder.STRICT.decode("bh=\"" + longValue - + "\";"); + + "\";"); assertEquals("bh", cookie.name()); assertEquals(longValue, cookie.value()); } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieEncoderTest.java index af81059dd8a..78267dd15ef 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ClientCookieEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,9 +15,10 @@ */ package io.netty.handler.codec.http.cookie; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertThrows; -import org.junit.Test; +import org.junit.jupiter.api.Test; public class ClientCookieEncoderTest { @@ -47,8 +48,21 @@ public void testWrappedCookieValue() { ClientCookieEncoder.STRICT.encode(new DefaultCookie("myCookie", "\"foo\"")); } - @Test(expected = IllegalArgumentException.class) + @Test public void testRejectCookieValueWithSemicolon() { - ClientCookieEncoder.STRICT.encode(new DefaultCookie("myCookie", "foo;bar")); + assertThrows(IllegalArgumentException.class, + () -> ClientCookieEncoder.STRICT.encode(new DefaultCookie("myCookie", "foo;bar"))); + } + + @Test + public void testComparatorForSamePathLength() { + Cookie cookie = new DefaultCookie("test", "value"); + cookie.setPath("1"); + + Cookie 
cookie2 = new DefaultCookie("test", "value"); + cookie2.setPath("2"); + + assertEquals(0, ClientCookieEncoder.COOKIE_COMPARATOR.compare(cookie, cookie2)); + assertEquals(0, ClientCookieEncoder.COOKIE_COMPARATOR.compare(cookie2, cookie)); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieDecoderTest.java index b157bc3c73b..78aa88be583 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,12 +15,16 @@ */ package io.netty.handler.codec.http.cookie; -import org.junit.Test; +import java.util.List; +import org.junit.jupiter.api.Test; import java.util.Iterator; import java.util.Set; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class ServerCookieDecoderTest { @Test @@ -53,6 +57,26 @@ public void testDecodingMultipleCookies() { assertEquals("myValue3", cookie.value()); } + @Test + public void testDecodingAllMultipleCookies() { + String c1 = "myCookie=myValue;"; + String c2 = "myCookie=myValue2;"; + String c3 = "myCookie=myValue3;"; + + List cookies = ServerCookieDecoder.STRICT.decodeAll(c1 + c2 + c3); + assertEquals(3, cookies.size()); + Iterator it = 
cookies.iterator(); + Cookie cookie = it.next(); + assertNotNull(cookie); + assertEquals("myValue", cookie.value()); + cookie = it.next(); + assertNotNull(cookie); + assertEquals("myValue2", cookie.value()); + cookie = it.next(); + assertNotNull(cookie); + assertEquals("myValue3", cookie.value()); + } + @Test public void testDecodingGoogleAnalyticsCookie() { String source = diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieEncoderTest.java index 723a7a17a9f..e48b868f183 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/cookie/ServerCookieEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,10 +15,13 @@ */ package io.netty.handler.codec.http.cookie; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + import io.netty.handler.codec.DateFormatter; import java.text.ParseException; @@ -30,7 +33,8 @@ import java.util.regex.Matcher; import java.util.regex.Pattern; -import org.junit.Test; +import 
io.netty.handler.codec.http.cookie.CookieHeaderNames.SameSite; +import org.junit.jupiter.api.Test; public class ServerCookieEncoderTest { @@ -39,13 +43,14 @@ public void testEncodingSingleCookieV0() throws ParseException { int maxAge = 50; - String result = - "myCookie=myValue; Max-Age=50; Expires=(.+?); Path=/apathsomewhere; Domain=.adomainsomewhere; Secure"; - Cookie cookie = new DefaultCookie("myCookie", "myValue"); + String result = "myCookie=myValue; Max-Age=50; Expires=(.+?); Path=/apathsomewhere;" + + " Domain=.adomainsomewhere; Secure; SameSite=Lax"; + DefaultCookie cookie = new DefaultCookie("myCookie", "myValue"); cookie.setDomain(".adomainsomewhere"); cookie.setMaxAge(maxAge); cookie.setPath("/apathsomewhere"); cookie.setSecure(true); + cookie.setSameSite(SameSite.Lax); String encodedCookie = ServerCookieEncoder.STRICT.encode(cookie); @@ -68,7 +73,7 @@ public void testEncodingWithNoCookies() { @Test public void testEncodingMultipleCookiesStrict() { - List result = new ArrayList(); + List result = new ArrayList<>(); result.add("cookie2=value2"); result.add("cookie1=value3"); Cookie cookie1 = new DefaultCookie("cookie1", "value1"); @@ -80,7 +85,7 @@ public void testEncodingMultipleCookiesStrict() { @Test public void illegalCharInCookieNameMakesStrictEncoderThrowsException() { - Set illegalChars = new HashSet(); + Set illegalChars = new HashSet<>(); // CTLs for (int i = 0x00; i <= 0x1F; i++) { illegalChars.add((char) i); @@ -107,7 +112,7 @@ public void illegalCharInCookieNameMakesStrictEncoderThrowsException() { @Test public void illegalCharInCookieValueMakesStrictEncoderThrowsException() { - Set illegalChars = new HashSet(); + Set illegalChars = new HashSet<>(); // CTLs for (int i = 0x00; i <= 0x1F; i++) { illegalChars.add((char) i); @@ -131,9 +136,18 @@ public void illegalCharInCookieValueMakesStrictEncoderThrowsException() { assertEquals(illegalChars.size(), exceptions); } + @Test + public void illegalCharInWrappedValueAppearsInException() { + try { + 
ServerCookieEncoder.STRICT.encode(new DefaultCookie("name", "\"value,\"")); + } catch (IllegalArgumentException e) { + assertThat(e.getMessage().toLowerCase(), containsString("cookie value contains an invalid char: ,")); + } + } + @Test public void testEncodingMultipleCookiesLax() { - List result = new ArrayList(); + List result = new ArrayList<>(); result.add("cookie1=value1"); result.add("cookie2=value2"); result.add("cookie1=value3"); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsConfigTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsConfigTest.java index 91f6c8cdff3..4784a372a16 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsConfigTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsConfigTest.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,7 +19,7 @@ import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpMethod; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; import static io.netty.handler.codec.http.cors.CorsConfigBuilder.forAnyOrigin; @@ -30,6 +30,7 @@ import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.CoreMatchers.notNullValue; import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertThrows; public class CorsConfigTest { @@ -125,9 +126,10 @@ public void emptyPreflightResponseHeaders() { assertThat(cors.preflightResponseHeaders(), equalTo((HttpHeaders) 
EmptyHttpHeaders.INSTANCE)); } - @Test (expected = IllegalArgumentException.class) + @Test public void shouldThrowIfValueIsNull() { - forOrigin("*").preflightResponseHeader("HeaderName", new Object[]{null}).build(); + assertThrows(IllegalArgumentException.class, + () -> forOrigin("*").preflightResponseHeader("HeaderName", new Object[]{null}).build()); } @Test diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsHandlerTest.java index f3fe97efd01..c0e07a78578 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/cors/CorsHandlerTest.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -26,41 +26,26 @@ import io.netty.handler.codec.http.HttpUtil; import io.netty.util.AsciiString; import io.netty.util.ReferenceCountUtil; -import org.junit.Test; +import org.hamcrest.core.IsEqual; +import org.junit.jupiter.api.Test; import java.util.Arrays; +import java.util.Collections; +import java.util.List; import java.util.concurrent.Callable; -import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_CREDENTIALS; -import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_HEADERS; -import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_METHODS; -import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_ALLOW_ORIGIN; -import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_EXPOSE_HEADERS; -import static 
io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_REQUEST_HEADERS; -import static io.netty.handler.codec.http.HttpHeaderNames.ACCESS_CONTROL_REQUEST_METHOD; -import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; -import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; -import static io.netty.handler.codec.http.HttpHeaderNames.DATE; -import static io.netty.handler.codec.http.HttpHeaderNames.ORIGIN; -import static io.netty.handler.codec.http.HttpHeaderNames.VARY; -import static io.netty.handler.codec.http.HttpHeaderValues.KEEP_ALIVE; +import static io.netty.handler.codec.http.HttpHeaderNames.*; import static io.netty.handler.codec.http.HttpHeaderValues.CLOSE; +import static io.netty.handler.codec.http.HttpHeaderValues.KEEP_ALIVE; import static io.netty.handler.codec.http.HttpHeadersTestUtils.of; -import static io.netty.handler.codec.http.HttpMethod.DELETE; -import static io.netty.handler.codec.http.HttpMethod.GET; -import static io.netty.handler.codec.http.HttpMethod.OPTIONS; +import static io.netty.handler.codec.http.HttpMethod.*; import static io.netty.handler.codec.http.HttpResponseStatus.FORBIDDEN; import static io.netty.handler.codec.http.HttpResponseStatus.OK; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; -import static io.netty.handler.codec.http.cors.CorsConfigBuilder.forAnyOrigin; -import static io.netty.handler.codec.http.cors.CorsConfigBuilder.forOrigin; -import static io.netty.handler.codec.http.cors.CorsConfigBuilder.forOrigins; -import static org.hamcrest.CoreMatchers.containsString; -import static org.hamcrest.CoreMatchers.equalTo; -import static org.hamcrest.CoreMatchers.is; -import static org.hamcrest.CoreMatchers.notNullValue; -import static org.hamcrest.CoreMatchers.nullValue; +import static io.netty.handler.codec.http.cors.CorsConfigBuilder.*; +import static org.hamcrest.CoreMatchers.*; import static org.hamcrest.MatcherAssert.assertThat; +import static org.hamcrest.core.IsEqual.equalTo; 
public class CorsHandlerTest { @@ -68,6 +53,7 @@ public class CorsHandlerTest { public void nonCorsRequest() { final HttpResponse response = simpleRequest(forAnyOrigin().build(), null); assertThat(response.headers().contains(ACCESS_CONTROL_ALLOW_ORIGIN), is(false)); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -75,6 +61,7 @@ public void simpleRequestWithAnyOrigin() { final HttpResponse response = simpleRequest(forAnyOrigin().build(), "http://localhost:7777"); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is("*")); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), is(nullValue())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -85,6 +72,7 @@ public void simpleRequestWithNullOrigin() { assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is("null")); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_CREDENTIALS), is(equalTo("true"))); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), is(nullValue())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -93,6 +81,7 @@ public void simpleRequestWithOrigin() { final HttpResponse response = simpleRequest(forOrigin(origin).build(), origin); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is(origin)); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), is(nullValue())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -103,9 +92,12 @@ public void simpleRequestWithOrigins() { final HttpResponse response1 = simpleRequest(forOrigins(origins).build(), origin1); assertThat(response1.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is(origin1)); assertThat(response1.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), is(nullValue())); + assertThat(ReferenceCountUtil.release(response1), is(true)); + final HttpResponse response2 = simpleRequest(forOrigins(origins).build(), origin2); assertThat(response2.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), 
is(origin2)); assertThat(response2.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), is(nullValue())); + assertThat(ReferenceCountUtil.release(response2), is(true)); } @Test @@ -115,6 +107,7 @@ public void simpleRequestWithNoMatchingOrigin() { forOrigins("https://localhost:8888").build(), origin); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is(nullValue())); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), is(nullValue())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -127,6 +120,7 @@ public void preflightDeleteRequestWithCustomHeaders() { assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_METHODS), containsString("GET")); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_METHODS), containsString("DELETE")); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -142,6 +136,7 @@ public void preflightGetRequestWithCustomHeaders() { assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), containsString("content-type")); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_HEADERS), containsString("xheader1")); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -151,6 +146,7 @@ public void preflightRequestWithDefaultHeaders() { assertThat(response.headers().get(CONTENT_LENGTH), is("0")); assertThat(response.headers().get(DATE), is(notNullValue())); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -162,6 +158,16 @@ public void preflightRequestWithCustomHeader() { assertThat(response.headers().get(of("CustomHeader")), equalTo("somevalue")); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); assertThat(response.headers().get(CONTENT_LENGTH), is("0")); + assertThat(ReferenceCountUtil.release(response), 
is(true)); + } + + @Test + public void preflightRequestWithUnauthorizedOrigin() { + final String origin = "http://host"; + final CorsConfig config = forOrigin("http://localhost").build(); + final HttpResponse response = preflightRequest(config, origin, "xheader1"); + assertThat(response.headers().contains(ACCESS_CONTROL_ALLOW_ORIGIN), is(false)); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -175,6 +181,7 @@ public void preflightRequestWithCustomHeaders() { final HttpResponse response = preflightRequest(config, "http://localhost:8888", "content-type, xheader1"); assertValues(response, headerName, value1, value2); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -188,20 +195,17 @@ public void preflightRequestWithCustomHeadersIterable() { final HttpResponse response = preflightRequest(config, "http://localhost:8888", "content-type, xheader1"); assertValues(response, headerName, value1, value2); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test public void preflightRequestWithValueGenerator() { final CorsConfig config = forOrigin("http://localhost:8888") - .preflightResponseHeader("GenHeader", new Callable() { - @Override - public String call() throws Exception { - return "generatedValue"; - } - }).build(); + .preflightResponseHeader("GenHeader", () -> "generatedValue").build(); final HttpResponse response = preflightRequest(config, "http://localhost:8888", "content-type, xheader1"); assertThat(response.headers().get(of("GenHeader")), equalTo("generatedValue")); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -214,6 +218,7 @@ public void preflightRequestWithNullOrigin() { final HttpResponse response = preflightRequest(config, origin, "content-type, xheader1"); 
assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is(equalTo("null"))); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_CREDENTIALS), is(equalTo("true"))); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -222,6 +227,7 @@ public void preflightRequestAllowCredentials() { final CorsConfig config = forOrigin(origin).allowCredentials().build(); final HttpResponse response = preflightRequest(config, origin, "content-type, xheader1"); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_CREDENTIALS), is(equalTo("true"))); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -230,6 +236,7 @@ public void preflightRequestDoNotAllowCredentials() { final HttpResponse response = preflightRequest(config, "http://localhost:8888", ""); // the only valid value for Access-Control-Allow-Credentials is true. assertThat(response.headers().contains(ACCESS_CONTROL_ALLOW_CREDENTIALS), is(false)); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -239,6 +246,7 @@ public void simpleRequestCustomHeaders() { assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("*")); assertThat(response.headers().get(ACCESS_CONTROL_EXPOSE_HEADERS), containsString("custom1")); assertThat(response.headers().get(ACCESS_CONTROL_EXPOSE_HEADERS), containsString("custom2")); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -246,6 +254,7 @@ public void simpleRequestAllowCredentials() { final CorsConfig config = forAnyOrigin().allowCredentials().build(); final HttpResponse response = simpleRequest(config, "http://localhost:7777"); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -253,6 +262,7 @@ public void simpleRequestDoNotAllowCredentials() { final CorsConfig config = forAnyOrigin().build(); final HttpResponse response = simpleRequest(config, "http://localhost:7777"); 
assertThat(response.headers().contains(ACCESS_CONTROL_ALLOW_CREDENTIALS), is(false)); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -262,6 +272,7 @@ public void anyOriginAndAllowCredentialsShouldEchoRequestOrigin() { assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_CREDENTIALS), equalTo("true")); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("http://localhost:7777")); assertThat(response.headers().get(VARY), equalTo(ORIGIN.toString())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -270,6 +281,7 @@ public void simpleRequestExposeHeaders() { final HttpResponse response = simpleRequest(config, "http://localhost:7777"); assertThat(response.headers().get(ACCESS_CONTROL_EXPOSE_HEADERS), containsString("one")); assertThat(response.headers().get(ACCESS_CONTROL_EXPOSE_HEADERS), containsString("two")); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -278,6 +290,7 @@ public void simpleRequestShortCircuit() { final HttpResponse response = simpleRequest(config, "http://localhost:7777"); assertThat(response.status(), is(FORBIDDEN)); assertThat(response.headers().get(CONTENT_LENGTH), is("0")); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -286,6 +299,7 @@ public void simpleRequestNoShortCircuit() { final HttpResponse response = simpleRequest(config, "http://localhost:7777"); assertThat(response.status(), is(OK)); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is(nullValue())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -294,6 +308,7 @@ public void shortCircuitNonCorsRequest() { final HttpResponse response = simpleRequest(config, null); assertThat(response.status(), is(OK)); assertThat(response.headers().get(ACCESS_CONTROL_ALLOW_ORIGIN), is(nullValue())); + assertThat(ReferenceCountUtil.release(response), is(true)); } @Test @@ -422,6 +437,46 @@ public void forbiddenShouldReleaseRequest() { 
assertThat(channel.finish(), is(false)); } + @Test + public void differentConfigsPerOrigin() { + String host1 = "http://host1:80"; + String host2 = "http://host2"; + CorsConfig rule1 = forOrigin(host1).allowedRequestMethods(HttpMethod.GET).build(); + CorsConfig rule2 = forOrigin(host2).allowedRequestMethods(HttpMethod.GET, HttpMethod.POST) + .allowCredentials().build(); + + List corsConfigs = Arrays.asList(rule1, rule2); + + final HttpResponse preFlightHost1 = preflightRequest(corsConfigs, host1, "", false); + assertThat(preFlightHost1.headers().get(ACCESS_CONTROL_ALLOW_METHODS), is("GET")); + assertThat(preFlightHost1.headers().getAsString(ACCESS_CONTROL_ALLOW_CREDENTIALS), is(nullValue())); + + final HttpResponse preFlightHost2 = preflightRequest(corsConfigs, host2, "", false); + assertValues(preFlightHost2, ACCESS_CONTROL_ALLOW_METHODS.toString(), "GET", "POST"); + assertThat(preFlightHost2.headers().getAsString(ACCESS_CONTROL_ALLOW_CREDENTIALS), IsEqual.equalTo("true")); + } + + @Test + public void specificConfigPrecedenceOverGeneric() { + String host1 = "http://host1"; + String host2 = "http://host2"; + + CorsConfig forHost1 = forOrigin(host1).allowedRequestMethods(HttpMethod.GET).maxAge(3600L).build(); + CorsConfig allowAll = forAnyOrigin().allowedRequestMethods(HttpMethod.POST, HttpMethod.GET, HttpMethod.OPTIONS) + .maxAge(1800).build(); + + List rules = Arrays.asList(forHost1, allowAll); + + final HttpResponse host1Response = preflightRequest(rules, host1, "", false); + assertThat(host1Response.headers().get(ACCESS_CONTROL_ALLOW_METHODS), is("GET")); + assertThat(host1Response.headers().getAsString(ACCESS_CONTROL_MAX_AGE), equalTo("3600")); + + final HttpResponse host2Response = preflightRequest(rules, host2, "", false); + assertValues(host2Response, ACCESS_CONTROL_ALLOW_METHODS.toString(), "POST", "GET", "OPTIONS"); + assertThat(host2Response.headers().getAsString(ACCESS_CONTROL_ALLOW_ORIGIN), equalTo("*")); + 
assertThat(host2Response.headers().getAsString(ACCESS_CONTROL_MAX_AGE), equalTo("1800")); + } + private static HttpResponse simpleRequest(final CorsConfig config, final String origin) { return simpleRequest(config, origin, null); } @@ -445,13 +500,22 @@ private static HttpResponse simpleRequest(final CorsConfig config, httpRequest.headers().set(ACCESS_CONTROL_REQUEST_HEADERS, requestHeaders); } assertThat(channel.writeInbound(httpRequest), is(false)); - return (HttpResponse) channel.readOutbound(); + HttpResponse response = channel.readOutbound(); + assertThat(channel.finish(), is(false)); + return response; } private static HttpResponse preflightRequest(final CorsConfig config, final String origin, final String requestHeaders) { - final EmbeddedChannel channel = new EmbeddedChannel(new CorsHandler(config)); + return preflightRequest(Collections.singletonList(config), origin, requestHeaders, config.isShortCircuit()); + } + + private static HttpResponse preflightRequest(final List configs, + final String origin, + final String requestHeaders, + final boolean isSHortCircuit) { + final EmbeddedChannel channel = new EmbeddedChannel(new CorsHandler(configs, isSHortCircuit)); assertThat(channel.writeInbound(optionsRequest(origin, requestHeaders, null)), is(false)); HttpResponse response = channel.readOutbound(); assertThat(channel.finish(), is(false)); @@ -478,7 +542,7 @@ private static FullHttpRequest createHttpRequest(HttpMethod method) { private static class EchoHandler extends SimpleChannelInboundHandler { @Override - public void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception { + public void messageReceived(ChannelHandlerContext ctx, Object msg) throws Exception { ctx.writeAndFlush(new DefaultFullHttpResponse(HTTP_1_1, OK, true, true)); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/AbstractDiskHttpDataTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/AbstractDiskHttpDataTest.java new 
file mode 100644 index 00000000000..e686405306a --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/AbstractDiskHttpDataTest.java @@ -0,0 +1,129 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.multipart; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; +import io.netty.util.internal.PlatformDependent; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.FileOutputStream; +import java.nio.charset.Charset; +import java.util.Arrays; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; + +import static io.netty.util.CharsetUtil.UTF_8; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; + +/** + * {@link AbstractDiskHttpData} test cases + */ +public class AbstractDiskHttpDataTest { + + @Test + public void testGetChunk() throws Exception { + TestHttpData test = new TestHttpData("test", UTF_8, 0); + try { + File tmpFile = PlatformDependent.createTempFile(UUID.randomUUID().toString(), ".tmp", null); + tmpFile.deleteOnExit(); + FileOutputStream fos = new FileOutputStream(tmpFile); + byte[] bytes = new byte[4096]; + ThreadLocalRandom.current().nextBytes(bytes); + try { + fos.write(bytes); + fos.flush(); + } finally { + fos.close(); + } + test.setContent(tmpFile); + ByteBuf buf1 = 
test.getChunk(1024); + assertEquals(buf1.readerIndex(), 0); + assertEquals(buf1.writerIndex(), 1024); + ByteBuf buf2 = test.getChunk(1024); + assertEquals(buf2.readerIndex(), 0); + assertEquals(buf2.writerIndex(), 1024); + assertFalse(Arrays.equals(ByteBufUtil.getBytes(buf1), ByteBufUtil.getBytes(buf2)), + "Arrays should not be equal"); + } finally { + test.delete(); + } + } + + private static final class TestHttpData extends AbstractDiskHttpData { + + private TestHttpData(String name, Charset charset, long size) { + super(name, charset, size); + } + + @Override + protected String getDiskFilename() { + return null; + } + + @Override + protected String getPrefix() { + return null; + } + + @Override + protected String getBaseDirectory() { + return null; + } + + @Override + protected String getPostfix() { + return null; + } + + @Override + protected boolean deleteOnExit() { + return false; + } + + @Override + public HttpData copy() { + return null; + } + + @Override + public HttpData duplicate() { + return null; + } + + @Override + public HttpData retainedDuplicate() { + return null; + } + + @Override + public HttpData replace(ByteBuf content) { + return null; + } + + @Override + public HttpDataType getHttpDataType() { + return null; + } + + @Override + public int compareTo(InterfaceHttpData o) { + return 0; + } + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpDataTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpDataTest.java index df4a425de5c..4b906e47dbf 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpDataTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/AbstractMemoryHttpDataTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,19 +16,96 @@ package io.netty.handler.codec.http.multipart; import io.netty.buffer.ByteBuf; -import org.junit.Test; +import io.netty.buffer.ByteBufInputStream; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; + +import io.netty.util.internal.PlatformDependent; +import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; import java.nio.charset.Charset; import java.security.SecureRandom; import java.util.Arrays; import java.util.Random; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; import static io.netty.util.CharsetUtil.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; /** {@link AbstractMemoryHttpData} test cases. 
*/ public class AbstractMemoryHttpDataTest { + + @Test + public void testSetContentFromFile() throws Exception { + TestHttpData test = new TestHttpData("test", UTF_8, 0); + try { + File tmpFile = PlatformDependent.createTempFile(UUID.randomUUID().toString(), ".tmp", null); + tmpFile.deleteOnExit(); + FileOutputStream fos = new FileOutputStream(tmpFile); + byte[] bytes = new byte[4096]; + ThreadLocalRandom.current().nextBytes(bytes); + try { + fos.write(bytes); + fos.flush(); + } finally { + fos.close(); + } + test.setContent(tmpFile); + ByteBuf buf = test.getByteBuf(); + assertEquals(buf.readerIndex(), 0); + assertEquals(buf.writerIndex(), bytes.length); + assertArrayEquals(bytes, test.get()); + assertArrayEquals(bytes, ByteBufUtil.getBytes(buf)); + } finally { + //release the ByteBuf + test.delete(); + } + } + + @Test + public void testRenameTo() throws Exception { + TestHttpData test = new TestHttpData("test", UTF_8, 0); + try { + File tmpFile = PlatformDependent.createTempFile(UUID.randomUUID().toString(), ".tmp", null); + tmpFile.deleteOnExit(); + final int totalByteCount = 4096; + byte[] bytes = new byte[totalByteCount]; + ThreadLocalRandom.current().nextBytes(bytes); + ByteBuf content = Unpooled.wrappedBuffer(bytes); + test.setContent(content); + boolean succ = test.renameTo(tmpFile); + assertTrue(succ); + FileInputStream fis = new FileInputStream(tmpFile); + try { + byte[] buf = new byte[totalByteCount]; + int count = 0; + int offset = 0; + int size = totalByteCount; + while ((count = fis.read(buf, offset, size)) > 0) { + offset += count; + size -= count; + if (offset >= totalByteCount || size <= 0) { + break; + } + } + assertArrayEquals(bytes, buf); + assertEquals(0, fis.available()); + } finally { + fis.close(); + } + } finally { + //release the ByteBuf in AbstractMemoryHttpData + test.delete(); + } + } /** * Provide content into HTTP data with input stream. 
* @@ -36,6 +113,20 @@ public class AbstractMemoryHttpDataTest { */ @Test public void testSetContentFromStream() throws Exception { + // definedSize=0 + TestHttpData test = new TestHttpData("test", UTF_8, 0); + String contentStr = "foo_test"; + ByteBuf buf = Unpooled.wrappedBuffer(contentStr.getBytes(UTF_8)); + int readerIndex = buf.readerIndex(); + + try (ByteBufInputStream is = new ByteBufInputStream(buf)) { + test.setContent(is); + assertFalse(buf.isReadable()); + assertEquals(test.getString(UTF_8), contentStr); + buf.readerIndex(readerIndex); + assertTrue(ByteBufUtil.equals(buf, test.getByteBuf())); + } + Random random = new SecureRandom(); for (int i = 0; i < 20; i++) { @@ -56,6 +147,7 @@ public void testSetContentFromStream() throws Exception { assertEquals(0, buffer.readerIndex()); assertEquals(bytes.length, buffer.writerIndex()); assertArrayEquals(bytes, Arrays.copyOf(buffer.array(), bytes.length)); + assertArrayEquals(bytes, data.get()); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactoryTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactoryTest.java index 872686922dd..9e6c2e17e47 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactoryTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DefaultHttpDataFactoryTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,20 +18,20 @@ import io.netty.buffer.Unpooled; import io.netty.handler.codec.http.DefaultHttpRequest; import io.netty.handler.codec.http.HttpRequest; -import org.junit.After; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; +import org.junit.jupiter.api.AfterEach; +import org.junit.jupiter.api.BeforeAll; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import static io.netty.handler.codec.http.HttpHeaderValues.IDENTITY; import static io.netty.handler.codec.http.HttpMethod.POST; import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; import static io.netty.handler.codec.http.multipart.HttpPostBodyUtil.DEFAULT_TEXT_CONTENT_TYPE; import static io.netty.util.CharsetUtil.UTF_8; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertNull; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class DefaultHttpDataFactoryTest { // req1 equals req2 @@ -40,23 +40,38 @@ public class DefaultHttpDataFactoryTest { private DefaultHttpDataFactory factory; - @BeforeClass + @BeforeAll public static void assertReq1EqualsReq2() { // Before doing anything, assert that the requests are equal assertEquals(req1.hashCode(), req2.hashCode()); assertTrue(req1.equals(req2)); } - @Before + @BeforeEach public void setupFactory() { factory = new DefaultHttpDataFactory(); } - @After + @AfterEach public void cleanupFactory() { 
factory.cleanAllHttpData(); } + @Test + public void customBaseDirAndDeleteOnExit() { + final DefaultHttpDataFactory defaultHttpDataFactory = new DefaultHttpDataFactory(true); + final String dir = "target/DefaultHttpDataFactoryTest/customBaseDirAndDeleteOnExit"; + defaultHttpDataFactory.setBaseDir(dir); + defaultHttpDataFactory.setDeleteOnExit(true); + final Attribute attr = defaultHttpDataFactory.createAttribute(req1, "attribute1"); + final FileUpload fu = defaultHttpDataFactory.createFileUpload( + req1, "attribute1", "f.txt", "text/plain", null, null, 0); + assertEquals(dir, DiskAttribute.class.cast(attr).getBaseDirectory()); + assertEquals(dir, DiskFileUpload.class.cast(fu).getBaseDirectory()); + assertTrue(DiskAttribute.class.cast(attr).deleteOnExit()); + assertTrue(DiskFileUpload.class.cast(fu).deleteOnExit()); + } + @Test public void cleanRequestHttpDataShouldIdentifiesRequestsByTheirIdentities() throws Exception { // Create some data belonging to req1 and req2 diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DeleteFileOnExitHookTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DeleteFileOnExitHookTest.java new file mode 100644 index 00000000000..fb1799f2040 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DeleteFileOnExitHookTest.java @@ -0,0 +1,83 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.multipart; + +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.HttpRequest; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.FilenameFilter; +import java.io.IOException; + +import static io.netty.handler.codec.http.HttpMethod.POST; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Test DeleteFileOnExitHook + */ +public class DeleteFileOnExitHookTest { + private static final HttpRequest REQUEST = new DefaultHttpRequest(HTTP_1_1, POST, "/form"); + private static final String HOOK_TEST_TMP = "target/DeleteFileOnExitHookTest/tmp"; + private FileUpload fu; + + @BeforeEach + public void setUp() throws IOException { + DefaultHttpDataFactory defaultHttpDataFactory = new DefaultHttpDataFactory(true); + defaultHttpDataFactory.setBaseDir(HOOK_TEST_TMP); + defaultHttpDataFactory.setDeleteOnExit(true); + + File baseDir = new File(HOOK_TEST_TMP); + baseDir.mkdirs(); // we don't need to clean it since it is in volatile files anyway + + fu = defaultHttpDataFactory.createFileUpload( + REQUEST, "attribute1", "tmp_f.txt", "text/plain", null, null, 0); + fu.setContent(Unpooled.wrappedBuffer(new byte[]{1, 2, 3, 4})); + + assertTrue(fu.getFile().exists()); + } + + @Test + public void testSimulateTriggerDeleteFileOnExitHook() { + + // simulate app exit + DeleteFileOnExitHook.runHook(); + + File[] files = new File(HOOK_TEST_TMP).listFiles(new FilenameFilter() { + @Override + public boolean accept(File dir, String name) { + return name.startsWith(DiskFileUpload.prefix); + } + }); + + 
assertEquals(0, files.length); + } + + @Test + public void testAfterHttpDataReleaseCheckFileExist() throws IOException { + + String filePath = fu.getFile().getPath(); + assertTrue(DeleteFileOnExitHook.checkFileExist(filePath)); + + fu.release(); + assertFalse(DeleteFileOnExitHook.checkFileExist(filePath)); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DiskFileUploadTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DiskFileUploadTest.java index fa5661e379d..96e101eecb3 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DiskFileUploadTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/DiskFileUploadTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,15 +15,285 @@ */ package io.netty.handler.codec.http.multipart; -import org.junit.Assert; -import org.junit.Test; +import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufInputStream; +import io.netty.buffer.ByteBufUtil; +import io.netty.buffer.Unpooled; +import io.netty.util.CharsetUtil; + +import io.netty.util.internal.PlatformDependent; +import org.junit.jupiter.api.Test; + +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.util.UUID; +import java.util.concurrent.ThreadLocalRandom; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; 
+import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class DiskFileUploadTest { + @Test + public void testSpecificCustomBaseDir() throws IOException { + File baseDir = new File("target/DiskFileUploadTest/testSpecificCustomBaseDir"); + baseDir.mkdirs(); // we don't need to clean it since it is in volatile files anyway + DiskFileUpload f = + new DiskFileUpload("d1", "d1", "application/json", null, null, 100, + baseDir.getAbsolutePath(), false); + + f.setContent(Unpooled.EMPTY_BUFFER); + + assertTrue(f.getFile().getAbsolutePath().startsWith(baseDir.getAbsolutePath())); + assertTrue(f.getFile().exists()); + assertEquals(0, f.getFile().length()); + f.delete(); + } @Test public final void testDiskFileUploadEquals() { DiskFileUpload f2 = new DiskFileUpload("d1", "d1", "application/json", null, null, 100); - Assert.assertEquals(f2, f2); + assertEquals(f2, f2); + f2.delete(); + } + + @Test + public void testEmptyBufferSetMultipleTimes() throws IOException { + DiskFileUpload f = + new DiskFileUpload("d1", "d1", "application/json", null, null, 100); + + f.setContent(Unpooled.EMPTY_BUFFER); + + assertTrue(f.getFile().exists()); + assertEquals(0, f.getFile().length()); + f.setContent(Unpooled.EMPTY_BUFFER); + assertTrue(f.getFile().exists()); + assertEquals(0, f.getFile().length()); + f.delete(); + } + + @Test + public void testEmptyBufferSetAfterNonEmptyBuffer() throws IOException { + DiskFileUpload f = + new DiskFileUpload("d1", "d1", "application/json", null, null, 100); + + f.setContent(Unpooled.wrappedBuffer(new byte[] { 1, 2, 3, 4 })); + + assertTrue(f.getFile().exists()); + assertEquals(4, f.getFile().length()); + f.setContent(Unpooled.EMPTY_BUFFER); + assertTrue(f.getFile().exists()); + assertEquals(0, f.getFile().length()); + f.delete(); + } + + @Test + public void testNonEmptyBufferSetMultipleTimes() throws IOException { + DiskFileUpload f = 
+ new DiskFileUpload("d1", "d1", "application/json", null, null, 100); + + f.setContent(Unpooled.wrappedBuffer(new byte[] { 1, 2, 3, 4 })); + + assertTrue(f.getFile().exists()); + assertEquals(4, f.getFile().length()); + f.setContent(Unpooled.wrappedBuffer(new byte[] { 1, 2})); + assertTrue(f.getFile().exists()); + assertEquals(2, f.getFile().length()); + f.delete(); + } + + @Test + public void testAddContents() throws Exception { + DiskFileUpload f1 = new DiskFileUpload("file1", "file1", "application/json", null, null, 0); + try { + byte[] jsonBytes = new byte[4096]; + ThreadLocalRandom.current().nextBytes(jsonBytes); + + f1.addContent(Unpooled.wrappedBuffer(jsonBytes, 0, 1024), false); + f1.addContent(Unpooled.wrappedBuffer(jsonBytes, 1024, jsonBytes.length - 1024), true); + assertArrayEquals(jsonBytes, f1.get()); + + File file = f1.getFile(); + assertEquals(jsonBytes.length, file.length()); + + FileInputStream fis = new FileInputStream(file); + try { + byte[] buf = new byte[jsonBytes.length]; + int offset = 0; + int read = 0; + int len = buf.length; + while ((read = fis.read(buf, offset, len)) > 0) { + len -= read; + offset += read; + if (len <= 0 || offset >= buf.length) { + break; + } + } + assertArrayEquals(jsonBytes, buf); + } finally { + fis.close(); + } + } finally { + f1.delete(); + } + } + + @Test + public void testSetContentFromByteBuf() throws Exception { + DiskFileUpload f1 = new DiskFileUpload("file2", "file2", "application/json", null, null, 0); + try { + String json = "{\"hello\":\"world\"}"; + byte[] bytes = json.getBytes(CharsetUtil.UTF_8); + f1.setContent(Unpooled.wrappedBuffer(bytes)); + assertEquals(json, f1.getString()); + assertArrayEquals(bytes, f1.get()); + File file = f1.getFile(); + assertEquals((long) bytes.length, file.length()); + assertArrayEquals(bytes, doReadFile(file, bytes.length)); + } finally { + f1.delete(); + } + } + + @Test + public void testSetContentFromInputStream() throws Exception { + String json = 
"{\"hello\":\"world\",\"foo\":\"bar\"}"; + DiskFileUpload f1 = new DiskFileUpload("file3", "file3", "application/json", null, null, 0); + try { + byte[] bytes = json.getBytes(CharsetUtil.UTF_8); + ByteBuf buf = Unpooled.wrappedBuffer(bytes); + InputStream is = new ByteBufInputStream(buf); + try { + f1.setContent(is); + assertEquals(json, f1.getString()); + assertArrayEquals(bytes, f1.get()); + File file = f1.getFile(); + assertEquals((long) bytes.length, file.length()); + assertArrayEquals(bytes, doReadFile(file, bytes.length)); + } finally { + buf.release(); + is.close(); + } + } finally { + f1.delete(); + } + } + + @Test + public void testAddContentFromByteBuf() throws Exception { + testAddContentFromByteBuf0(false); + } + + @Test + public void testAddContentFromCompositeByteBuf() throws Exception { + testAddContentFromByteBuf0(true); + } + + private static void testAddContentFromByteBuf0(boolean composite) throws Exception { + DiskFileUpload f1 = new DiskFileUpload("file3", "file3", "application/json", null, null, 0); + try { + byte[] bytes = new byte[4096]; + ThreadLocalRandom.current().nextBytes(bytes); + + final ByteBuf buffer; + + if (composite) { + buffer = Unpooled.compositeBuffer() + .addComponent(true, Unpooled.wrappedBuffer(bytes, 0 , bytes.length / 2)) + .addComponent(true, Unpooled.wrappedBuffer(bytes, bytes.length / 2, bytes.length / 2)); + } else { + buffer = Unpooled.wrappedBuffer(bytes); + } + f1.addContent(buffer, true); + ByteBuf buf = f1.getByteBuf(); + assertEquals(buf.readerIndex(), 0); + assertEquals(buf.writerIndex(), bytes.length); + assertArrayEquals(bytes, ByteBufUtil.getBytes(buf)); + } finally { + //release the ByteBuf + f1.delete(); + } + } + + private static byte[] doReadFile(File file, int maxRead) throws Exception { + FileInputStream fis = new FileInputStream(file); + try { + byte[] buf = new byte[maxRead]; + int offset = 0; + int read = 0; + int len = buf.length; + while ((read = fis.read(buf, offset, len)) > 0) { + len -= read; + 
offset += read; + if (len <= 0 || offset >= buf.length) { + break; + } + } + return buf; + } finally { + fis.close(); + } + } + + @Test + public void testDelete() throws Exception { + String json = "{\"foo\":\"bar\"}"; + byte[] bytes = json.getBytes(CharsetUtil.UTF_8); + File tmpFile = null; + DiskFileUpload f1 = new DiskFileUpload("file4", "file4", "application/json", null, null, 0); + try { + assertNull(f1.getFile()); + f1.setContent(Unpooled.wrappedBuffer(bytes)); + assertNotNull(tmpFile = f1.getFile()); + } finally { + f1.delete(); + assertNull(f1.getFile()); + assertNotNull(tmpFile); + assertFalse(tmpFile.exists()); + } + } + + @Test + public void setSetContentFromFileExceptionally() throws Exception { + final long maxSize = 4; + DiskFileUpload f1 = new DiskFileUpload("file5", "file5", "application/json", null, null, 0); + f1.setMaxSize(maxSize); + try { + f1.setContent(Unpooled.wrappedBuffer(new byte[(int) maxSize])); + File originalFile = f1.getFile(); + assertNotNull(originalFile); + assertEquals(maxSize, originalFile.length()); + assertEquals(maxSize, f1.length()); + byte[] bytes = new byte[8]; + + ThreadLocalRandom.current().nextBytes(bytes); + File tmpFile = PlatformDependent.createTempFile(UUID.randomUUID().toString(), ".tmp", null); + tmpFile.deleteOnExit(); + FileOutputStream fos = new FileOutputStream(tmpFile); + try { + fos.write(bytes); + fos.flush(); + } finally { + fos.close(); + } + try { + f1.setContent(tmpFile); + fail("should not reach here!"); + } catch (IOException e) { + assertNotNull(f1.getFile()); + assertEquals(originalFile, f1.getFile()); + assertEquals(maxSize, f1.length()); + } + } finally { + f1.delete(); + } } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java new file mode 100644 index 00000000000..fb7e08202dd --- /dev/null +++ 
b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostMultiPartRequestDecoderTest.java @@ -0,0 +1,361 @@ +/* + * Copyright 2021 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.multipart; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.DefaultLastHttpContent; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.HttpConstants; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.util.CharsetUtil; +import org.junit.jupiter.api.Test; + +import java.io.IOException; +import java.util.Arrays; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public class HttpPostMultiPartRequestDecoderTest { + + @Test + public void 
testDecodeFullHttpRequestWithNoContentTypeHeader() { + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + try { + new HttpPostMultipartRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException expected) { + // expected + } finally { + assertTrue(req.release()); + } + } + + @Test + public void testDecodeFullHttpRequestWithInvalidCharset() { + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + req.headers().set(HttpHeaderNames.CONTENT_TYPE, + "multipart/form-data; boundary=--89421926422648 [; charset=UTF-8]"); + + try { + new HttpPostMultipartRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException expected) { + // expected + } finally { + assertTrue(req.release()); + } + } + + @Test + public void testDecodeFullHttpRequestWithInvalidPayloadReleaseBuffer() { + String content = "\n--861fbeab-cd20-470c-9609-d40a0f704466\n" + + "Content-Disposition: form-data; name=\"image1\"; filename*=\"'some.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: 1\n" + + "x\n" + + "--861fbeab-cd20-470c-9609-d40a0f704466--\n"; + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload", + Unpooled.copiedBuffer(content, CharsetUtil.US_ASCII)); + req.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + req.headers().set("content-length", content.length()); + + try { + new HttpPostMultipartRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException expected) { + // expected + } finally { + assertTrue(req.release()); + } + } + + @Test + public void testDelimiterExceedLeftSpaceInCurrentBuffer() { + String delimiter = "--861fbeab-cd20-470c-9609-d40a0f704466"; + String suffix = 
'\n' + delimiter + "--\n"; + byte[] bsuffix = suffix.getBytes(CharsetUtil.UTF_8); + int partOfDelimiter = bsuffix.length / 2; + int bytesLastChunk = 355 - partOfDelimiter; // to try to have an out of bound since content is > delimiter + byte[] bsuffix1 = Arrays.copyOf(bsuffix, partOfDelimiter); + byte[] bsuffix2 = Arrays.copyOfRange(bsuffix, partOfDelimiter, bsuffix.length); + String prefix = delimiter + "\n" + + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: " + bytesLastChunk + "\n\n"; + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload"); + request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + request.headers().set("content-length", prefix.length() + bytesLastChunk + suffix.length()); + + // Factory using Memory mode + HttpDataFactory factory = new DefaultHttpDataFactory(false); + HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); + ByteBuf buf = Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)); + decoder.offer(new DefaultHttpContent(buf)); + assertNotNull((HttpData) decoder.currentPartialHttpData()); + buf.release(); + // Chunk less than Delimiter size but containing part of delimiter + byte[] body = new byte[bytesLastChunk + bsuffix1.length]; + Arrays.fill(body, (byte) 2); + for (int i = 0; i < bsuffix1.length; i++) { + body[bytesLastChunk + i] = bsuffix1[i]; + } + ByteBuf content = Unpooled.wrappedBuffer(body); + decoder.offer(new DefaultHttpContent(content)); // Ouf of range before here + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + content.release(); + content = Unpooled.wrappedBuffer(bsuffix2); + decoder.offer(new DefaultHttpContent(content)); + assertNull((HttpData) decoder.currentPartialHttpData()); + content.release(); + decoder.offer(new DefaultLastHttpContent()); + FileUpload data = 
(FileUpload) decoder.getBodyHttpDatas().get(0); + assertEquals(data.length(), bytesLastChunk); + assertEquals(true, data.isInMemory()); + + InterfaceHttpData[] httpDatas = decoder.getBodyHttpDatas().toArray(new InterfaceHttpData[0]); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(1, httpData.refCnt(), "Before cleanAllHttpData should be 1"); + } + factory.cleanAllHttpData(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(1, httpData.refCnt(), "After cleanAllHttpData should be 1 if in Memory"); + } + decoder.destroy(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(0, httpData.refCnt(), "RefCnt should be 0"); + } + } + + private void commonTestBigFileDelimiterInMiddleChunk(HttpDataFactory factory, boolean inMemory) + throws IOException { + int nbChunks = 100; + int bytesPerChunk = 100000; + int bytesLastChunk = 10000; + int fileSize = bytesPerChunk * nbChunks + bytesLastChunk; // set Xmx to a number lower than this and it crashes + + String delimiter = "--861fbeab-cd20-470c-9609-d40a0f704466"; + String prefix = delimiter + "\n" + + "Content-Disposition: form-data; name=\"image\"; filename=\"guangzhou.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: " + fileSize + "\n" + + "\n"; + + String suffix1 = "\n" + + "--861fbeab-"; + String suffix2 = "cd20-470c-9609-d40a0f704466--\n"; + String suffix = suffix1 + suffix2; + + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload"); + request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + request.headers().set("content-length", prefix.length() + fileSize + suffix.length()); + + HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); + ByteBuf buf = Unpooled.wrappedBuffer(prefix.getBytes(CharsetUtil.UTF_8)); + decoder.offer(new DefaultHttpContent(buf)); + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + 
buf.release(); + + byte[] body = new byte[bytesPerChunk]; + Arrays.fill(body, (byte) 1); + // Set first bytes as CRLF to ensure it is correctly getting the last CRLF + body[0] = HttpConstants.CR; + body[1] = HttpConstants.LF; + for (int i = 0; i < nbChunks; i++) { + ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerChunk); + decoder.offer(new DefaultHttpContent(content)); // **OutOfMemory previously here** + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + content.release(); + } + + byte[] bsuffix1 = suffix1.getBytes(CharsetUtil.UTF_8); + byte[] previousLastbody = new byte[bytesLastChunk - bsuffix1.length]; + byte[] bdelimiter = delimiter.getBytes(CharsetUtil.UTF_8); + byte[] lastbody = new byte[2 * bsuffix1.length]; + Arrays.fill(previousLastbody, (byte) 1); + previousLastbody[0] = HttpConstants.CR; + previousLastbody[1] = HttpConstants.LF; + Arrays.fill(lastbody, (byte) 1); + // put somewhere a not valid delimiter + for (int i = 0; i < bdelimiter.length; i++) { + previousLastbody[i + 10] = bdelimiter[i]; + } + lastbody[0] = HttpConstants.CR; + lastbody[1] = HttpConstants.LF; + for (int i = 0; i < bsuffix1.length; i++) { + lastbody[bsuffix1.length + i] = bsuffix1[i]; + } + + ByteBuf content2 = Unpooled.wrappedBuffer(previousLastbody, 0, previousLastbody.length); + decoder.offer(new DefaultHttpContent(content2)); + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + content2.release(); + content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length); + decoder.offer(new DefaultHttpContent(content2)); + assertNotNull(((HttpData) decoder.currentPartialHttpData()).content()); + content2.release(); + content2 = Unpooled.wrappedBuffer(suffix2.getBytes(CharsetUtil.UTF_8)); + decoder.offer(new DefaultHttpContent(content2)); + assertNull(decoder.currentPartialHttpData()); + content2.release(); + decoder.offer(new DefaultLastHttpContent()); + + FileUpload data = (FileUpload) decoder.getBodyHttpDatas().get(0); + 
assertEquals(data.length(), fileSize); + assertEquals(inMemory, data.isInMemory()); + if (data.isInMemory()) { + // To be done only if not inMemory: assertEquals(data.get().length, fileSize); + assertFalse(data.getByteBuf().capacity() < 1024 * 1024, + "Capacity should be higher than 1M"); + } + assertTrue(decoder.getCurrentAllocatedCapacity() < 1024 * 1024, + "Capacity should be less than 1M"); + InterfaceHttpData[] httpDatas = decoder.getBodyHttpDatas().toArray(new InterfaceHttpData[0]); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(1, httpData.refCnt(), "Before cleanAllHttpData should be 1"); + } + factory.cleanAllHttpData(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(inMemory? 1 : 0, httpData.refCnt(), "After cleanAllHttpData should be 1 if in Memory"); + } + decoder.destroy(); + for (InterfaceHttpData httpData : httpDatas) { + assertEquals(0, httpData.refCnt(), "RefCnt should be 0"); + } + } + + @Test + public void testBIgFileUploadDelimiterInMiddleChunkDecoderDiskFactory() throws IOException { + // Factory using Disk mode + HttpDataFactory factory = new DefaultHttpDataFactory(true); + + commonTestBigFileDelimiterInMiddleChunk(factory, false); + } + + @Test + public void testBIgFileUploadDelimiterInMiddleChunkDecoderMemoryFactory() throws IOException { + // Factory using Memory mode + HttpDataFactory factory = new DefaultHttpDataFactory(false); + + commonTestBigFileDelimiterInMiddleChunk(factory, true); + } + + @Test + public void testBIgFileUploadDelimiterInMiddleChunkDecoderMixedFactory() throws IOException { + // Factory using Mixed mode, where file shall be on Disk + HttpDataFactory factory = new DefaultHttpDataFactory(10000); + + commonTestBigFileDelimiterInMiddleChunk(factory, false); + } + + @Test + public void testNotBadReleaseBuffersDuringDecodingDiskFactory() throws IOException { + // Using Disk Factory + HttpDataFactory factory = new DefaultHttpDataFactory(true); + 
commonNotBadReleaseBuffersDuringDecoding(factory, false); + } + @Test + public void testNotBadReleaseBuffersDuringDecodingMemoryFactory() throws IOException { + // Using Memory Factory + HttpDataFactory factory = new DefaultHttpDataFactory(false); + commonNotBadReleaseBuffersDuringDecoding(factory, true); + } + @Test + public void testNotBadReleaseBuffersDuringDecodingMixedFactory() throws IOException { + // Using Mixed Factory + HttpDataFactory factory = new DefaultHttpDataFactory(100); + commonNotBadReleaseBuffersDuringDecoding(factory, false); + } + + private void commonNotBadReleaseBuffersDuringDecoding(HttpDataFactory factory, boolean inMemory) + throws IOException { + int nbItems = 20; + int bytesPerItem = 1000; + int maxMemory = 500; + + String prefix1 = "\n--861fbeab-cd20-470c-9609-d40a0f704466\n" + + "Content-Disposition: form-data; name=\"image"; + String prefix2 = + "\"; filename=\"guangzhou.jpeg\"\n" + + "Content-Type: image/jpeg\n" + + "Content-Length: " + bytesPerItem + "\n" + "\n"; + + String suffix = "\n--861fbeab-cd20-470c-9609-d40a0f704466--\n"; + + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/upload"); + request.headers().set("content-type", "multipart/form-data; boundary=861fbeab-cd20-470c-9609-d40a0f704466"); + request.headers().set("content-length", nbItems * (prefix1.length() + prefix2.length() + 2 + bytesPerItem) + + suffix.length()); + HttpPostMultipartRequestDecoder decoder = new HttpPostMultipartRequestDecoder(factory, request); + decoder.setDiscardThreshold(maxMemory); + for (int rank = 0; rank < nbItems; rank++) { + byte[] bp1 = prefix1.getBytes(CharsetUtil.UTF_8); + byte[] bp2 = prefix2.getBytes(CharsetUtil.UTF_8); + byte[] prefix = new byte[bp1.length + 2 + bp2.length]; + for (int i = 0; i < bp1.length; i++) { + prefix[i] = bp1[i]; + } + byte[] brank = Integer.toString(10 + rank).getBytes(CharsetUtil.UTF_8); + prefix[bp1.length] = brank[0]; + prefix[bp1.length + 1] = brank[1]; + for (int i = 0; 
i < bp2.length; i++) { + prefix[bp1.length + 2 + i] = bp2[i]; + } + ByteBuf buf = Unpooled.wrappedBuffer(prefix); + decoder.offer(new DefaultHttpContent(buf)); + buf.release(); + byte[] body = new byte[bytesPerItem]; + Arrays.fill(body, (byte) rank); + ByteBuf content = Unpooled.wrappedBuffer(body, 0, bytesPerItem); + decoder.offer(new DefaultHttpContent(content)); + content.release(); + } + byte[] lastbody = suffix.getBytes(CharsetUtil.UTF_8); + ByteBuf content2 = Unpooled.wrappedBuffer(lastbody, 0, lastbody.length); + decoder.offer(new DefaultHttpContent(content2)); + content2.release(); + decoder.offer(new DefaultLastHttpContent()); + + for (int rank = 0; rank < nbItems; rank++) { + FileUpload data = (FileUpload) decoder.getBodyHttpData("image" + (10 + rank)); + assertEquals(data.length(), bytesPerItem); + assertEquals(inMemory, data.isInMemory()); + byte[] body = new byte[bytesPerItem]; + Arrays.fill(body, (byte) rank); + assertTrue(Arrays.equals(body, data.get())); + } + // To not be done since will load full file on memory: assertEquals(data.get().length, fileSize); + // Not mandatory since implicitely called during destroy of decoder + for (InterfaceHttpData httpData: decoder.getBodyHttpDatas()) { + httpData.release(); + factory.removeHttpDataFromClean(request, httpData); + } + factory.cleanAllHttpData(); + decoder.destroy(); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java index f8b756ca70a..89c03d9800c 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -24,26 +24,30 @@ import io.netty.handler.codec.http.DefaultHttpContent; import io.netty.handler.codec.http.DefaultHttpRequest; import io.netty.handler.codec.http.DefaultLastHttpContent; -import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpVersion; import io.netty.handler.codec.http.LastHttpContent; import io.netty.util.CharsetUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.net.URLEncoder; import java.nio.charset.UnsupportedCharsetException; import java.util.Arrays; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; -/** {@link HttpPostRequestDecoder} test case. */ +/** + * {@link HttpPostRequestDecoder} test case. 
+ */ public class HttpPostRequestDecoderTest { @Test @@ -78,16 +82,17 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { for (String data : Arrays.asList("", "\r", "\r\r", "\r\r\r")) { final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"tmp-0.txt\"\r\n" + - "Content-Type: image/gif\r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"file\"; filename=\"tmp-0.txt\"\r\n" + + "Content-Type: image/gif\r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; // Create decoder instance to test. final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); - decoder.offer(new DefaultHttpContent(Unpooled.copiedBuffer(body, CharsetUtil.UTF_8))); + ByteBuf buf = Unpooled.copiedBuffer(body, CharsetUtil.UTF_8); + decoder.offer(new DefaultHttpContent(buf)); decoder.offer(new DefaultHttpContent(Unpooled.EMPTY_BUFFER)); // Validate it's enough chunks to decode upload. @@ -97,10 +102,11 @@ private static void testBinaryStreamUpload(boolean withSpace) throws Exception { MemoryFileUpload upload = (MemoryFileUpload) decoder.next(); // Validate data has been parsed correctly as it was passed into request. 
- assertEquals("Invalid decoded data [data=" + data.replaceAll("\r", "\\\\r") + ", upload=" + upload + ']', - data, upload.getString(CharsetUtil.UTF_8)); + assertEquals(data, upload.getString(CharsetUtil.UTF_8), + "Invalid decoded data [data=" + data.replaceAll("\r", "\\\\r") + ", upload=" + upload + ']'); upload.release(); decoder.destroy(); + buf.release(); } } @@ -134,6 +140,7 @@ public void testFullHttpRequestUpload() throws Exception { final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); decoder.destroy(); + assertTrue(req.release()); } // See https://github.com/netty/netty/issues/2544 @@ -180,6 +187,7 @@ public void testMultipartCodecWithCRasEndOfAttribute() throws Exception { assertEquals(datas[i].getBytes(CharsetUtil.UTF_8).length, datar.length); decoder.destroy(); + assertTrue(req.release()); } } @@ -213,6 +221,7 @@ public void testQuotedBoundary() throws Exception { final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); decoder.destroy(); + assertTrue(req.release()); } // See https://github.com/netty/netty/issues/1848 @@ -223,28 +232,28 @@ public void testNoZeroOut() throws Exception { final DefaultHttpDataFactory aMemFactory = new DefaultHttpDataFactory(false); DefaultHttpRequest aRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost"); + HttpMethod.POST, + "http://localhost"); aRequest.headers().set(HttpHeaderNames.CONTENT_TYPE, - "multipart/form-data; boundary=" + boundary); + "multipart/form-data; boundary=" + boundary); aRequest.headers().set(HttpHeaderNames.TRANSFER_ENCODING, - HttpHeaderValues.CHUNKED); + HttpHeaderValues.CHUNKED); HttpPostRequestDecoder aDecoder = new HttpPostRequestDecoder(aMemFactory, aRequest); final String aData = "some data would be here. 
the data should be long enough that it " + - "will be longer than the original buffer length of 256 bytes in " + - "the HttpPostRequestDecoder in order to trigger the issue. Some more " + - "data just to be on the safe side."; + "will be longer than the original buffer length of 256 bytes in " + + "the HttpPostRequestDecoder in order to trigger the issue. Some more " + + "data just to be on the safe side."; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"root\"\r\n" + - "Content-Type: text/plain\r\n" + - "\r\n" + - aData + - "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"root\"\r\n" + + "Content-Type: text/plain\r\n" + + "\r\n" + + aData + + "\r\n" + + "--" + boundary + "--\r\n"; byte[] aBytes = body.getBytes(); @@ -262,7 +271,7 @@ public void testNoZeroOut() throws Exception { aDecoder.offer(LastHttpContent.EMPTY_LAST_CONTENT); - assertTrue("Should have a piece of data", aDecoder.hasNext()); + assertTrue(aDecoder.hasNext(), "Should have a piece of data"); InterfaceHttpData aDecodedData = aDecoder.next(); assertEquals(InterfaceHttpData.HttpDataType.Attribute, aDecodedData.getHttpDataType()); @@ -272,12 +281,14 @@ public void testNoZeroOut() throws Exception { aDecodedData.release(); aDecoder.destroy(); + aSmallBuf.release(); + aLargeBuf.release(); } // See https://github.com/netty/netty/issues/2305 @Test public void testChunkCorrect() throws Exception { - String payload = "town=794649819&town=784444184&town=794649672&town=794657800&town=" + + String payload = "town=794649819&town=784444184&town=794649672&town=794657800&town=" + "794655734&town=794649377&town=794652136&town=789936338&town=789948986&town=" + "789949643&town=786358677&town=794655880&town=786398977&town=789901165&town=" + "789913325&town=789903418&town=789903579&town=794645251&town=794694126&town=" + @@ -306,26 +317,44 @@ public void testChunkCorrect() throws Exception { 
"789958999&town=789961555&town=794694050&town=794650241&town=794656286&town=" + "794692081&town=794660090&town=794665227&town=794665136&town=794669931"; DefaultHttpRequest defaultHttpRequest = - new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(defaultHttpRequest); int firstChunk = 10; int middleChunk = 1024; - HttpContent part1 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(0, firstChunk).getBytes())); - HttpContent part2 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(firstChunk, firstChunk + middleChunk).getBytes())); - HttpContent part3 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(firstChunk + middleChunk, firstChunk + middleChunk * 2).getBytes())); - HttpContent part4 = new DefaultHttpContent(Unpooled.wrappedBuffer( - payload.substring(firstChunk + middleChunk * 2).getBytes())); + byte[] payload1 = payload.substring(0, firstChunk).getBytes(); + byte[] payload2 = payload.substring(firstChunk, firstChunk + middleChunk).getBytes(); + byte[] payload3 = payload.substring(firstChunk + middleChunk, firstChunk + middleChunk * 2).getBytes(); + byte[] payload4 = payload.substring(firstChunk + middleChunk * 2).getBytes(); - decoder.offer(part1); - decoder.offer(part2); - decoder.offer(part3); - decoder.offer(part4); + ByteBuf buf1 = Unpooled.directBuffer(payload1.length); + ByteBuf buf2 = Unpooled.directBuffer(payload2.length); + ByteBuf buf3 = Unpooled.directBuffer(payload3.length); + ByteBuf buf4 = Unpooled.directBuffer(payload4.length); + + buf1.writeBytes(payload1); + buf2.writeBytes(payload2); + buf3.writeBytes(payload3); + buf4.writeBytes(payload4); + + decoder.offer(new DefaultHttpContent(buf1)); + decoder.offer(new DefaultHttpContent(buf2)); + decoder.offer(new DefaultHttpContent(buf3)); + decoder.offer(new DefaultLastHttpContent(buf4)); + + 
assertFalse(decoder.getBodyHttpDatas().isEmpty()); + assertEquals(139, decoder.getBodyHttpDatas().size()); + + Attribute attr = (Attribute) decoder.getBodyHttpData("town"); + assertEquals("794649819", attr.getValue()); + + decoder.destroy(); + buf1.release(); + buf2.release(); + buf3.release(); + buf4.release(); } // See https://github.com/netty/netty/issues/3326 @@ -352,13 +381,14 @@ public void testFilenameContainingSemicolon() throws Exception { final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); decoder.destroy(); + assertTrue(req.release()); } @Test public void testFilenameContainingSemicolon2() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); @@ -366,11 +396,11 @@ public void testFilenameContainingSemicolon2() throws Exception { final String filename = "tmp;0.txt"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + - "Content-Type: image/gif\r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + + "Content-Type: image/gif\r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8.name())); // Create decoder instance to test. 
@@ -381,6 +411,7 @@ public void testFilenameContainingSemicolon2() throws Exception { FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp 0.txt", fileUpload.getFilename()); decoder.destroy(); + assertTrue(req.release()); } @Test @@ -411,25 +442,59 @@ public void testMultipartRequestWithoutContentTypeBody() { final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); decoder.destroy(); + assertTrue(req.release()); + } + + @Test + public void testDecodeOtherMimeHeaderFields() throws Exception { + final String boundary = "74e78d11b0214bdcbc2f86491eeb4902"; + String filecontent = "123456"; + + final String body = "--" + boundary + "\r\n" + + "Content-Disposition: form-data; name=\"file\"; filename=" + "\"" + "attached.txt" + "\"" + + "\r\n" + + "Content-Type: application/octet-stream" + "\r\n" + + "Content-Encoding: gzip" + "\r\n" + + "\r\n" + + filecontent + + "\r\n" + + "--" + boundary + "--"; + + final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); + req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); + req.headers().add(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED); + final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); + final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + InterfaceHttpData part1 = decoder.getBodyHttpDatas().get(0); + assertTrue(part1 instanceof FileUpload, "the item should be a FileUpload"); + FileUpload fileUpload = (FileUpload) part1; + byte[] fileBytes = fileUpload.get(); + assertTrue(filecontent.equals(new String(fileBytes)), "the filecontent should not be decoded"); + decoder.destroy(); + assertTrue(req.release()); } @Test public void 
testMultipartRequestWithFileInvalidCharset() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); final String data = "asdf"; final String filename = "tmp;0.txt"; final String body = - "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + - "Content-Type: image/gif; charset=ABCD\r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "--" + boundary + "\r\n" + + "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + + "Content-Type: image/gif; charset=ABCD\r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8)); // Create decoder instance to test. @@ -439,7 +504,7 @@ public void testMultipartRequestWithFileInvalidCharset() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -447,22 +512,22 @@ public void testMultipartRequestWithFileInvalidCharset() throws Exception { public void testMultipartRequestWithFieldInvalidCharset() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. 
final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); final String aData = "some data would be here. the data should be long enough that it " + - "will be longer than the original buffer length of 256 bytes in " + - "the HttpPostRequestDecoder in order to trigger the issue. Some more " + - "data just to be on the safe side."; + "will be longer than the original buffer length of 256 bytes in " + + "the HttpPostRequestDecoder in order to trigger the issue. Some more " + + "data just to be on the safe side."; final String body = - "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"root\"\r\n" + - "Content-Type: text/plain; charset=ABCD\r\n" + - "\r\n" + - aData + - "\r\n" + - "--" + boundary + "--\r\n"; + "--" + boundary + "\r\n" + + "Content-Disposition: form-data; name=\"root\"\r\n" + + "Content-Type: text/plain; charset=ABCD\r\n" + + "\r\n" + + aData + + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8)); // Create decoder instance to test. 
@@ -472,7 +537,7 @@ public void testMultipartRequestWithFieldInvalidCharset() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -488,8 +553,8 @@ public void testFormEncodeIncorrect() throws Exception { } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof IllegalArgumentException); } finally { - decoder.destroy(); content.release(); + decoder.destroy(); } } @@ -504,27 +569,27 @@ public void testDecodeContentDispositionFieldParameters() throws Exception { String filenameEncoded = URLEncoder.encode(filename, encoding); final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=" + encoding + "''" + filenameEncoded + "\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=" + encoding + "''" + filenameEncoded + + "\r\n\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); InterfaceHttpData part1 = decoder.getBodyHttpDatas().get(0); - assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); + assertTrue(part1 instanceof FileUpload, "the item should be a FileUpload"); FileUpload fileUpload = (FileUpload) part1; - assertEquals("the filename should be decoded", 
filename, fileUpload.getFilename()); + assertEquals(filename, fileUpload.getFilename(), "the filename should be decoded"); decoder.destroy(); - req.release(); + assertTrue(req.release()); } // https://github.com/netty/netty/pull/7265 @@ -539,28 +604,28 @@ public void testDecodeWithLanguageContentDispositionFieldParameters() throws Exc String filenameEncoded = URLEncoder.encode(filename, encoding); final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=" + - encoding + "'" + language + "'" + filenameEncoded + "\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=" + + encoding + "'" + language + "'" + filenameEncoded + "\r\n" + + "\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); assertFalse(decoder.getBodyHttpDatas().isEmpty()); InterfaceHttpData part1 = decoder.getBodyHttpDatas().get(0); - assertTrue("the item should be a FileUpload", part1 instanceof FileUpload); + assertTrue(part1 instanceof FileUpload, "the item should be a FileUpload"); FileUpload fileUpload = (FileUpload) part1; - assertEquals("the filename should be decoded", filename, fileUpload.getFilename()); + assertEquals(filename, fileUpload.getFilename(), "the filename should be decoded"); decoder.destroy(); - req.release(); + assertTrue(req.release()); } // https://github.com/netty/netty/pull/7265 @@ -570,16 +635,16 @@ public void 
testDecodeMalformedNotEncodedContentDispositionFieldParameters() thr final String boundary = "74e78d11b0214bdcbc2f86491eeb4902"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=not-encoded\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=not-encoded\r\n" + + "\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); @@ -591,7 +656,7 @@ public void testDecodeMalformedNotEncodedContentDispositionFieldParameters() thr } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof ArrayIndexOutOfBoundsException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -602,16 +667,16 @@ public void testDecodeMalformedBadCharsetContentDispositionFieldParameters() thr final String boundary = "74e78d11b0214bdcbc2f86491eeb4902"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename*=not-a-charset''filename\r\n" + - "\r\n" + - "foo\r\n" + - "\r\n" + - "--" + boundary + "--"; + "Content-Disposition: form-data; name=\"file\"; filename*=not-a-charset''filename\r\n" + + "\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, - HttpMethod.POST, - "http://localhost", - Unpooled.wrappedBuffer(body.getBytes())); + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); @@ -623,7 +688,7 @@ public void 
testDecodeMalformedBadCharsetContentDispositionFieldParameters() thr } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { assertTrue(e.getCause() instanceof UnsupportedCharsetException); } finally { - req.release(); + assertTrue(req.release()); } } @@ -632,7 +697,7 @@ public void testDecodeMalformedBadCharsetContentDispositionFieldParameters() thr public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exception { final String boundary = "dLV9Wyq26L_-JQxk6ferf-RT153LhOO"; final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, - "http://localhost"); + "http://localhost"); req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); // Force to use memory-based data. final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); @@ -640,11 +705,11 @@ public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exceptio final String filename = "tmp-0.txt"; final String body = "--" + boundary + "\r\n" + - "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + - "Content-Type: \r\n" + - "\r\n" + - data + "\r\n" + - "--" + boundary + "--\r\n"; + "Content-Disposition: form-data; name=\"file\"; filename=\"" + filename + "\"\r\n" + + "Content-Type: \r\n" + + "\r\n" + + data + "\r\n" + + "--" + boundary + "--\r\n"; req.content().writeBytes(body.getBytes(CharsetUtil.UTF_8.name())); // Create decoder instance to test. 
@@ -655,5 +720,273 @@ public void testDecodeMalformedEmptyContentTypeFieldParameters() throws Exceptio FileUpload fileUpload = (FileUpload) part1; assertEquals("tmp-0.txt", fileUpload.getFilename()); decoder.destroy(); + assertTrue(req.release()); + } + + // https://github.com/netty/netty/issues/8575 + @Test + public void testMultipartRequest() throws Exception { + String BOUNDARY = "01f136d9282f"; + + byte[] bodyBytes = ("--" + BOUNDARY + "\n" + + "Content-Disposition: form-data; name=\"msg_id\"\n" + + "\n" + + "15200\n" + + "--" + BOUNDARY + "\n" + + "Content-Disposition: form-data; name=\"msg\"\n" + + "\n" + + "test message\n" + + "--" + BOUNDARY + "--").getBytes(); + ByteBuf byteBuf = Unpooled.directBuffer(bodyBytes.length); + byteBuf.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_0, HttpMethod.POST, "/up", byteBuf); + req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + BOUNDARY); + + HttpPostRequestDecoder decoder = + new HttpPostRequestDecoder(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), + req, + CharsetUtil.UTF_8); + + assertTrue(decoder.isMultipart()); + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + assertEquals(2, decoder.getBodyHttpDatas().size()); + + Attribute attrMsg = (Attribute) decoder.getBodyHttpData("msg"); + assertTrue(attrMsg.getByteBuf().isDirect()); + assertEquals("test message", attrMsg.getValue()); + Attribute attrMsgId = (Attribute) decoder.getBodyHttpData("msg_id"); + assertTrue(attrMsgId.getByteBuf().isDirect()); + assertEquals("15200", attrMsgId.getValue()); + + decoder.destroy(); + assertTrue(req.release()); + } + + @Test + public void testNotLeak() { + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", + Unpooled.copiedBuffer("a=1&&b=2", CharsetUtil.US_ASCII)); + try { + assertThrows(HttpPostRequestDecoder.ErrorDataDecoderException.class, + () -> new 
HttpPostStandardRequestDecoder(request).destroy()); + } finally { + assertTrue(request.release()); + } + } + + @Test + public void testNotLeakDirectBufferWhenWrapIllegalArgumentException() { + assertThrows(HttpPostRequestDecoder.ErrorDataDecoderException.class, + () -> testNotLeakWhenWrapIllegalArgumentException(Unpooled.directBuffer())); + } + + @Test + public void testNotLeakHeapBufferWhenWrapIllegalArgumentException() { + assertThrows(HttpPostRequestDecoder.ErrorDataDecoderException.class, + () -> testNotLeakWhenWrapIllegalArgumentException(Unpooled.buffer())); + } + + private static void testNotLeakWhenWrapIllegalArgumentException(ByteBuf buf) { + buf.writeCharSequence("a=b&foo=%22bar%22&==", CharsetUtil.US_ASCII); + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", buf); + try { + new HttpPostStandardRequestDecoder(request).destroy(); + } finally { + assertTrue(request.release()); + } + } + + @Test + public void testMultipartFormDataContentType() { + HttpRequest request = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + assertFalse(HttpPostRequestDecoder.isMultipart(request)); + + String multipartDataValue = HttpHeaderValues.MULTIPART_FORM_DATA + ";" + "boundary=gc0p4Jq0M2Yt08jU534c0p"; + request.headers().set(HttpHeaderNames.CONTENT_TYPE, ";" + multipartDataValue); + assertFalse(HttpPostRequestDecoder.isMultipart(request)); + + request.headers().set(HttpHeaderNames.CONTENT_TYPE, multipartDataValue); + assertTrue(HttpPostRequestDecoder.isMultipart(request)); + } + + // see https://github.com/netty/netty/issues/10087 + @Test + public void testDecodeWithLanguageContentDispositionFieldParametersForFix() throws Exception { + + final String boundary = "952178786863262625034234"; + + String encoding = "UTF-8"; + String filename = "测试test.txt"; + String filenameEncoded = URLEncoder.encode(filename, encoding); + + final String body = "--" + boundary + "\r\n" + + "Content-Disposition: form-data; 
name=\"file\"; filename*=\"" + + encoding + "''" + filenameEncoded + "\"\r\n" + + "\r\n" + + "foo\r\n" + + "\r\n" + + "--" + boundary + "--"; + + final DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, + HttpMethod.POST, + "http://localhost", + Unpooled.wrappedBuffer(body.getBytes())); + + req.headers().add(HttpHeaderNames.CONTENT_TYPE, "multipart/form-data; boundary=" + boundary); + final DefaultHttpDataFactory inMemoryFactory = new DefaultHttpDataFactory(false); + final HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(inMemoryFactory, req); + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + InterfaceHttpData part1 = decoder.getBodyHttpDatas().get(0); + assertTrue(part1 instanceof FileUpload, "the item should be a FileUpload"); + FileUpload fileUpload = (FileUpload) part1; + assertEquals(filename, fileUpload.getFilename(), "the filename should be decoded"); + + decoder.destroy(); + assertTrue(req.release()); + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBody() throws Exception { + byte[] bodyBytes = "foo=bar&a=b&empty=&city=%3c%22new%22%20york%20city%3e&other_city=los+angeles".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req); + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + + assertFalse(decoder.getBodyHttpDatas().isEmpty()); + assertEquals(5, decoder.getBodyHttpDatas().size()); + + Attribute attr = (Attribute) decoder.getBodyHttpData("foo"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("bar", attr.getValue()); + + attr = (Attribute) decoder.getBodyHttpData("a"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("b", attr.getValue()); + + attr = (Attribute) decoder.getBodyHttpData("empty"); + assertTrue(attr.getByteBuf().isDirect()); + 
assertEquals("", attr.getValue()); + + attr = (Attribute) decoder.getBodyHttpData("city"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("<\"new\" york city>", attr.getValue()); + + attr = (Attribute) decoder.getBodyHttpData("other_city"); + assertTrue(attr.getByteBuf().isDirect()); + assertEquals("los angeles", attr.getValue()); + + decoder.destroy(); + assertTrue(req.release()); + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte0() { + byte[] bodyBytes = "foo=bar&a=b&empty=%&city=paris".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%'", e.getMessage()); + } finally { + assertTrue(req.release()); + } + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithBrokenHexByte1() { + byte[] bodyBytes = "foo=bar&a=b&empty=%2&city=london".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%2'", e.getMessage()); + } finally { + assertTrue(req.release()); + } + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleHi() { + byte[] bodyBytes = "foo=bar&a=b&empty=%Zc&city=london".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new 
DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%Zc'", e.getMessage()); + } finally { + assertTrue(req.release()); + } + } + + @Test + public void testDecodeFullHttpRequestWithUrlEncodedBodyWithInvalidHexNibbleLo() { + byte[] bodyBytes = "foo=bar&a=b&empty=%2g&city=london".getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + try { + new HttpPostRequestDecoder(req); + fail("Was expecting an ErrorDataDecoderException"); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + assertEquals("Invalid hex byte at index '0' in string: '%2g'", e.getMessage()); + } finally { + assertTrue(req.release()); + } + } + + @Test + public void testDecodeMultipartRequest() { + byte[] bodyBytes = ("--be38b42a9ad2713f\n" + + "content-disposition: form-data; name=\"title\"\n" + + "content-length: 10\n" + + "content-type: text/plain; charset=UTF-8\n" + + "\n" + + "bar-stream\n" + + "--be38b42a9ad2713f\n" + + "content-disposition: form-data; name=\"data\"; filename=\"data.json\"\n" + + "content-length: 16\n" + + "content-type: application/json; charset=UTF-8\n" + + "\n" + + "{\"title\":\"Test\"}\n" + + "--be38b42a9ad2713f--").getBytes(); + ByteBuf content = Unpooled.directBuffer(bodyBytes.length); + content.writeBytes(bodyBytes); + FullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/", content); + req.headers().add("Content-Type", "multipart/form-data;boundary=be38b42a9ad2713f"); + + try { + HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(new DefaultHttpDataFactory(false), req); + assertEquals(2, 
decoder.getBodyHttpDatas().size()); + InterfaceHttpData data = decoder.getBodyHttpData("title"); + assertTrue(data instanceof MemoryAttribute); + assertEquals("bar-stream", ((MemoryAttribute) data).getString()); + assertTrue(data.release()); + data = decoder.getBodyHttpData("data"); + assertTrue(data instanceof MemoryFileUpload); + assertEquals("{\"title\":\"Test\"}", ((MemoryFileUpload) data).getString()); + assertTrue(data.release()); + decoder.destroy(); + } catch (HttpPostRequestDecoder.ErrorDataDecoderException e) { + fail("Was not expecting an exception"); + } finally { + assertTrue(req.release()); + } } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoderTest.java index 7669f971b12..d1810cc08cd 100755 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,21 +18,22 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufAllocator; import io.netty.buffer.Unpooled; +import io.netty.handler.codec.http.DefaultHttpRequest; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.HttpConstants; import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpVersion; import io.netty.handler.codec.http.LastHttpContent; import io.netty.handler.codec.http.multipart.HttpPostRequestEncoder.EncoderMode; import io.netty.handler.codec.http.multipart.HttpPostRequestEncoder.ErrorDataEncoderException; import io.netty.util.CharsetUtil; import io.netty.util.internal.StringUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; import java.io.ByteArrayInputStream; import java.io.File; -import java.nio.charset.Charset; import java.util.Arrays; import java.util.List; @@ -40,10 +41,10 @@ import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TRANSFER_ENCODING; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_TYPE; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; /** {@link HttpPostRequestEncoder} test case. 
*/ public class HttpPostRequestEncoderTest { @@ -139,9 +140,11 @@ public void testMultiFileUploadInMixedMode() throws Exception { HttpPostRequestEncoder encoder = new HttpPostRequestEncoder(request, true); File file1 = new File(getClass().getResource("/file-01.txt").toURI()); File file2 = new File(getClass().getResource("/file-02.txt").toURI()); + File file3 = new File(getClass().getResource("/file-03.txt").toURI()); encoder.addBodyAttribute("foo", "bar"); encoder.addBodyFileUpload("quux", file1, "text/plain", false); encoder.addBodyFileUpload("quux", file2, "text/plain", false); + encoder.addBodyFileUpload("quux", file3, "text/plain", false); // We have to query the value of these two fields before finalizing // the request, which unsets one of them. @@ -160,7 +163,7 @@ public void testMultiFileUploadInMixedMode() throws Exception { CONTENT_TYPE + ": multipart/mixed; boundary=" + multipartMixedBoundary + "\r\n" + "\r\n" + "--" + multipartMixedBoundary + "\r\n" + - CONTENT_DISPOSITION + ": attachment; filename=\"file-02.txt\"" + "\r\n" + + CONTENT_DISPOSITION + ": attachment; filename=\"file-01.txt\"" + "\r\n" + CONTENT_LENGTH + ": " + file1.length() + "\r\n" + CONTENT_TYPE + ": text/plain" + "\r\n" + CONTENT_TRANSFER_ENCODING + ": binary" + "\r\n" + @@ -175,6 +178,14 @@ public void testMultiFileUploadInMixedMode() throws Exception { "\r\n" + "File 02" + StringUtil.NEWLINE + "\r\n" + + "--" + multipartMixedBoundary + "\r\n" + + CONTENT_DISPOSITION + ": attachment; filename=\"file-03.txt\"" + "\r\n" + + CONTENT_LENGTH + ": " + file3.length() + "\r\n" + + CONTENT_TYPE + ": text/plain" + "\r\n" + + CONTENT_TRANSFER_ENCODING + ": binary" + "\r\n" + + "\r\n" + + "File 03" + StringUtil.NEWLINE + + "\r\n" + "--" + multipartMixedBoundary + "--" + "\r\n" + "--" + multipartDataBoundary + "--" + "\r\n"; @@ -334,9 +345,9 @@ public void testHttpPostRequestEncoderSlicedBuffer() throws Exception { HttpContent httpContent = encoder.readChunk((ByteBufAllocator) null); ByteBuf 
content = httpContent.content(); int refCnt = content.refCnt(); - assertTrue("content: " + content + " content.unwrap(): " + content.unwrap() + " refCnt: " + refCnt, - (content.unwrap() == content || content.unwrap() == null) && refCnt == 1 || - content.unwrap() != content && refCnt == 2); + assertTrue((content.unwrap() == content || content.unwrap() == null) && refCnt == 1 || + content.unwrap() != content && refCnt == 2, + "content: " + content + " content.unwrap(): " + content.unwrap() + " refCnt: " + refCnt); httpContent.release(); } encoder.cleanFiles(); @@ -390,10 +401,10 @@ public void testDataIsMultipleOfChunkSize1() throws Exception { checkNextChunkSize(encoder, 8080); HttpContent httpContent = encoder.readChunk((ByteBufAllocator) null); - assertTrue("Expected LastHttpContent is not received", httpContent instanceof LastHttpContent); + assertTrue(httpContent instanceof LastHttpContent, "Expected LastHttpContent is not received"); httpContent.release(); - assertTrue("Expected end of input is not receive", encoder.isEndOfInput()); + assertTrue(encoder.isEndOfInput(), "Expected end of input is not receive"); } @Test @@ -412,10 +423,10 @@ public void testDataIsMultipleOfChunkSize2() throws Exception { checkNextChunkSize(encoder, 8080); HttpContent httpContent = encoder.readChunk((ByteBufAllocator) null); - assertTrue("Expected LastHttpContent is not received", httpContent instanceof LastHttpContent); + assertTrue(httpContent instanceof LastHttpContent, "Expected LastHttpContent is not received"); httpContent.release(); - assertTrue("Expected end of input is not receive", encoder.isEndOfInput()); + assertTrue(encoder.isEndOfInput(), "Expected end of input is not receive"); } private static void checkNextChunkSize(HttpPostRequestEncoder encoder, int sizeWithoutDelimiter) throws Exception { @@ -430,8 +441,31 @@ private static void checkNextChunkSize(HttpPostRequestEncoder encoder, int sizeW int readable = httpContent.content().readableBytes(); boolean expectedSize 
= readable >= expectedSizeMin && readable <= expectedSizeMax; - assertTrue("Chunk size is not in expected range (" + expectedSizeMin + " - " + expectedSizeMax + "), was: " - + readable, expectedSize); + assertTrue(expectedSize, "Chunk size is not in expected range (" + expectedSizeMin + " - " + + expectedSizeMax + "), was: " + readable); httpContent.release(); } + + @Test + public void testEncodeChunkedContent() throws Exception { + HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + HttpPostRequestEncoder encoder = new HttpPostRequestEncoder(req, false); + + int length = 8077 + 8096; + char[] array = new char[length]; + Arrays.fill(array, 'a'); + String longText = new String(array); + + encoder.addBodyAttribute("data", longText); + encoder.addBodyAttribute("moreData", "abcd"); + + assertNotNull(encoder.finalizeRequest()); + + while (!encoder.isEndOfInput()) { + encoder.readChunk((ByteBufAllocator) null).release(); + } + + assertTrue(encoder.isEndOfInput()); + encoder.cleanFiles(); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/MemoryFileUploadTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/MemoryFileUploadTest.java index 4d53f49e55b..167c8c3c209 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/MemoryFileUploadTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/MemoryFileUploadTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,8 +15,9 @@ */ package io.netty.handler.codec.http.multipart; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; public class MemoryFileUploadTest { @@ -24,6 +25,6 @@ public class MemoryFileUploadTest { public final void testMemoryFileUploadEquals() { MemoryFileUpload f1 = new MemoryFileUpload("m1", "m1", "application/json", null, null, 100); - Assert.assertEquals(f1, f1); + assertEquals(f1, f1); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrameTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrameTest.java new file mode 100644 index 00000000000..f1bcc3fc3d8 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/CloseWebSocketFrameTest.java @@ -0,0 +1,79 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and limitations under the License. 
+ */ +package io.netty.handler.codec.http.websocketx; + +import org.assertj.core.api.ThrowableAssert; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; + +class CloseWebSocketFrameTest { + + @Test + void testInvalidCode() { + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(WebSocketCloseStatus.ABNORMAL_CLOSURE); + } + }); + + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(WebSocketCloseStatus.ABNORMAL_CLOSURE, "invalid code"); + } + }); + + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(1006, "invalid code"); + } + }); + + doTestInvalidCode(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new CloseWebSocketFrame(true, 0, 1006, "invalid code"); + } + }); + } + + @Test + void testValidCode() { + doTestValidCode(new CloseWebSocketFrame(WebSocketCloseStatus.NORMAL_CLOSURE), + WebSocketCloseStatus.NORMAL_CLOSURE.code(), WebSocketCloseStatus.NORMAL_CLOSURE.reasonText()); + + doTestValidCode(new CloseWebSocketFrame(WebSocketCloseStatus.NORMAL_CLOSURE, "valid code"), + WebSocketCloseStatus.NORMAL_CLOSURE.code(), "valid code"); + + doTestValidCode(new CloseWebSocketFrame(1000, "valid code"), 1000, "valid code"); + + doTestValidCode(new CloseWebSocketFrame(true, 0, 1000, "valid code"), 1000, "valid code"); + } + + private static void doTestInvalidCode(ThrowableAssert.ThrowingCallable callable) { + assertThatExceptionOfType(IllegalArgumentException.class).isThrownBy(callable); + } + + private static void doTestValidCode(CloseWebSocketFrame frame, int expectedCode, String expectedReason) { + 
assertThat(frame.statusCode()).isEqualTo(expectedCode); + assertThat(frame.reasonText()).isEqualTo(expectedReason); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoderTest.java index f152852add9..c428e316f78 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket00FrameEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,8 +17,11 @@ import io.netty.buffer.ByteBuf; import io.netty.channel.embedded.EmbeddedChannel; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class WebSocket00FrameEncoderTest { @@ -26,19 +29,19 @@ public class WebSocket00FrameEncoderTest { @Test public void testMultipleWebSocketCloseFrames() { EmbeddedChannel channel = new EmbeddedChannel(new WebSocket00FrameEncoder()); - Assert.assertTrue(channel.writeOutbound(new CloseWebSocketFrame())); - Assert.assertTrue(channel.writeOutbound(new CloseWebSocketFrame())); - Assert.assertTrue(channel.finish()); + assertTrue(channel.writeOutbound(new CloseWebSocketFrame())); + assertTrue(channel.writeOutbound(new CloseWebSocketFrame())); + assertTrue(channel.finish()); assertCloseWebSocketFrame(channel); assertCloseWebSocketFrame(channel); 
- Assert.assertNull(channel.readOutbound()); + assertNull(channel.readOutbound()); } private static void assertCloseWebSocketFrame(EmbeddedChannel channel) { ByteBuf buf = channel.readOutbound(); - Assert.assertEquals(2, buf.readableBytes()); - Assert.assertEquals((byte) 0xFF, buf.readByte()); - Assert.assertEquals((byte) 0x00, buf.readByte()); + assertEquals(2, buf.readableBytes()); + assertEquals((byte) 0xFF, buf.readByte()); + assertEquals((byte) 0x00, buf.readByte()); buf.release(); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08EncoderDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08EncoderDecoderTest.java index a84a81a2f76..2edc7bd35d0 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08EncoderDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08EncoderDecoderTest.java @@ -1,11 +1,11 @@ /* - * Copyright 2014 The Netty Project + * Copyright 2019 The Netty Project * * The Netty Project licenses this file to you under the Apache License, * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,8 +18,13 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Tests the WebSocket08FrameEncoder and Decoder implementation.
    @@ -53,6 +58,72 @@ private void initTestData() { strTestData = s.toString(); } + @Test + public void testWebSocketProtocolViolation() { + // Given + initTestData(); + + int maxPayloadLength = 255; + String errorMessage = "Max frame length of " + maxPayloadLength + " has been exceeded."; + WebSocketCloseStatus expectedStatus = WebSocketCloseStatus.MESSAGE_TOO_BIG; + + // With auto-close + WebSocketDecoderConfig config = WebSocketDecoderConfig.newBuilder() + .maxFramePayloadLength(maxPayloadLength) + .closeOnProtocolViolation(true) + .build(); + EmbeddedChannel inChannel = new EmbeddedChannel(new WebSocket08FrameDecoder(config)); + EmbeddedChannel outChannel = new EmbeddedChannel(new WebSocket08FrameEncoder(true)); + + executeProtocolViolationTest(outChannel, inChannel, maxPayloadLength + 1, expectedStatus, errorMessage); + + CloseWebSocketFrame response = inChannel.readOutbound(); + assertNotNull(response); + assertEquals(expectedStatus.code(), response.statusCode()); + assertEquals(errorMessage, response.reasonText()); + response.release(); + + assertFalse(inChannel.finish()); + assertFalse(outChannel.finish()); + + // Without auto-close + config = WebSocketDecoderConfig.newBuilder() + .maxFramePayloadLength(maxPayloadLength) + .closeOnProtocolViolation(false) + .build(); + inChannel = new EmbeddedChannel(new WebSocket08FrameDecoder(config)); + outChannel = new EmbeddedChannel(new WebSocket08FrameEncoder(true)); + + executeProtocolViolationTest(outChannel, inChannel, maxPayloadLength + 1, expectedStatus, errorMessage); + + response = inChannel.readOutbound(); + assertNull(response); + + assertFalse(inChannel.finish()); + assertFalse(outChannel.finish()); + + // Release test data + binTestData.release(); + } + + private void executeProtocolViolationTest(EmbeddedChannel outChannel, EmbeddedChannel inChannel, + int testDataLength, WebSocketCloseStatus expectedStatus, String errorMessage) { + CorruptedWebSocketFrameException corrupted = null; + + try { + 
testBinaryWithLen(outChannel, inChannel, testDataLength); + } catch (CorruptedWebSocketFrameException e) { + corrupted = e; + } + + BinaryWebSocketFrame exceedingFrame = inChannel.readInbound(); + assertNull(exceedingFrame); + + assertNotNull(corrupted); + assertEquals(expectedStatus, corrupted.closeStatus()); + assertEquals(errorMessage, corrupted.getMessage()); + } + @Test public void testWebSocketEncodingAndDecoding() { initTestData(); @@ -108,22 +179,13 @@ private void testTextWithLen(EmbeddedChannel outChannel, EmbeddedChannel inChann String testStr = strTestData.substring(0, testDataLength); outChannel.writeOutbound(new TextWebSocketFrame(testStr)); - // Transfer encoded data into decoder - // Loop because there might be multiple frames (gathering write) - while (true) { - ByteBuf encoded = outChannel.readOutbound(); - if (encoded != null) { - inChannel.writeInbound(encoded); - } else { - break; - } - } + transfer(outChannel, inChannel); Object decoded = inChannel.readInbound(); - Assert.assertNotNull(decoded); - Assert.assertTrue(decoded instanceof TextWebSocketFrame); + assertNotNull(decoded); + assertTrue(decoded instanceof TextWebSocketFrame); TextWebSocketFrame txt = (TextWebSocketFrame) decoded; - Assert.assertEquals(txt.text(), testStr); + assertEquals(txt.text(), testStr); txt.release(); } @@ -132,26 +194,29 @@ private void testBinaryWithLen(EmbeddedChannel outChannel, EmbeddedChannel inCha binTestData.setIndex(0, testDataLength); // Send only len bytes outChannel.writeOutbound(new BinaryWebSocketFrame(binTestData)); - // Transfer encoded data into decoder - // Loop because there might be multiple frames (gathering write) - while (true) { - ByteBuf encoded = outChannel.readOutbound(); - if (encoded != null) { - inChannel.writeInbound(encoded); - } else { - break; - } - } + transfer(outChannel, inChannel); Object decoded = inChannel.readInbound(); - Assert.assertNotNull(decoded); - Assert.assertTrue(decoded instanceof BinaryWebSocketFrame); + 
assertNotNull(decoded); + assertTrue(decoded instanceof BinaryWebSocketFrame); BinaryWebSocketFrame binFrame = (BinaryWebSocketFrame) decoded; int readable = binFrame.content().readableBytes(); - Assert.assertEquals(readable, testDataLength); + assertEquals(readable, testDataLength); for (int i = 0; i < testDataLength; i++) { - Assert.assertEquals(binTestData.getByte(i), binFrame.content().getByte(i)); + assertEquals(binTestData.getByte(i), binFrame.content().getByte(i)); } binFrame.release(); } + + private void transfer(EmbeddedChannel outChannel, EmbeddedChannel inChannel) { + // Transfer encoded data into decoder + // Loop because there might be multiple frames (gathering write) + for (;;) { + ByteBuf encoded = outChannel.readOutbound(); + if (encoded == null) { + return; + } + inChannel.writeInbound(encoded); + } + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameDecoderTest.java index 547eed6174e..4795a8faab8 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocket08FrameDecoderTest.java @@ -4,7 +4,7 @@ * The Netty Project licenses this file to you under the Apache License, version 2.0 (the "License"); * you may not use this file except in compliance with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License is * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
@@ -12,18 +12,62 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.embedded.EmbeddedChannel; +import org.junit.jupiter.api.Test; -import org.junit.Test; -import org.mockito.Mockito; +import java.util.HashSet; +import java.util.Set; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.verify; public class WebSocket08FrameDecoderTest { @Test public void channelInactive() throws Exception { final WebSocket08FrameDecoder decoder = new WebSocket08FrameDecoder(true, true, 65535, false); - final ChannelHandlerContext ctx = Mockito.mock(ChannelHandlerContext.class); + final ChannelHandlerContext ctx = mock(ChannelHandlerContext.class); + decoder.handlerAdded(ctx); decoder.channelInactive(ctx); - Mockito.verify(ctx).fireChannelInactive(); + verify(ctx).fireChannelInactive(); + } + + @Test + public void supportIanaStatusCodes() throws Exception { + Set forbiddenIanaCodes = new HashSet<>(); + forbiddenIanaCodes.add(1004); + forbiddenIanaCodes.add(1005); + forbiddenIanaCodes.add(1006); + Set validIanaCodes = new HashSet<>(); + for (int i = 1000; i < 1015; i++) { + validIanaCodes.add(i); + } + validIanaCodes.removeAll(forbiddenIanaCodes); + + for (int statusCode: validIanaCodes) { + EmbeddedChannel encoderChannel = new EmbeddedChannel(new WebSocket08FrameEncoder(true)); + EmbeddedChannel decoderChannel = new EmbeddedChannel(new WebSocket08FrameDecoder(true, true, 65535, false)); + + assertTrue(encoderChannel.writeOutbound(new CloseWebSocketFrame(statusCode, "Bye"))); + assertTrue(encoderChannel.finish()); + ByteBuf serializedCloseFrame = encoderChannel.readOutbound(); + assertNull(encoderChannel.readOutbound()); + + 
assertTrue(decoderChannel.writeInbound(serializedCloseFrame)); + assertTrue(decoderChannel.finish()); + + CloseWebSocketFrame outputFrame = decoderChannel.readInbound(); + assertNull(decoderChannel.readOutbound()); + try { + assertEquals(statusCode, outputFrame.statusCode()); + } finally { + outputFrame.release(); + } + } } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00Test.java index bda0734ad96..9d1606f3707 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00Test.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker00Test.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,17 +16,36 @@ package io.netty.handler.codec.http.websocketx; import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; import java.net.URI; public class WebSocketClientHandshaker00Test extends WebSocketClientHandshakerTest { @Override - protected WebSocketClientHandshaker newHandshaker(URI uri) { - return new WebSocketClientHandshaker00(uri, WebSocketVersion.V00, null, null, 1024); + protected WebSocketClientHandshaker newHandshaker(URI uri, String subprotocol, HttpHeaders headers, + boolean absoluteUpgradeUrl) { + return new WebSocketClientHandshaker00(uri, WebSocketVersion.V00, subprotocol, headers, + 1024, 10000, absoluteUpgradeUrl); } @Override protected CharSequence getOriginHeaderName() { return HttpHeaderNames.ORIGIN; } + + @Override + 
protected CharSequence getProtocolHeaderName() { + return HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL; + } + + @Override + protected CharSequence[] getHandshakeRequiredHeaderNames() { + return new CharSequence[] { + HttpHeaderNames.CONNECTION, + HttpHeaderNames.UPGRADE, + HttpHeaderNames.HOST, + HttpHeaderNames.SEC_WEBSOCKET_KEY1, + HttpHeaderNames.SEC_WEBSOCKET_KEY2, + }; + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07Test.java index bce6c73a78f..692bc3bbe64 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07Test.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker07Test.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,18 +15,59 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; +import org.junit.jupiter.api.Test; import java.net.URI; +import static org.junit.jupiter.api.Assertions.assertEquals; + public class WebSocketClientHandshaker07Test extends WebSocketClientHandshakerTest { + + @Test + public void testHostHeaderPreserved() { + URI uri = URI.create("ws://localhost:9999"); + WebSocketClientHandshaker handshaker = newHandshaker(uri, null, + new DefaultHttpHeaders().set(HttpHeaderNames.HOST, "test.netty.io"), false); + + FullHttpRequest 
request = handshaker.newHandshakeRequest(); + try { + assertEquals("/", request.uri()); + assertEquals("test.netty.io", request.headers().get(HttpHeaderNames.HOST)); + } finally { + request.release(); + } + } + @Override - protected WebSocketClientHandshaker newHandshaker(URI uri) { - return new WebSocketClientHandshaker07(uri, WebSocketVersion.V07, null, false, null, 1024); + protected WebSocketClientHandshaker newHandshaker(URI uri, String subprotocol, HttpHeaders headers, + boolean absoluteUpgradeUrl) { + return new WebSocketClientHandshaker07(uri, WebSocketVersion.V07, subprotocol, false, headers, + 1024, true, false, 10000, + absoluteUpgradeUrl); } @Override protected CharSequence getOriginHeaderName() { return HttpHeaderNames.SEC_WEBSOCKET_ORIGIN; } + + @Override + protected CharSequence getProtocolHeaderName() { + return HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL; + } + + @Override + protected CharSequence[] getHandshakeRequiredHeaderNames() { + return new CharSequence[] { + HttpHeaderNames.UPGRADE, + HttpHeaderNames.CONNECTION, + HttpHeaderNames.SEC_WEBSOCKET_KEY, + HttpHeaderNames.HOST, + HttpHeaderNames.SEC_WEBSOCKET_VERSION, + }; + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08Test.java index 4ce8016adda..34c5fb76d8b 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08Test.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker08Test.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,11 +15,16 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.handler.codec.http.HttpHeaders; + import java.net.URI; public class WebSocketClientHandshaker08Test extends WebSocketClientHandshaker07Test { @Override - protected WebSocketClientHandshaker newHandshaker(URI uri) { - return new WebSocketClientHandshaker07(uri, WebSocketVersion.V08, null, false, null, 1024); + protected WebSocketClientHandshaker newHandshaker(URI uri, String subprotocol, HttpHeaders headers, + boolean absoluteUpgradeUrl) { + return new WebSocketClientHandshaker08(uri, WebSocketVersion.V08, subprotocol, false, headers, + 1024, true, true, 10000, + absoluteUpgradeUrl); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13Test.java index ad89fde6bc1..2371caed598 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13Test.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshaker13Test.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,11 +15,24 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; + import java.net.URI; public class WebSocketClientHandshaker13Test extends WebSocketClientHandshaker07Test { + + @Override + protected WebSocketClientHandshaker newHandshaker(URI uri, String subprotocol, HttpHeaders headers, + boolean absoluteUpgradeUrl) { + return new WebSocketClientHandshaker13(uri, WebSocketVersion.V13, subprotocol, false, headers, + 1024, true, true, 10000, + absoluteUpgradeUrl); + } + @Override - protected WebSocketClientHandshaker newHandshaker(URI uri) { - return new WebSocketClientHandshaker13(uri, WebSocketVersion.V13, null, false, null, 1024); + protected CharSequence getOriginHeaderName() { + return HttpHeaderNames.ORIGIN; } + } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerTest.java index eeb0d69d1dc..76d2b2fb9c3 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketClientHandshakerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,28 +21,44 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.DefaultHttpHeaders; import io.netty.handler.codec.http.EmptyHttpHeaders; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpClientCodec; import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaders; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequestEncoder; import io.netty.handler.codec.http.HttpResponseDecoder; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; import io.netty.util.CharsetUtil; -import io.netty.util.internal.PlatformDependent; -import org.junit.Test; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.net.URI; +import java.util.concurrent.TimeUnit; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertTrue; public abstract class WebSocketClientHandshakerTest { - protected abstract WebSocketClientHandshaker newHandshaker(URI uri); + protected abstract WebSocketClientHandshaker newHandshaker(URI uri, String subprotocol, HttpHeaders headers, + boolean absoluteUpgradeUrl); + + protected WebSocketClientHandshaker 
newHandshaker(URI uri) { + return newHandshaker(uri, null, null, false); + } protected abstract CharSequence getOriginHeaderName(); + protected abstract CharSequence getProtocolHeaderName(); + + protected abstract CharSequence[] getHandshakeRequiredHeaderNames(); + @Test public void hostHeaderWs() { for (String scheme : new String[]{"ws://", "http://"}) { @@ -151,6 +167,19 @@ public void originHeaderWithoutScheme() { testOriginHeader("//LOCALHOST/", "http://localhost"); } + @Test + public void testSetOriginFromCustomHeaders() { + HttpHeaders customHeaders = new DefaultHttpHeaders().set(getOriginHeaderName(), "http://example.com"); + WebSocketClientHandshaker handshaker = newHandshaker(URI.create("ws://server.example.com/chat"), null, + customHeaders, false); + FullHttpRequest request = handshaker.newHandshakeRequest(); + try { + assertEquals("http://example.com", request.headers().get(getOriginHeaderName())); + } finally { + request.release(); + } + } + private void testHostHeader(String uri, String expected) { testHeaderDefaultHttp(uri, HttpHeaderNames.HOST, expected); } @@ -171,7 +200,7 @@ protected void testHeaderDefaultHttp(String uri, CharSequence header, String exp @Test @SuppressWarnings("deprecation") - public void testRawPath() { + public void testUpgradeUrl() { URI uri = URI.create("ws://localhost:9999/path%20with%20ws"); WebSocketClientHandshaker handshaker = newHandshaker(uri); FullHttpRequest request = handshaker.newHandshakeRequest(); @@ -183,7 +212,7 @@ public void testRawPath() { } @Test - public void testRawPathWithQuery() { + public void testUpgradeUrlWithQuery() { URI uri = URI.create("ws://localhost:9999/path%20with%20ws?a=b%20c"); WebSocketClientHandshaker handshaker = newHandshaker(uri); FullHttpRequest request = handshaker.newHandshakeRequest(); @@ -194,12 +223,50 @@ public void testRawPathWithQuery() { } } - @Test(timeout = 3000) + @Test + public void testUpgradeUrlWithoutPath() { + URI uri = URI.create("ws://localhost:9999"); + 
WebSocketClientHandshaker handshaker = newHandshaker(uri); + FullHttpRequest request = handshaker.newHandshakeRequest(); + try { + assertEquals("/", request.uri()); + } finally { + request.release(); + } + } + + @Test + public void testUpgradeUrlWithoutPathWithQuery() { + URI uri = URI.create("ws://localhost:9999?a=b%20c"); + WebSocketClientHandshaker handshaker = newHandshaker(uri); + FullHttpRequest request = handshaker.newHandshakeRequest(); + try { + assertEquals("/?a=b%20c", request.uri()); + } finally { + request.release(); + } + } + + @Test + public void testAbsoluteUpgradeUrlWithQuery() { + URI uri = URI.create("ws://localhost:9999/path%20with%20ws?a=b%20c"); + WebSocketClientHandshaker handshaker = newHandshaker(uri, null, null, true); + FullHttpRequest request = handshaker.newHandshakeRequest(); + try { + assertEquals("ws://localhost:9999/path%20with%20ws?a=b%20c", request.uri()); + } finally { + request.release(); + } + } + + @Test + @Timeout(value = 3000, unit = TimeUnit.MILLISECONDS) public void testHttpResponseAndFrameInSameBuffer() { testHttpResponseAndFrameInSameBuffer(false); } - @Test(timeout = 3000) + @Test + @Timeout(value = 3000, unit = TimeUnit.MILLISECONDS) public void testHttpResponseAndFrameInSameBufferCodec() { testHttpResponseAndFrameInSameBuffer(true); } @@ -208,7 +275,7 @@ private void testHttpResponseAndFrameInSameBuffer(boolean codec) { String url = "ws://localhost:9999/ws"; final WebSocketClientHandshaker shaker = newHandshaker(URI.create(url)); final WebSocketClientHandshaker handshaker = new WebSocketClientHandshaker( - shaker.uri(), shaker.version(), null, EmptyHttpHeaders.INSTANCE, Integer.MAX_VALUE) { + shaker.uri(), shaker.version(), null, EmptyHttpHeaders.INSTANCE, Integer.MAX_VALUE, -1) { @Override protected FullHttpRequest newHandshakeRequest() { return shaker.newHandshakeRequest(); @@ -231,13 +298,15 @@ protected WebSocketFrameEncoder newWebSocketEncoder() { } }; - byte[] data = new byte[24]; - 
PlatformDependent.threadLocalRandom().nextBytes(data); + // use randomBytes helper from utils to check that it functions properly + byte[] data = WebSocketUtil.randomBytes(24); // Create a EmbeddedChannel which we will use to encode a BinaryWebsocketFrame to bytes and so use these // to test the actual handshaker. WebSocketServerHandshakerFactory factory = new WebSocketServerHandshakerFactory(url, null, false); - WebSocketServerHandshaker socketServerHandshaker = factory.newHandshaker(shaker.newHandshakeRequest()); + FullHttpRequest request = shaker.newHandshakeRequest(); + WebSocketServerHandshaker socketServerHandshaker = factory.newHandshaker(request); + request.release(); EmbeddedChannel websocketChannel = new EmbeddedChannel(socketServerHandshaker.newWebSocketEncoder(), socketServerHandshaker.newWebsocketDecoder()); assertTrue(websocketChannel.writeOutbound(new BinaryWebSocketFrame(Unpooled.wrappedBuffer(data)))); @@ -257,7 +326,7 @@ protected WebSocketFrameEncoder newWebSocketEncoder() { EmbeddedChannel ch = new EmbeddedChannel(new HttpObjectAggregator(Integer.MAX_VALUE), new SimpleChannelInboundHandler() { @Override - protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) throws Exception { + protected void messageReceived(ChannelHandlerContext ctx, FullHttpResponse msg) throws Exception { handshaker.finishHandshake(ctx.channel(), msg); ctx.pipeline().remove(this); } @@ -292,4 +361,58 @@ protected void channelRead0(ChannelHandlerContext ctx, FullHttpResponse msg) thr frame.release(); } } + + @Test + public void testDuplicateWebsocketHandshakeHeaders() { + URI uri = URI.create("ws://localhost:9999/foo"); + + HttpHeaders inputHeaders = new DefaultHttpHeaders(); + String bogusSubProtocol = "bogusSubProtocol"; + String bogusHeaderValue = "bogusHeaderValue"; + + // add values for the headers that are reserved for use in the websockets handshake + for (CharSequence header : getHandshakeRequiredHeaderNames()) { + if 
(!HttpHeaderNames.HOST.equals(header)) { + inputHeaders.add(header, bogusHeaderValue); + } + } + inputHeaders.add(getProtocolHeaderName(), bogusSubProtocol); + + String realSubProtocol = "realSubProtocol"; + WebSocketClientHandshaker handshaker = newHandshaker(uri, realSubProtocol, inputHeaders, false); + FullHttpRequest request = handshaker.newHandshakeRequest(); + HttpHeaders outputHeaders = request.headers(); + + // the header values passed in originally have been replaced with values generated by the Handshaker + for (CharSequence header : getHandshakeRequiredHeaderNames()) { + assertEquals(1, outputHeaders.getAll(header).size()); + assertNotEquals(bogusHeaderValue, outputHeaders.get(header)); + } + + // the subprotocol header value is that of the subprotocol string passed into the Handshaker + assertEquals(1, outputHeaders.getAll(getProtocolHeaderName()).size()); + assertEquals(realSubProtocol, outputHeaders.get(getProtocolHeaderName())); + + request.release(); + } + + @Test + public void testWebSocketClientHandshakeException() { + URI uri = URI.create("ws://localhost:9999/exception"); + WebSocketClientHandshaker handshaker = newHandshaker(uri, null, null, false); + FullHttpResponse response = new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.UNAUTHORIZED); + response.headers().set(HttpHeaderNames.WWW_AUTHENTICATE, "realm = access token required"); + + try { + handshaker.finishHandshake(null, response); + } catch (WebSocketClientHandshakeException exception) { + assertEquals("Invalid handshake response getStatus: 401 Unauthorized", exception.getMessage()); + assertEquals(HttpResponseStatus.UNAUTHORIZED, exception.response().status()); + assertTrue(exception.response().headers().contains(HttpHeaderNames.WWW_AUTHENTICATE, + "realm = access token required", false)); + } finally { + response.release(); + } + } } + diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java 
b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java new file mode 100644 index 00000000000..9f9e43ea99d --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketCloseStatusTest.java @@ -0,0 +1,154 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under the License is + * distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and limitations under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.SortedSet; +import java.util.TreeSet; + +import org.assertj.core.api.ThrowableAssert; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.Test; + +import static org.assertj.core.api.Assertions.assertThatExceptionOfType; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotSame; +import static org.junit.jupiter.api.Assertions.assertSame; + +import static io.netty.handler.codec.http.websocketx.WebSocketCloseStatus.*; + +public class WebSocketCloseStatusTest { + + private final List validCodes = Arrays.asList( + NORMAL_CLOSURE, + ENDPOINT_UNAVAILABLE, + PROTOCOL_ERROR, + INVALID_MESSAGE_TYPE, + INVALID_PAYLOAD_DATA, + POLICY_VIOLATION, + MESSAGE_TOO_BIG, + MANDATORY_EXTENSION, + INTERNAL_SERVER_ERROR, + SERVICE_RESTART, + TRY_AGAIN_LATER, + BAD_GATEWAY + ); + + @Test + public void testToString() 
{ + assertEquals("1000 Bye", NORMAL_CLOSURE.toString()); + } + + @Test + public void testKnownStatuses() { + assertSame(NORMAL_CLOSURE, valueOf(1000)); + assertSame(ENDPOINT_UNAVAILABLE, valueOf(1001)); + assertSame(PROTOCOL_ERROR, valueOf(1002)); + assertSame(INVALID_MESSAGE_TYPE, valueOf(1003)); + assertSame(EMPTY, valueOf(1005)); + assertSame(ABNORMAL_CLOSURE, valueOf(1006)); + assertSame(INVALID_PAYLOAD_DATA, valueOf(1007)); + assertSame(POLICY_VIOLATION, valueOf(1008)); + assertSame(MESSAGE_TOO_BIG, valueOf(1009)); + assertSame(MANDATORY_EXTENSION, valueOf(1010)); + assertSame(INTERNAL_SERVER_ERROR, valueOf(1011)); + assertSame(SERVICE_RESTART, valueOf(1012)); + assertSame(TRY_AGAIN_LATER, valueOf(1013)); + assertSame(BAD_GATEWAY, valueOf(1014)); + assertSame(TLS_HANDSHAKE_FAILED, valueOf(1015)); + } + + @Test + public void testNaturalOrder() { + assertThat(PROTOCOL_ERROR, Matchers.greaterThan(NORMAL_CLOSURE)); + assertThat(PROTOCOL_ERROR, Matchers.greaterThan(valueOf(1001))); + assertThat(PROTOCOL_ERROR, Matchers.comparesEqualTo(PROTOCOL_ERROR)); + assertThat(PROTOCOL_ERROR, Matchers.comparesEqualTo(valueOf(1002))); + assertThat(PROTOCOL_ERROR, Matchers.lessThan(INVALID_MESSAGE_TYPE)); + assertThat(PROTOCOL_ERROR, Matchers.lessThan(valueOf(1007))); + } + + @Test + public void testUserDefinedStatuses() { + // Given, when + WebSocketCloseStatus feedTimeot = new WebSocketCloseStatus(6033, "Feed timed out"); + WebSocketCloseStatus untradablePrice = new WebSocketCloseStatus(6034, "Untradable price"); + + // Then + assertNotSame(feedTimeot, valueOf(6033)); + assertEquals(feedTimeot.code(), 6033); + assertEquals(feedTimeot.reasonText(), "Feed timed out"); + + assertNotSame(untradablePrice, valueOf(6034)); + assertEquals(untradablePrice.code(), 6034); + assertEquals(untradablePrice.reasonText(), "Untradable price"); + } + + @Test + public void testRfc6455CodeValidation() { + // Given + List knownCodes = Arrays.asList( + NORMAL_CLOSURE.code(), + 
ENDPOINT_UNAVAILABLE.code(), + PROTOCOL_ERROR.code(), + INVALID_MESSAGE_TYPE.code(), + INVALID_PAYLOAD_DATA.code(), + POLICY_VIOLATION.code(), + MESSAGE_TOO_BIG.code(), + MANDATORY_EXTENSION.code(), + INTERNAL_SERVER_ERROR.code(), + SERVICE_RESTART.code(), + TRY_AGAIN_LATER.code(), + BAD_GATEWAY.code() + ); + + SortedSet invalidCodes = new TreeSet(); + + // When + for (int statusCode = Short.MIN_VALUE; statusCode < Short.MAX_VALUE; statusCode++) { + if (!isValidStatusCode(statusCode)) { + invalidCodes.add(statusCode); + } + } + + // Then + assertEquals(0, invalidCodes.first().intValue()); + assertEquals(2999, invalidCodes.last().intValue()); + assertEquals(3000 - validCodes.size(), invalidCodes.size()); + + invalidCodes.retainAll(knownCodes); + assertEquals(invalidCodes, Collections.emptySet()); + } + + @Test + public void testValidationEnabled() { + assertThatExceptionOfType(IllegalArgumentException.class) + .isThrownBy(new ThrowableAssert.ThrowingCallable() { + + @Override + public void call() throws RuntimeException { + new WebSocketCloseStatus(1006, "validation disabled"); + } + }); + } + + @Test + public void testValidationDisabled() { + WebSocketCloseStatus status = new WebSocketCloseStatus(1006, "validation disabled", false); + assertEquals(1006, status.code()); + assertEquals("validation disabled", status.reasonText()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregatorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregatorTest.java index 007cbe616c6..2339db61c48 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregatorTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketFrameAggregatorTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,8 +21,13 @@ import io.netty.handler.codec.TooLongFrameException; import io.netty.util.CharsetUtil; import io.netty.util.ReferenceCountUtil; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class WebSocketFrameAggregatorTest { @@ -46,29 +51,29 @@ public void testAggregationBinary() { channel.writeInbound(new PongWebSocketFrame(Unpooled.wrappedBuffer(content1))); channel.writeInbound(new ContinuationWebSocketFrame(true, 0, Unpooled.wrappedBuffer(content3))); - Assert.assertTrue(channel.finish()); + assertTrue(channel.finish()); BinaryWebSocketFrame frame = channel.readInbound(); - Assert.assertTrue(frame.isFinalFragment()); - Assert.assertEquals(1, frame.rsv()); - Assert.assertArrayEquals(content1, toBytes(frame.content())); + assertTrue(frame.isFinalFragment()); + assertEquals(1, frame.rsv()); + assertArrayEquals(content1, toBytes(frame.content())); PingWebSocketFrame frame2 = channel.readInbound(); - Assert.assertTrue(frame2.isFinalFragment()); - Assert.assertEquals(0, frame2.rsv()); - Assert.assertArrayEquals(content1, toBytes(frame2.content())); + assertTrue(frame2.isFinalFragment()); + assertEquals(0, frame2.rsv()); + assertArrayEquals(content1, toBytes(frame2.content())); PongWebSocketFrame frame3 = channel.readInbound(); - Assert.assertTrue(frame3.isFinalFragment()); - Assert.assertEquals(0, frame3.rsv()); - 
Assert.assertArrayEquals(content1, toBytes(frame3.content())); + assertTrue(frame3.isFinalFragment()); + assertEquals(0, frame3.rsv()); + assertArrayEquals(content1, toBytes(frame3.content())); BinaryWebSocketFrame frame4 = channel.readInbound(); - Assert.assertTrue(frame4.isFinalFragment()); - Assert.assertEquals(0, frame4.rsv()); - Assert.assertArrayEquals(aggregatedContent, toBytes(frame4.content())); + assertTrue(frame4.isFinalFragment()); + assertEquals(0, frame4.rsv()); + assertArrayEquals(aggregatedContent, toBytes(frame4.content())); - Assert.assertNull(channel.readInbound()); + assertNull(channel.readInbound()); } @Test @@ -81,29 +86,29 @@ public void testAggregationText() { channel.writeInbound(new PongWebSocketFrame(Unpooled.wrappedBuffer(content1))); channel.writeInbound(new ContinuationWebSocketFrame(true, 0, Unpooled.wrappedBuffer(content3))); - Assert.assertTrue(channel.finish()); + assertTrue(channel.finish()); TextWebSocketFrame frame = channel.readInbound(); - Assert.assertTrue(frame.isFinalFragment()); - Assert.assertEquals(1, frame.rsv()); - Assert.assertArrayEquals(content1, toBytes(frame.content())); + assertTrue(frame.isFinalFragment()); + assertEquals(1, frame.rsv()); + assertArrayEquals(content1, toBytes(frame.content())); PingWebSocketFrame frame2 = channel.readInbound(); - Assert.assertTrue(frame2.isFinalFragment()); - Assert.assertEquals(0, frame2.rsv()); - Assert.assertArrayEquals(content1, toBytes(frame2.content())); + assertTrue(frame2.isFinalFragment()); + assertEquals(0, frame2.rsv()); + assertArrayEquals(content1, toBytes(frame2.content())); PongWebSocketFrame frame3 = channel.readInbound(); - Assert.assertTrue(frame3.isFinalFragment()); - Assert.assertEquals(0, frame3.rsv()); - Assert.assertArrayEquals(content1, toBytes(frame3.content())); + assertTrue(frame3.isFinalFragment()); + assertEquals(0, frame3.rsv()); + assertArrayEquals(content1, toBytes(frame3.content())); TextWebSocketFrame frame4 = channel.readInbound(); - 
Assert.assertTrue(frame4.isFinalFragment()); - Assert.assertEquals(0, frame4.rsv()); - Assert.assertArrayEquals(aggregatedContent, toBytes(frame4.content())); + assertTrue(frame4.isFinalFragment()); + assertEquals(0, frame4.rsv()); + assertArrayEquals(aggregatedContent, toBytes(frame4.content())); - Assert.assertNull(channel.readInbound()); + assertNull(channel.readInbound()); } @Test @@ -113,7 +118,7 @@ public void textFrameTooBig() throws Exception { channel.writeInbound(new BinaryWebSocketFrame(false, 0, Unpooled.wrappedBuffer(content1))); try { channel.writeInbound(new ContinuationWebSocketFrame(false, 0, Unpooled.wrappedBuffer(content2))); - Assert.fail(); + fail(); } catch (TooLongFrameException e) { // expected } @@ -124,7 +129,7 @@ public void textFrameTooBig() throws Exception { channel.writeInbound(new BinaryWebSocketFrame(false, 0, Unpooled.wrappedBuffer(content1))); try { channel.writeInbound(new ContinuationWebSocketFrame(false, 0, Unpooled.wrappedBuffer(content2))); - Assert.fail(); + fail(); } catch (TooLongFrameException e) { // expected } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeExceptionTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeExceptionTest.java new file mode 100644 index 00000000000..e9ec9d7b860 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeExceptionTest.java @@ -0,0 +1,76 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.DefaultHttpResponse; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpResponse; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpVersion; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +public class WebSocketHandshakeExceptionTest { + + @Test + public void testClientExceptionWithoutResponse() { + WebSocketClientHandshakeException clientException = new WebSocketClientHandshakeException("client message"); + + assertNull(clientException.response()); + assertEquals("client message", clientException.getMessage()); + } + + @Test + public void testClientExceptionWithResponse() { + HttpResponse httpResponse = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.BAD_REQUEST); + httpResponse.headers().set("x-header", "x-value"); + WebSocketClientHandshakeException clientException = new WebSocketClientHandshakeException("client message", + httpResponse); + + assertNotNull(clientException.response()); + assertEquals("client message", clientException.getMessage()); + assertEquals(HttpResponseStatus.BAD_REQUEST, clientException.response().status()); + assertEquals(httpResponse.headers(), 
clientException.response().headers()); + } + + @Test + public void testServerExceptionWithoutRequest() { + WebSocketServerHandshakeException serverException = new WebSocketServerHandshakeException("server message"); + + assertNull(serverException.request()); + assertEquals("server message", serverException.getMessage()); + } + + @Test + public void testClientExceptionWithRequest() { + HttpRequest httpRequest = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, + "ws://localhost:9999/ws"); + httpRequest.headers().set("x-header", "x-value"); + WebSocketServerHandshakeException serverException = new WebSocketServerHandshakeException("server message", + httpRequest); + + assertNotNull(serverException.request()); + assertEquals("server message", serverException.getMessage()); + assertEquals(HttpMethod.GET, serverException.request().method()); + assertEquals(httpRequest.headers(), serverException.request().headers()); + assertEquals(httpRequest.uri(), serverException.request().uri()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeHandOverTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeHandOverTest.java index 10ad7749845..05157408963 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeHandOverTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketHandshakeHandOverTest.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,17 +21,25 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.SimpleChannelInboundHandler; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.EmptyHttpHeaders; import io.netty.handler.codec.http.HttpClientCodec; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpServerCodec; import io.netty.handler.codec.http.websocketx.WebSocketClientProtocolHandler.ClientHandshakeStateEvent; import io.netty.handler.codec.http.websocketx.WebSocketServerProtocolHandler.ServerHandshakeStateEvent; -import org.junit.Before; -import org.junit.Test; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.Timeout; import java.net.URI; +import java.util.concurrent.CompletionException; +import java.util.concurrent.TimeUnit; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class WebSocketHandshakeHandOverTest { @@ -39,13 +47,38 @@ public class WebSocketHandshakeHandOverTest { private WebSocketServerProtocolHandler.HandshakeComplete serverHandshakeComplete; private boolean clientReceivedHandshake; private boolean clientReceivedMessage; + private boolean serverReceivedCloseHandshake; + private boolean clientForceClosed; + private boolean clientHandshakeTimeout; - @Before + private final class CloseNoOpServerProtocolHandler extends 
WebSocketServerProtocolHandler { + CloseNoOpServerProtocolHandler(String websocketPath) { + super(WebSocketServerProtocolConfig.newBuilder() + .websocketPath(websocketPath) + .allowExtensions(false) + .sendCloseFrame(null) + .build()); + } + + @Override + protected void decode(ChannelHandlerContext ctx, WebSocketFrame frame) throws Exception { + if (frame instanceof CloseWebSocketFrame) { + serverReceivedCloseHandshake = true; + return; + } + super.decode(ctx, frame); + } + } + + @BeforeEach public void setUp() { serverReceivedHandshake = false; serverHandshakeComplete = null; clientReceivedHandshake = false; clientReceivedMessage = false; + serverReceivedCloseHandshake = false; + clientForceClosed = false; + clientHandshakeTimeout = false; } @Test @@ -62,7 +95,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { } } @Override - protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception { + protected void messageReceived(ChannelHandlerContext ctx, Object msg) throws Exception { } }); @@ -74,7 +107,7 @@ public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { } } @Override - protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Exception { + protected void messageReceived(ChannelHandlerContext ctx, Object msg) throws Exception { if (msg instanceof TextWebSocketFrame) { clientReceivedMessage = true; } @@ -95,6 +128,156 @@ protected void channelRead0(ChannelHandlerContext ctx, Object msg) throws Except assertTrue(clientReceivedMessage); } + @Test + public void testClientHandshakeTimeout() throws Throwable { + EmbeddedChannel serverChannel = createServerChannel(new SimpleChannelInboundHandler() { + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { + if (evt == ServerHandshakeStateEvent.HANDSHAKE_COMPLETE) { + serverReceivedHandshake = true; + // immediately send a message to the client on connect + ctx.writeAndFlush(new TextWebSocketFrame("abc")); + } 
else if (evt instanceof WebSocketServerProtocolHandler.HandshakeComplete) { + serverHandshakeComplete = (WebSocketServerProtocolHandler.HandshakeComplete) evt; + } + } + + @Override + protected void messageReceived(ChannelHandlerContext ctx, Object msg) throws Exception { + } + }); + + EmbeddedChannel clientChannel = createClientChannel(new SimpleChannelInboundHandler() { + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { + if (evt == ClientHandshakeStateEvent.HANDSHAKE_COMPLETE) { + clientReceivedHandshake = true; + } else if (evt == ClientHandshakeStateEvent.HANDSHAKE_TIMEOUT) { + clientHandshakeTimeout = true; + } + } + + @Override + protected void messageReceived(ChannelHandlerContext ctx, Object msg) throws Exception { + if (msg instanceof TextWebSocketFrame) { + clientReceivedMessage = true; + } + } + }, 100); + // Client send the handshake request to server + transferAllDataWithMerge(clientChannel, serverChannel); + // Server do not send the response back + // transferAllDataWithMerge(serverChannel, clientChannel); + WebSocketClientProtocolHandshakeHandler handshakeHandler = + (WebSocketClientProtocolHandshakeHandler) clientChannel + .pipeline().get(WebSocketClientProtocolHandshakeHandler.class.getName()); + + while (!handshakeHandler.getHandshakeFuture().isDone()) { + Thread.sleep(10); + // We need to run all pending tasks as the handshake timeout is scheduled on the EventLoop. + clientChannel.runScheduledPendingTasks(); + } + assertTrue(clientHandshakeTimeout); + assertFalse(clientReceivedHandshake); + assertFalse(clientReceivedMessage); + // Should throw WebSocketHandshakeException + try { + assertTrue(assertThrows(CompletionException.class, + () -> handshakeHandler.getHandshakeFuture().syncUninterruptibly()) + .getCause() instanceof WebSocketHandshakeException); + } finally { + serverChannel.finishAndReleaseAll(); + } + } + + /** + * Tests a scenario when channel is closed while the handshake is in progress. 
Validates that the handshake + * future is notified in such cases. + */ + @Test + public void testHandshakeFutureIsNotifiedOnChannelClose() throws Exception { + EmbeddedChannel clientChannel = createClientChannel(null); + EmbeddedChannel serverChannel = createServerChannel(null); + + try { + // Start handshake from client to server but don't complete the handshake for the purpose of this test. + transferAllDataWithMerge(clientChannel, serverChannel); + + final WebSocketClientProtocolHandler clientWsHandler = + clientChannel.pipeline().get(WebSocketClientProtocolHandler.class); + final WebSocketClientProtocolHandshakeHandler clientWsHandshakeHandler = + clientChannel.pipeline().get(WebSocketClientProtocolHandshakeHandler.class); + + final ChannelHandlerContext ctx = clientChannel.pipeline().context(WebSocketClientProtocolHandler.class); + + // Close the channel while the handshake is in progress. The channel could be closed before the handshake is + // complete due to a number of varied reasons. To reproduce the test scenario for this test case, + // we would manually close the channel. + clientWsHandler.close(ctx); + + // At this stage handshake is incomplete but the handshake future should be completed exceptionally since + // channel is closed. 
+ assertTrue(clientWsHandshakeHandler.getHandshakeFuture().isDone()); + } finally { + serverChannel.finishAndReleaseAll(); + clientChannel.finishAndReleaseAll(); + } + } + + @Test + @Timeout(value = 10000, unit = TimeUnit.MILLISECONDS) + public void testClientHandshakerForceClose() throws Exception { + final WebSocketClientHandshaker handshaker = WebSocketClientHandshakerFactory.newHandshaker( + new URI("ws://localhost:1234/test"), WebSocketVersion.V13, null, true, + EmptyHttpHeaders.INSTANCE, Integer.MAX_VALUE, true, false, 20); + + EmbeddedChannel serverChannel = createServerChannel( + new CloseNoOpServerProtocolHandler("/test"), + new SimpleChannelInboundHandler() { + @Override + protected void messageReceived(ChannelHandlerContext ctx, Object msg) throws Exception { + } + }); + + EmbeddedChannel clientChannel = createClientChannel(handshaker, new SimpleChannelInboundHandler() { + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { + if (evt == ClientHandshakeStateEvent.HANDSHAKE_COMPLETE) { + ctx.channel().closeFuture().addListener(future -> clientForceClosed = true); + handshaker.close(ctx.channel(), new CloseWebSocketFrame()); + } + } + @Override + protected void messageReceived(ChannelHandlerContext ctx, Object msg) throws Exception { + } + }); + + // Transfer the handshake from the client to the server + transferAllDataWithMerge(clientChannel, serverChannel); + // Transfer the handshake from the server to client + transferAllDataWithMerge(serverChannel, clientChannel); + + // Transfer closing handshake + transferAllDataWithMerge(clientChannel, serverChannel); + assertTrue(serverReceivedCloseHandshake); + // Should not be closed yet as we disabled closing the connection on the server + assertFalse(clientForceClosed); + + while (!clientForceClosed) { + Thread.sleep(10); + // We need to run all pending tasks as the force close timeout is scheduled on the EventLoop. 
+ clientChannel.runPendingTasks(); + } + + // clientForceClosed would be set to TRUE after any close, + // so check here that force close timeout was actually fired + assertTrue(handshaker.isForceCloseComplete()); + + // Both should be empty + assertFalse(serverChannel.finishAndReleaseAll()); + assertFalse(clientChannel.finishAndReleaseAll()); + } + /** * Transfers all pending data from the source channel into the destination channel.
    * Merges all data into a single buffer before transmission into the destination. @@ -128,20 +311,50 @@ private static void transferAllDataWithMerge(EmbeddedChannel srcChannel, Embedde } private static EmbeddedChannel createClientChannel(ChannelHandler handler) throws Exception { + return createClientChannel(handler, WebSocketClientProtocolConfig.newBuilder() + .webSocketUri("ws://localhost:1234/test") + .subprotocol("test-proto-2") + .build()); + } + + private static EmbeddedChannel createClientChannel(ChannelHandler handler, long timeoutMillis) throws Exception { + return createClientChannel(handler, WebSocketClientProtocolConfig.newBuilder() + .webSocketUri("ws://localhost:1234/test") + .subprotocol("test-proto-2") + .handshakeTimeoutMillis(timeoutMillis) + .build()); + } + + private static EmbeddedChannel createClientChannel(ChannelHandler handler, WebSocketClientProtocolConfig config) { return new EmbeddedChannel( new HttpClientCodec(), new HttpObjectAggregator(8192), - new WebSocketClientProtocolHandler(new URI("ws://localhost:1234/test"), - WebSocketVersion.V13, "test-proto-2", - false, null, 65536), + new WebSocketClientProtocolHandler(config), + handler); + } + + private static EmbeddedChannel createClientChannel(WebSocketClientHandshaker handshaker, + ChannelHandler handler) throws Exception { + return new EmbeddedChannel( + new HttpClientCodec(), + new HttpObjectAggregator(8192), + // Note that we're switching off close frames handling on purpose to test forced close on timeout. 
+ new WebSocketClientProtocolHandler(handshaker, false, false), handler); } private static EmbeddedChannel createServerChannel(ChannelHandler handler) { + return createServerChannel( + new WebSocketServerProtocolHandler("/test", "test-proto-1, test-proto-2", false), + handler); + } + + private static EmbeddedChannel createServerChannel(WebSocketServerProtocolHandler webSocketHandler, + ChannelHandler handler) { return new EmbeddedChannel( new HttpServerCodec(), new HttpObjectAggregator(8192), - new WebSocketServerProtocolHandler("/test", "test-proto-1, test-proto-2", false), + webSocketHandler, handler); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketProtocolHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketProtocolHandlerTest.java new file mode 100644 index 00000000000..b361ff98d96 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketProtocolHandlerTest.java @@ -0,0 +1,189 @@ +/* + * Copyright 2018 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ + +package io.netty.handler.codec.http.websocketx; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.flow.FlowControlHandler; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.concurrent.Future; +import org.hamcrest.Matchers; +import org.junit.jupiter.api.Test; + +import java.util.concurrent.atomic.AtomicReference; + +import static io.netty.util.CharsetUtil.UTF_8; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +/** + * Tests common, abstract class functionality in {@link WebSocketClientProtocolHandler}. + */ +public class WebSocketProtocolHandlerTest { + + @Test + public void testPingFrame() { + ByteBuf pingData = Unpooled.copiedBuffer("Hello, world", UTF_8); + EmbeddedChannel channel = new EmbeddedChannel(new WebSocketProtocolHandler() { }); + + PingWebSocketFrame inputMessage = new PingWebSocketFrame(pingData); + assertFalse(channel.writeInbound(inputMessage)); // the message was not propagated inbound + + // a Pong frame was written to the channel + PongWebSocketFrame response = channel.readOutbound(); + assertEquals(pingData, response.content()); + + pingData.release(); + assertFalse(channel.finish()); + } + + @Test + public void testPingPongFlowControlWhenAutoReadIsDisabled() { + String text1 = "Hello, world #1"; + String text2 = "Hello, world #2"; + String text3 = "Hello, world #3"; + String text4 = "Hello, world #4"; + + EmbeddedChannel channel = new EmbeddedChannel(); + channel.config().setAutoRead(false); + channel.pipeline().addLast(new FlowControlHandler()); + channel.pipeline().addLast(new 
WebSocketProtocolHandler() { }); + + // When + assertFalse(channel.writeInbound( + new PingWebSocketFrame(Unpooled.copiedBuffer(text1, UTF_8)), + new TextWebSocketFrame(text2), + new TextWebSocketFrame(text3), + new PingWebSocketFrame(Unpooled.copiedBuffer(text4, UTF_8)) + )); + + // Then - no messages were handled or propagated + assertNull(channel.readInbound()); + assertNull(channel.readOutbound()); + + // When + channel.read(); + + // Then - pong frame was written to the outbound + PongWebSocketFrame response1 = channel.readOutbound(); + assertEquals(text1, response1.content().toString(UTF_8)); + + // And - one requested message was handled and propagated inbound + TextWebSocketFrame message2 = channel.readInbound(); + assertEquals(text2, message2.text()); + + // And - no more messages were handled or propagated + assertNull(channel.readInbound()); + assertNull(channel.readOutbound()); + + // When + channel.read(); + + // Then - one requested message was handled and propagated inbound + TextWebSocketFrame message3 = channel.readInbound(); + assertEquals(text3, message3.text()); + + // And - no more messages were handled or propagated + // Precisely, ping frame 'text4' was NOT read or handled. + // It would be handle ONLY on the next 'channel.read()' call. 
+ assertNull(channel.readInbound()); + assertNull(channel.readOutbound()); + + // Cleanup + response1.release(); + message2.release(); + message3.release(); + assertFalse(channel.finish()); + } + + @Test + public void testPongFrameDropFrameFalse() { + EmbeddedChannel channel = new EmbeddedChannel(new WebSocketProtocolHandler(false) { }); + + PongWebSocketFrame pingResponse = new PongWebSocketFrame(); + assertTrue(channel.writeInbound(pingResponse)); + + assertPropagatedInbound(pingResponse, channel); + + pingResponse.release(); + assertFalse(channel.finish()); + } + + @Test + public void testPongFrameDropFrameTrue() { + EmbeddedChannel channel = new EmbeddedChannel(new WebSocketProtocolHandler(true) { }); + + PongWebSocketFrame pingResponse = new PongWebSocketFrame(); + assertFalse(channel.writeInbound(pingResponse)); // message was not propagated inbound + } + + @Test + public void testTextFrame() { + EmbeddedChannel channel = new EmbeddedChannel(new WebSocketProtocolHandler() { }); + + TextWebSocketFrame textFrame = new TextWebSocketFrame(); + assertTrue(channel.writeInbound(textFrame)); + + assertPropagatedInbound(textFrame, channel); + + textFrame.release(); + assertFalse(channel.finish()); + } + + @Test + public void testTimeout() throws Exception { + final AtomicReference> ref = new AtomicReference<>(); + WebSocketProtocolHandler handler = new WebSocketProtocolHandler( + false, WebSocketCloseStatus.NORMAL_CLOSURE, 1) { }; + EmbeddedChannel channel = new EmbeddedChannel(new ChannelHandler() { + @Override + public Future write(ChannelHandlerContext ctx, Object msg) { + Future future = ctx.newPromise(); + ref.set(future); + ReferenceCountUtil.release(msg); + return future; + } + }, handler); + + Future future = channel.writeAndFlush(new CloseWebSocketFrame()); + ChannelHandlerContext ctx = channel.pipeline().context(WebSocketProtocolHandler.class); + handler.close(ctx); + + do { + Thread.sleep(10); + channel.runPendingTasks(); + } while (!future.isDone()); + + 
assertThat(future.cause(), Matchers.instanceOf(WebSocketHandshakeException.class)); + assertFalse(ref.get().isDone()); + assertFalse(channel.finish()); + } + + /** + * Asserts that a message was propagated inbound through the channel. + */ + private static void assertPropagatedInbound(T message, EmbeddedChannel channel) { + T propagatedResponse = channel.readInbound(); + assertEquals(message, propagatedResponse); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketRequestBuilder.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketRequestBuilder.java index fd199b864fb..206db33aff0 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketRequestBuilder.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketRequestBuilder.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -138,7 +138,11 @@ public FullHttpRequest build() { headers.set(HttpHeaderNames.SEC_WEBSOCKET_KEY, key); } if (origin != null) { - headers.set(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, origin); + if (version == WebSocketVersion.V13 || version == WebSocketVersion.V00) { + headers.set(HttpHeaderNames.ORIGIN, origin); + } else { + headers.set(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, origin); + } } if (version != null) { headers.set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, version.toHttpHeaderValue()); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00Test.java index 76826aba8b0..2283dc1b729 
100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00Test.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker00Test.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,7 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; @@ -29,12 +30,25 @@ import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.handler.codec.http.LastHttpContent; import io.netty.util.CharsetUtil; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static io.netty.handler.codec.http.HttpVersion.*; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.fail; -public class WebSocketServerHandshaker00Test { +public class WebSocketServerHandshaker00Test extends WebSocketServerHandshakerTest { + + @Override + protected WebSocketServerHandshaker newHandshaker(String webSocketURL, String subprotocols, + WebSocketDecoderConfig decoderConfig) { + return new WebSocketServerHandshaker00(webSocketURL, subprotocols, decoderConfig); + } + + @Override + protected WebSocketVersion webSocketVersion() { + return WebSocketVersion.V00; + } @Test public void testPerformOpeningHandshake() { @@ -46,6 +60,34 @@ public void 
testPerformOpeningHandshakeSubProtocolNotSupported() { testPerformOpeningHandshake0(false); } + @Test + public void testPerformHandshakeWithoutOriginHeader() { + EmbeddedChannel ch = new EmbeddedChannel( + new HttpObjectAggregator(42), new HttpRequestDecoder(), new HttpResponseEncoder()); + + FullHttpRequest req = new DefaultFullHttpRequest( + HTTP_1_1, HttpMethod.GET, "/chat", Unpooled.copiedBuffer("^n:ds[4U", CharsetUtil.US_ASCII)); + + req.headers().set(HttpHeaderNames.HOST, "server.example.com"); + req.headers().set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); + req.headers().set(HttpHeaderNames.CONNECTION, "Upgrade"); + req.headers().set(HttpHeaderNames.SEC_WEBSOCKET_KEY1, "4 @1 46546xW%0l 1 5"); + req.headers().set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, "chat, superchat"); + + WebSocketServerHandshaker00 handshaker00 = new WebSocketServerHandshaker00( + "ws://example.com/chat", "chat", Integer.MAX_VALUE); + try { + handshaker00.handshake(ch, req); + fail("Expecting WebSocketHandshakeException"); + } catch (WebSocketHandshakeException e) { + assertEquals("Missing origin header, got only " + + "[host, upgrade, connection, sec-websocket-key1, sec-websocket-protocol]", + e.getMessage()); + } finally { + req.release(); + } + } + private static void testPerformOpeningHandshake0(boolean subProtocol) { EmbeddedChannel ch = new EmbeddedChannel( new HttpObjectAggregator(42), new HttpRequestDecoder(), new HttpResponseEncoder()); @@ -70,19 +112,19 @@ private static void testPerformOpeningHandshake0(boolean subProtocol) { } EmbeddedChannel ch2 = new EmbeddedChannel(new HttpResponseDecoder()); - ch2.writeInbound(ch.readOutbound()); + ch2.writeInbound((ByteBuf) ch.readOutbound()); HttpResponse res = ch2.readInbound(); - Assert.assertEquals("ws://example.com/chat", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_LOCATION)); + assertEquals("ws://example.com/chat", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_LOCATION)); if (subProtocol) { - 
Assert.assertEquals("chat", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); + assertEquals("chat", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); } else { - Assert.assertNull(res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); + assertNull(res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); } LastHttpContent content = ch2.readInbound(); - Assert.assertEquals("8jKS'y:G*Co,Wxa-", content.content().toString(CharsetUtil.US_ASCII)); + assertEquals("8jKS'y:G*Co,Wxa-", content.content().toString(CharsetUtil.US_ASCII)); content.release(); req.release(); } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker07Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker07Test.java new file mode 100644 index 00000000000..0a13f429150 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker07Test.java @@ -0,0 +1,30 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http.websocketx; + +public class WebSocketServerHandshaker07Test extends WebSocketServerHandshakerTest { + + @Override + protected WebSocketServerHandshaker newHandshaker(String webSocketURL, String subprotocols, + WebSocketDecoderConfig decoderConfig) { + return new WebSocketServerHandshaker07(webSocketURL, subprotocols, decoderConfig); + } + + @Override + protected WebSocketVersion webSocketVersion() { + return WebSocketVersion.V07; + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08Test.java index 2c8cc6fc255..153d918759b 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08Test.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker08Test.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -28,12 +28,24 @@ import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; import io.netty.util.ReferenceCountUtil; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.handler.codec.http.HttpVersion.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNull; -public class WebSocketServerHandshaker08Test { +public class WebSocketServerHandshaker08Test extends WebSocketServerHandshakerTest { + + @Override + protected WebSocketServerHandshaker newHandshaker(String webSocketURL, String subprotocols, + WebSocketDecoderConfig decoderConfig) { + return new WebSocketServerHandshaker08(webSocketURL, subprotocols, decoderConfig); + } + + @Override + protected WebSocketVersion webSocketVersion() { + return WebSocketVersion.V08; + } @Test public void testPerformOpeningHandshake() { @@ -72,12 +84,12 @@ private static void testPerformOpeningHandshake0(boolean subProtocol) { ch2.writeInbound(resBuf); HttpResponse res = ch2.readInbound(); - Assert.assertEquals( + assertEquals( "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT)); if (subProtocol) { - Assert.assertEquals("chat", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); + assertEquals("chat", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); } else { - Assert.assertNull(res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); + assertNull(res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); } ReferenceCountUtil.release(res); req.release(); diff --git 
a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13Test.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13Test.java index b66851e851e..5dc6590a7f5 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13Test.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshaker13Test.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,6 +16,8 @@ package io.netty.handler.codec.http.websocketx; import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelHandler; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; @@ -27,13 +29,33 @@ import io.netty.handler.codec.http.HttpResponse; import io.netty.handler.codec.http.HttpResponseDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpServerCodec; import io.netty.util.ReferenceCountUtil; -import org.junit.Assert; -import org.junit.Test; +import io.netty.util.ReferenceCounted; +import org.hamcrest.CoreMatchers; +import org.junit.jupiter.api.Test; + +import java.util.Iterator; import static io.netty.handler.codec.http.HttpVersion.*; +import static org.hamcrest.MatcherAssert.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNull; +import static 
org.junit.jupiter.api.Assertions.fail; + +public class WebSocketServerHandshaker13Test extends WebSocketServerHandshakerTest { -public class WebSocketServerHandshaker13Test { + @Override + protected WebSocketServerHandshaker newHandshaker(String webSocketURL, String subprotocols, + WebSocketDecoderConfig decoderConfig) { + return new WebSocketServerHandshaker13(webSocketURL, subprotocols, decoderConfig); + } + + @Override + protected WebSocketVersion webSocketVersion() { + return WebSocketVersion.V13; + } @Test public void testPerformOpeningHandshake() { @@ -47,8 +69,50 @@ public void testPerformOpeningHandshakeSubProtocolNotSupported() { private static void testPerformOpeningHandshake0(boolean subProtocol) { EmbeddedChannel ch = new EmbeddedChannel( - new HttpObjectAggregator(42), new HttpRequestDecoder(), new HttpResponseEncoder()); + new HttpObjectAggregator(42), new HttpResponseEncoder(), new HttpRequestDecoder()); + + if (subProtocol) { + testUpgrade0(ch, new WebSocketServerHandshaker13( + "ws://example.com/chat", "chat", false, Integer.MAX_VALUE, false)); + } else { + testUpgrade0(ch, new WebSocketServerHandshaker13( + "ws://example.com/chat", null, false, Integer.MAX_VALUE, false)); + } + assertFalse(ch.finish()); + } + + @Test + public void testCloseReasonWithEncoderAndDecoder() { + testCloseReason0(new HttpResponseEncoder(), new HttpRequestDecoder()); + } + @Test + public void testCloseReasonWithCodec() { + testCloseReason0(new HttpServerCodec()); + } + + private static void testCloseReason0(ChannelHandler... 
handlers) { + EmbeddedChannel ch = new EmbeddedChannel( + new HttpObjectAggregator(42)); + ch.pipeline().addLast(handlers); + testUpgrade0(ch, new WebSocketServerHandshaker13("ws://example.com/chat", "chat", + WebSocketDecoderConfig.newBuilder().maxFramePayloadLength(4).closeOnProtocolViolation(true).build())); + + ch.writeOutbound(new BinaryWebSocketFrame(Unpooled.wrappedBuffer(new byte[8]))); + ByteBuf buffer = ch.readOutbound(); + try { + ch.writeInbound(buffer); + fail(); + } catch (CorruptedWebSocketFrameException expected) { + // expected + } + ReferenceCounted closeMessage = ch.readOutbound(); + assertThat(closeMessage, CoreMatchers.instanceOf(ByteBuf.class)); + closeMessage.release(); + assertFalse(ch.finish()); + } + + private static void testUpgrade0(EmbeddedChannel ch, WebSocketServerHandshaker13 handshaker) { FullHttpRequest req = new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, "/chat"); req.headers().set(HttpHeaderNames.HOST, "server.example.com"); req.headers().set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); @@ -58,13 +122,7 @@ private static void testPerformOpeningHandshake0(boolean subProtocol) { req.headers().set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, "chat, superchat"); req.headers().set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, "13"); - if (subProtocol) { - new WebSocketServerHandshaker13( - "ws://example.com/chat", "chat", false, Integer.MAX_VALUE, false).handshake(ch, req); - } else { - new WebSocketServerHandshaker13( - "ws://example.com/chat", null, false, Integer.MAX_VALUE, false).handshake(ch, req); - } + handshaker.handshake(ch, req); ByteBuf resBuf = ch.readOutbound(); @@ -72,12 +130,14 @@ private static void testPerformOpeningHandshake0(boolean subProtocol) { ch2.writeInbound(resBuf); HttpResponse res = ch2.readInbound(); - Assert.assertEquals( + assertEquals( "s3pPLMBiTxaQ9kYGzzhZRbK+xOo=", res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT)); - if (subProtocol) { - Assert.assertEquals("chat", 
res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); + Iterator subProtocols = handshaker.subprotocols().iterator(); + if (subProtocols.hasNext()) { + assertEquals(subProtocols.next(), + res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); } else { - Assert.assertNull(res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); + assertNull(res.headers().get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); } ReferenceCountUtil.release(res); req.release(); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactoryTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactoryTest.java index 787400bba3e..f978e92220e 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactoryTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerFactoryTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -22,9 +22,12 @@ import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http.HttpResponseStatus; import io.netty.util.ReferenceCountUtil; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; public class WebSocketServerHandshakerFactoryTest { diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerTest.java new file mode 100644 index 00000000000..754f17ca54b --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerHandshakerTest.java @@ -0,0 +1,113 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.handler.codec.http.DefaultFullHttpRequest; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpHeaders; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpVersion; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public abstract class WebSocketServerHandshakerTest { + + protected abstract WebSocketServerHandshaker newHandshaker(String webSocketURL, String subprotocols, + WebSocketDecoderConfig decoderConfig); + + protected abstract WebSocketVersion webSocketVersion(); + + @Test + public void testDuplicateHandshakeResponseHeaders() { + WebSocketServerHandshaker serverHandshaker = newHandshaker("ws://example.com/chat", + "chat", WebSocketDecoderConfig.DEFAULT); + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/chat"); + request.headers() + .set(HttpHeaderNames.HOST, "example.com") + .set(HttpHeaderNames.ORIGIN, "example.com") + .set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.SEC_WEBSOCKET_KEY, "dGhlIHNhbXBsZSBub25jZQ==") + .set(HttpHeaderNames.SEC_WEBSOCKET_ORIGIN, "http://example.com") + .set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, "chat, superchat") + .set(HttpHeaderNames.WEBSOCKET_PROTOCOL, "chat, superchat") + .set(HttpHeaderNames.SEC_WEBSOCKET_VERSION, webSocketVersion().toAsciiString()); + HttpHeaders customResponseHeaders = new 
DefaultHttpHeaders(); + // set duplicate required headers and one custom + customResponseHeaders + .set(HttpHeaderNames.CONNECTION, HttpHeaderValues.UPGRADE) + .set(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET) + .set(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL, "superchat") + .set(HttpHeaderNames.WEBSOCKET_PROTOCOL, "superchat") + .set("custom", "header"); + + if (webSocketVersion() != WebSocketVersion.V00) { + customResponseHeaders.set(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, "12345"); + } + + FullHttpResponse response = null; + try { + response = serverHandshaker.newHandshakeResponse(request, customResponseHeaders); + HttpHeaders responseHeaders = response.headers(); + + assertEquals(1, responseHeaders.getAll(HttpHeaderNames.CONNECTION).size()); + assertEquals(1, responseHeaders.getAll(HttpHeaderNames.UPGRADE).size()); + assertTrue(responseHeaders.containsValue("custom", "header", true)); + + if (webSocketVersion() != WebSocketVersion.V00) { + assertFalse(responseHeaders.containsValue(HttpHeaderNames.SEC_WEBSOCKET_ACCEPT, "12345", false)); + assertEquals(1, responseHeaders.getAll(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL).size()); + assertEquals("chat", responseHeaders.get(HttpHeaderNames.SEC_WEBSOCKET_PROTOCOL)); + } else { + assertEquals(1, responseHeaders.getAll(HttpHeaderNames.WEBSOCKET_PROTOCOL).size()); + assertEquals("chat", responseHeaders.get(HttpHeaderNames.WEBSOCKET_PROTOCOL)); + } + } finally { + request.release(); + if (response != null) { + response.release(); + } + } + } + + @Test + public void testWebSocketServerHandshakeException() { + WebSocketServerHandshaker serverHandshaker = newHandshaker("ws://example.com/chat", + "chat", WebSocketDecoderConfig.DEFAULT); + + FullHttpRequest request = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, + "ws://example.com/chat"); + request.headers().set("x-client-header", "value"); + try { + serverHandshaker.handshake(null, request, null); + } catch (WebSocketServerHandshakeException exception) { + 
assertNotNull(exception.getMessage()); + assertEquals(request.headers(), exception.request().headers()); + assertEquals(HttpMethod.GET, exception.request().method()); + } finally { + request.release(); + } + } +} + diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java index 50bd6bebeaa..c1d327ec741 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketServerProtocolHandlerTest.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,42 +15,50 @@ */ package io.netty.handler.codec.http.websocketx; +import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.channel.ChannelOutboundHandlerAdapter; -import io.netty.channel.ChannelPromise; import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.http.DefaultFullHttpRequest; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpClientCodec; +import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequest; import 
io.netty.handler.codec.http.HttpRequestDecoder; import io.netty.handler.codec.http.HttpResponseEncoder; +import io.netty.handler.codec.http.HttpServerCodec; +import io.netty.util.CharsetUtil; import io.netty.util.ReferenceCountUtil; -import org.junit.Before; -import org.junit.Test; +import io.netty.util.concurrent.Future; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; import java.util.ArrayDeque; import java.util.Queue; -import static io.netty.handler.codec.http.HttpResponseStatus.*; -import static io.netty.handler.codec.http.HttpVersion.*; -import static org.junit.Assert.*; +import static io.netty.handler.codec.http.HttpResponseStatus.BAD_REQUEST; +import static io.netty.handler.codec.http.HttpResponseStatus.SWITCHING_PROTOCOLS; +import static io.netty.handler.codec.http.HttpVersion.HTTP_1_1; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class WebSocketServerProtocolHandlerTest { - private final Queue responses = new ArrayDeque(); + private final Queue responses = new ArrayDeque<>(); - @Before + @BeforeEach public void setUp() { responses.clear(); } @Test - public void testHttpUpgradeRequest() throws Exception { + public void testHttpUpgradeRequest() { EmbeddedChannel ch = createChannel(new MockOutboundHandler()); ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class); writeUpgradeRequest(ch); @@ -59,22 +67,29 @@ public void testHttpUpgradeRequest() throws Exception { assertEquals(SWITCHING_PROTOCOLS, response.status()); response.release(); assertNotNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel())); + assertFalse(ch.finish()); } @Test - public void 
testSubsequentHttpRequestsAfterUpgradeShouldReturn403() throws Exception { - EmbeddedChannel ch = createChannel(); - + public void testWebSocketServerProtocolHandshakeHandlerRemovedAfterHandshake() { + EmbeddedChannel ch = createChannel(new MockOutboundHandler()); + ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class); + ch.pipeline().addLast(new ChannelHandler() { + @Override + public void userEventTriggered(ChannelHandlerContext ctx, Object evt) { + if (evt instanceof WebSocketServerProtocolHandler.HandshakeComplete) { + // We should have removed the handler already. + ctx.executor().execute(() -> ctx.pipeline().context(WebSocketServerProtocolHandshakeHandler.class)); + } + } + }); writeUpgradeRequest(ch); FullHttpResponse response = responses.remove(); assertEquals(SWITCHING_PROTOCOLS, response.status()); response.release(); - - ch.writeInbound(new DefaultFullHttpRequest(HTTP_1_1, HttpMethod.GET, "/test")); - response = responses.remove(); - assertEquals(FORBIDDEN, response.status()); - response.release(); + assertNotNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel())); + assertFalse(ch.finish()); } @Test @@ -94,6 +109,7 @@ public void testHttpUpgradeRequestInvalidUpgradeHeader() { assertEquals(BAD_REQUEST, response.status()); assertEquals("not a WebSocket handshake request: missing upgrade", getResponseMessage(response)); response.release(); + assertFalse(ch.finish()); } @Test @@ -114,6 +130,49 @@ public void testHttpUpgradeRequestMissingWSKeyHeader() { assertEquals(BAD_REQUEST, response.status()); assertEquals("not a WebSocket request: missing key", getResponseMessage(response)); response.release(); + assertFalse(ch.finish()); + } + + @Test + public void testCreateUTF8Validator() { + WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .withUTF8Validator(true) + .build(); + + EmbeddedChannel ch = new EmbeddedChannel( + new 
WebSocketServerProtocolHandler(config), + new HttpRequestDecoder(), + new HttpResponseEncoder(), + new MockOutboundHandler()); + writeUpgradeRequest(ch); + + FullHttpResponse response = responses.remove(); + assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + + assertNotNull(ch.pipeline().get(Utf8FrameValidator.class)); + } + + @Test + public void testDoNotCreateUTF8Validator() { + WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .withUTF8Validator(false) + .build(); + + EmbeddedChannel ch = new EmbeddedChannel( + new WebSocketServerProtocolHandler(config), + new HttpRequestDecoder(), + new HttpResponseEncoder(), + new MockOutboundHandler()); + writeUpgradeRequest(ch); + + FullHttpResponse response = responses.remove(); + assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + + assertNull(ch.pipeline().get(Utf8FrameValidator.class)); } @Test @@ -122,6 +181,10 @@ public void testHandleTextFrame() { EmbeddedChannel ch = createChannel(customTextFrameHandler); writeUpgradeRequest(ch); + FullHttpResponse response = responses.remove(); + assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + if (ch.pipeline().context(HttpRequestDecoder.class) != null) { // Removing the HttpRequestDecoder because we are writing a TextWebSocketFrame and thus // decoding is not necessary. 
@@ -131,6 +194,237 @@ public void testHandleTextFrame() { ch.writeInbound(new TextWebSocketFrame("payload")); assertEquals("processed: payload", customTextFrameHandler.getContent()); + assertFalse(ch.finish()); + } + + @Test + public void testCheckWebSocketPathStartWithSlash() { + WebSocketRequestBuilder builder = new WebSocketRequestBuilder().httpVersion(HTTP_1_1) + .method(HttpMethod.GET) + .key(HttpHeaderNames.SEC_WEBSOCKET_KEY) + .connection("Upgrade") + .upgrade(HttpHeaderValues.WEBSOCKET) + .version13(); + + WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/") + .checkStartsWith(true) + .build(); + + FullHttpResponse response; + + createChannel(config, null).writeInbound(builder.uri("/test").build()); + response = responses.remove(); + assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + + createChannel(config, null).writeInbound(builder.uri("/?q=v").build()); + response = responses.remove(); + assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + + createChannel(config, null).writeInbound(builder.uri("/").build()); + response = responses.remove(); + assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + } + + @Test + public void testCheckValidWebSocketPath() { + HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1) + .method(HttpMethod.GET) + .uri("/test") + .key(HttpHeaderNames.SEC_WEBSOCKET_KEY) + .connection("Upgrade") + .upgrade(HttpHeaderValues.WEBSOCKET) + .version13() + .build(); + + WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .checkStartsWith(true) + .build(); + + EmbeddedChannel ch = new EmbeddedChannel( + new WebSocketServerProtocolHandler(config), + new HttpRequestDecoder(), + new HttpResponseEncoder(), + new MockOutboundHandler()); + ch.writeInbound(httpRequest); + + FullHttpResponse response = responses.remove(); + 
assertEquals(SWITCHING_PROTOCOLS, response.status()); + response.release(); + } + + @Test + public void testCheckInvalidWebSocketPath() { + HttpRequest httpRequest = new WebSocketRequestBuilder().httpVersion(HTTP_1_1) + .method(HttpMethod.GET) + .uri("/testabc") + .key(HttpHeaderNames.SEC_WEBSOCKET_KEY) + .connection("Upgrade") + .upgrade(HttpHeaderValues.WEBSOCKET) + .version13() + .build(); + + WebSocketServerProtocolConfig config = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .checkStartsWith(true) + .build(); + + EmbeddedChannel ch = new EmbeddedChannel( + new WebSocketServerProtocolHandler(config), + new HttpRequestDecoder(), + new HttpResponseEncoder(), + new MockOutboundHandler()); + ch.writeInbound(httpRequest); + + ChannelHandlerContext handshakerCtx = ch.pipeline().context(WebSocketServerProtocolHandshakeHandler.class); + assertNull(WebSocketServerProtocolHandler.getHandshaker(handshakerCtx.channel())); + } + + @Test + public void testExplicitCloseFrameSentWhenServerChannelClosed() throws Exception { + WebSocketCloseStatus closeStatus = WebSocketCloseStatus.ENDPOINT_UNAVAILABLE; + EmbeddedChannel client = createClient(); + EmbeddedChannel server = createServer(); + + assertFalse(server.writeInbound((ByteBuf) client.readOutbound())); + assertFalse(client.writeInbound((ByteBuf) server.readOutbound())); + + // When server channel closed with explicit close-frame + assertTrue(server.writeOutbound(new CloseWebSocketFrame(closeStatus))); + server.close(); + + // Then client receives provided close-frame + assertTrue(client.writeInbound((ByteBuf) server.readOutbound())); + assertFalse(server.isOpen()); + + CloseWebSocketFrame closeMessage = client.readInbound(); + assertEquals(closeMessage.statusCode(), closeStatus.code()); + closeMessage.release(); + + client.close(); + assertTrue(ReferenceCountUtil.release(client.readOutbound())); + assertFalse(client.finishAndReleaseAll()); + assertFalse(server.finishAndReleaseAll()); + } + + @Test + 
public void testCloseFrameSentWhenServerChannelClosedSilently() throws Exception { + EmbeddedChannel client = createClient(); + EmbeddedChannel server = createServer(); + + assertFalse(server.writeInbound((ByteBuf) client.readOutbound())); + assertFalse(client.writeInbound((ByteBuf) server.readOutbound())); + + // When server channel closed without explicit close-frame + server.close(); + + // Then client receives NORMAL_CLOSURE close-frame + assertTrue(client.writeInbound((ByteBuf) server.readOutbound())); + assertFalse(server.isOpen()); + + CloseWebSocketFrame closeMessage = client.readInbound(); + assertEquals(closeMessage.statusCode(), WebSocketCloseStatus.NORMAL_CLOSURE.code()); + closeMessage.release(); + + client.close(); + assertTrue(ReferenceCountUtil.release(client.readOutbound())); + assertFalse(client.finishAndReleaseAll()); + assertFalse(server.finishAndReleaseAll()); + } + + @Test + public void testExplicitCloseFrameSentWhenClientChannelClosed() throws Exception { + WebSocketCloseStatus closeStatus = WebSocketCloseStatus.INVALID_PAYLOAD_DATA; + EmbeddedChannel client = createClient(); + EmbeddedChannel server = createServer(); + + assertFalse(server.writeInbound((ByteBuf) client.readOutbound())); + assertFalse(client.writeInbound((ByteBuf) server.readOutbound())); + + // When client channel closed with explicit close-frame + assertTrue(client.writeOutbound(new CloseWebSocketFrame(closeStatus))); + client.close(); + + // Then client receives provided close-frame + assertFalse(server.writeInbound((ByteBuf) client.readOutbound())); + assertFalse(client.isOpen()); + assertFalse(server.isOpen()); + + CloseWebSocketFrame closeMessage = decode(server.readOutbound(), CloseWebSocketFrame.class); + assertEquals(closeMessage.statusCode(), closeStatus.code()); + closeMessage.release(); + + assertFalse(client.finishAndReleaseAll()); + assertFalse(server.finishAndReleaseAll()); + } + + @Test + public void testCloseFrameSentWhenClientChannelClosedSilently() throws 
Exception { + EmbeddedChannel client = createClient(); + EmbeddedChannel server = createServer(); + + assertFalse(server.writeInbound((ByteBuf) client.readOutbound())); + assertFalse(client.writeInbound((ByteBuf) server.readOutbound())); + + // When client channel closed without explicit close-frame + client.close(); + + // Then server receives NORMAL_CLOSURE close-frame + assertFalse(server.writeInbound((ByteBuf) client.readOutbound())); + assertFalse(client.isOpen()); + assertFalse(server.isOpen()); + + CloseWebSocketFrame closeMessage = decode(server.readOutbound(), CloseWebSocketFrame.class); + assertEquals(closeMessage, new CloseWebSocketFrame(WebSocketCloseStatus.NORMAL_CLOSURE)); + closeMessage.release(); + + assertFalse(client.finishAndReleaseAll()); + assertFalse(server.finishAndReleaseAll()); + } + + private EmbeddedChannel createClient(ChannelHandler... handlers) throws Exception { + WebSocketClientProtocolConfig clientConfig = WebSocketClientProtocolConfig.newBuilder() + .webSocketUri("http://test/test") + .dropPongFrames(false) + .handleCloseFrames(false) + .build(); + EmbeddedChannel ch = new EmbeddedChannel(false, false, + new HttpClientCodec(), + new HttpObjectAggregator(8192), + new WebSocketClientProtocolHandler(clientConfig) + ); + ch.pipeline().addLast(handlers); + ch.register(); + return ch; + } + + private EmbeddedChannel createServer(ChannelHandler... 
handlers) throws Exception { + WebSocketServerProtocolConfig serverConfig = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .dropPongFrames(false) + .build(); + EmbeddedChannel ch = new EmbeddedChannel(false, false, + new HttpServerCodec(), + new HttpObjectAggregator(8192), + new WebSocketServerProtocolHandler(serverConfig) + ); + ch.pipeline().addLast(handlers); + ch.register(); + return ch; + } + + @SuppressWarnings("SameParameterValue") + private T decode(ByteBuf input, Class clazz) { + EmbeddedChannel ch = new EmbeddedChannel(new WebSocket13FrameDecoder(true, false, 65536, true)); + assertTrue(ch.writeInbound(input)); + Object decoded = ch.readInbound(); + assertNotNull(decoded); + assertFalse(ch.finish()); + return clazz.cast(decoded); } private EmbeddedChannel createChannel() { @@ -138,8 +432,16 @@ private EmbeddedChannel createChannel() { } private EmbeddedChannel createChannel(ChannelHandler handler) { + WebSocketServerProtocolConfig serverConfig = WebSocketServerProtocolConfig.newBuilder() + .websocketPath("/test") + .sendCloseFrame(null) + .build(); + return createChannel(serverConfig, handler); + } + + private EmbeddedChannel createChannel(WebSocketServerProtocolConfig serverConfig, ChannelHandler handler) { return new EmbeddedChannel( - new WebSocketServerProtocolHandler("/test", null, false), + new WebSocketServerProtocolHandler(serverConfig), new HttpRequestDecoder(), new HttpResponseEncoder(), new MockOutboundHandler(), @@ -151,27 +453,27 @@ private static void writeUpgradeRequest(EmbeddedChannel ch) { } private static String getResponseMessage(FullHttpResponse response) { - return new String(response.content().array()); + return response.content().toString(CharsetUtil.UTF_8); } - private class MockOutboundHandler extends ChannelOutboundHandlerAdapter { + private class MockOutboundHandler implements ChannelHandler { @Override - public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) throws Exception { + 
public Future write(ChannelHandlerContext ctx, Object msg) { responses.add((FullHttpResponse) msg); - promise.setSuccess(); + return ctx.newSucceededFuture(); } @Override - public void flush(ChannelHandlerContext ctx) throws Exception { + public void flush(ChannelHandlerContext ctx) { } } - private static class CustomTextFrameHandler extends ChannelInboundHandlerAdapter { + private static class CustomTextFrameHandler implements ChannelHandler { private String content; @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { + public void channelRead(ChannelHandlerContext ctx, Object msg) { assertNull(content); content = "processed: " + ((TextWebSocketFrame) msg).text(); ReferenceCountUtil.release(msg); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketUtf8FrameValidatorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketUtf8FrameValidatorTest.java new file mode 100644 index 00000000000..42e900269ae --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketUtf8FrameValidatorTest.java @@ -0,0 +1,63 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http.websocketx; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.CorruptedFrameException; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; + +public class WebSocketUtf8FrameValidatorTest { + + @Test + public void testCorruptedFrameExceptionInFinish() { + assertCorruptedFrameExceptionHandling(new byte[]{-50}); + } + + @Test + public void testCorruptedFrameExceptionInCheck() { + assertCorruptedFrameExceptionHandling(new byte[]{-8, -120, -128, -128, -128}); + } + + private void assertCorruptedFrameExceptionHandling(byte[] data) { + EmbeddedChannel channel = new EmbeddedChannel(new Utf8FrameValidator()); + TextWebSocketFrame frame = new TextWebSocketFrame(Unpooled.copiedBuffer(data)); + try { + channel.writeInbound(frame); + fail(); + } catch (CorruptedFrameException e) { + // expected exception + } + assertTrue(channel.finish()); + ByteBuf buf = channel.readOutbound(); + assertNotNull(buf); + try { + assertFalse(buf.isReadable()); + } finally { + buf.release(); + } + assertNull(channel.readOutbound()); + assertEquals(0, frame.refCnt()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketUtilTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketUtilTest.java new file mode 100644 index 00000000000..6dc87e4c9d5 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/WebSocketUtilTest.java @@ -0,0 +1,45 @@ +/* + * Copyright 2018 The Netty Project + * + * The Netty Project licenses this file to 
you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx; + +import org.junit.jupiter.api.Test; + +import java.util.concurrent.ThreadLocalRandom; + +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class WebSocketUtilTest { + + // how many times do we want to run each random variable checker + private static final int NUM_ITERATIONS = 1000; + + private static void assertRandomWithinBoundaries(int min, int max) { + int r = ThreadLocalRandom.current().nextInt(min, max + 1); + assertTrue(min <= r && r <= max); + } + + @Test + public void testRandomNumberGenerator() { + int iteration = 0; + while (++iteration <= NUM_ITERATIONS) { + assertRandomWithinBoundaries(0, 1); + assertRandomWithinBoundaries(0, 1); + assertRandomWithinBoundaries(-1, 1); + assertRandomWithinBoundaries(-1, 0); + } + } + +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandlerTest.java index 1a5d8dcf674..37295b20aee 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketClientExtensionHandlerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * 
with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -24,10 +24,13 @@ import java.util.Collections; import java.util.List; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.junit.jupiter.api.Assertions.fail; import static org.mockito.Mockito.any; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.mock; @@ -49,10 +52,10 @@ public class WebSocketClientExtensionHandlerTest { public void testMainSuccess() { // initialize when(mainHandshakerMock.newRequestData()). - thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); when(mainHandshakerMock.handshakeExtension(any(WebSocketExtensionData.class))).thenReturn(mainExtensionMock); when(fallbackHandshakerMock.newRequestData()). - thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); when(mainExtensionMock.rsv()).thenReturn(WebSocketExtension.RSV1); when(mainExtensionMock.newExtensionEncoder()).thenReturn(new DummyEncoder()); when(mainExtensionMock.newExtensionDecoder()).thenReturn(new DummyDecoder()); @@ -98,10 +101,10 @@ public void testMainSuccess() { public void testFallbackSuccess() { // initialize when(mainHandshakerMock.newRequestData()). 
- thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); when(mainHandshakerMock.handshakeExtension(any(WebSocketExtensionData.class))).thenReturn(null); when(fallbackHandshakerMock.newRequestData()). - thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); when(fallbackHandshakerMock.handshakeExtension( any(WebSocketExtensionData.class))).thenReturn(fallbackExtensionMock); when(fallbackExtensionMock.rsv()).thenReturn(WebSocketExtension.RSV1); @@ -150,13 +153,13 @@ public void testFallbackSuccess() { public void testAllSuccess() { // initialize when(mainHandshakerMock.newRequestData()). - thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); when(mainHandshakerMock.handshakeExtension( webSocketExtensionDataMatcher("main"))).thenReturn(mainExtensionMock); when(mainHandshakerMock.handshakeExtension( webSocketExtensionDataMatcher("fallback"))).thenReturn(null); when(fallbackHandshakerMock.newRequestData()). - thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); when(fallbackHandshakerMock.handshakeExtension( webSocketExtensionDataMatcher("main"))).thenReturn(null); when(fallbackHandshakerMock.handshakeExtension( @@ -218,17 +221,17 @@ public void testAllSuccess() { verify(fallbackExtensionMock).newExtensionDecoder(); } - @Test(expected = CodecException.class) + @Test public void testIfMainAndFallbackUseRSV1WillFail() { // initialize when(mainHandshakerMock.newRequestData()). 
- thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("main", Collections.emptyMap())); when(mainHandshakerMock.handshakeExtension( webSocketExtensionDataMatcher("main"))).thenReturn(mainExtensionMock); when(mainHandshakerMock.handshakeExtension( webSocketExtensionDataMatcher("fallback"))).thenReturn(null); when(fallbackHandshakerMock.newRequestData()). - thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); + thenReturn(new WebSocketExtensionData("fallback", Collections.emptyMap())); when(fallbackHandshakerMock.handshakeExtension( webSocketExtensionDataMatcher("main"))).thenReturn(null); when(fallbackHandshakerMock.handshakeExtension( @@ -248,7 +251,12 @@ public void testIfMainAndFallbackUseRSV1WillFail() { req2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); HttpResponse res = newUpgradeResponse("main, fallback"); - ch.writeInbound(res); + try { + ch.writeInbound(res); + } catch (CodecException e) { + return; + } + fail("Expected to encounter a CodecException"); // test assertEquals(2, reqExts.size()); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterProviderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterProviderTest.java new file mode 100644 index 00000000000..ef80950e149 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterProviderTest.java @@ -0,0 +1,33 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx.extensions; + +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; + +public class WebSocketExtensionFilterProviderTest { + + @Test + public void testDefaultExtensionFilterProvider() { + WebSocketExtensionFilterProvider defaultProvider = WebSocketExtensionFilterProvider.DEFAULT; + assertNotNull(defaultProvider); + + assertEquals(WebSocketExtensionFilter.NEVER_SKIP, defaultProvider.decoderFilter()); + assertEquals(WebSocketExtensionFilter.NEVER_SKIP, defaultProvider.encoderFilter()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterTest.java new file mode 100644 index 00000000000..7eced821e87 --- /dev/null +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionFilterTest.java @@ -0,0 +1,88 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http.websocketx.extensions; + +import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame; +import io.netty.handler.codec.http.websocketx.CloseWebSocketFrame; +import io.netty.handler.codec.http.websocketx.ContinuationWebSocketFrame; +import io.netty.handler.codec.http.websocketx.PingWebSocketFrame; +import io.netty.handler.codec.http.websocketx.PongWebSocketFrame; +import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; +import org.junit.jupiter.api.Test; + +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; + +public class WebSocketExtensionFilterTest { + + @Test + public void testNeverSkip() { + WebSocketExtensionFilter neverSkip = WebSocketExtensionFilter.NEVER_SKIP; + + BinaryWebSocketFrame binaryFrame = new BinaryWebSocketFrame(); + assertFalse(neverSkip.mustSkip(binaryFrame)); + assertTrue(binaryFrame.release()); + + TextWebSocketFrame textFrame = new TextWebSocketFrame(); + assertFalse(neverSkip.mustSkip(textFrame)); + assertTrue(textFrame.release()); + + PingWebSocketFrame pingFrame = new PingWebSocketFrame(); + assertFalse(neverSkip.mustSkip(pingFrame)); + assertTrue(pingFrame.release()); + + PongWebSocketFrame pongFrame = new PongWebSocketFrame(); + assertFalse(neverSkip.mustSkip(pongFrame)); + assertTrue(pongFrame.release()); + + CloseWebSocketFrame closeFrame = new CloseWebSocketFrame(); + assertFalse(neverSkip.mustSkip(closeFrame)); + assertTrue(closeFrame.release()); + + ContinuationWebSocketFrame 
continuationFrame = new ContinuationWebSocketFrame(); + assertFalse(neverSkip.mustSkip(continuationFrame)); + assertTrue(continuationFrame.release()); + } + + @Test + public void testAlwaysSkip() { + WebSocketExtensionFilter neverSkip = WebSocketExtensionFilter.ALWAYS_SKIP; + + BinaryWebSocketFrame binaryFrame = new BinaryWebSocketFrame(); + assertTrue(neverSkip.mustSkip(binaryFrame)); + assertTrue(binaryFrame.release()); + + TextWebSocketFrame textFrame = new TextWebSocketFrame(); + assertTrue(neverSkip.mustSkip(textFrame)); + assertTrue(textFrame.release()); + + PingWebSocketFrame pingFrame = new PingWebSocketFrame(); + assertTrue(neverSkip.mustSkip(pingFrame)); + assertTrue(pingFrame.release()); + + PongWebSocketFrame pongFrame = new PongWebSocketFrame(); + assertTrue(neverSkip.mustSkip(pongFrame)); + assertTrue(pongFrame.release()); + + CloseWebSocketFrame closeFrame = new CloseWebSocketFrame(); + assertTrue(neverSkip.mustSkip(closeFrame)); + assertTrue(closeFrame.release()); + + ContinuationWebSocketFrame continuationFrame = new ContinuationWebSocketFrame(); + assertTrue(neverSkip.mustSkip(continuationFrame)); + assertTrue(continuationFrame.release()); + } +} diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionTestUtil.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionTestUtil.java index 411b167f8f3..bbfcac6fe21 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionTestUtil.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionTestUtil.java @@ -5,7 +5,7 @@ * 2.0 (the "License"); you may not use this file except in compliance with the * License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -69,7 +69,7 @@ static final class WebSocketExtensionDataMatcher implements ArgumentMatcher out) throws Exception { + protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg) throws Exception { // unused } } @@ -113,8 +112,7 @@ protected void encode(ChannelHandlerContext ctx, WebSocketFrame msg, static class Dummy2Decoder extends WebSocketExtensionDecoder { @Override - protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg, - List out) throws Exception { + protected void decode(ChannelHandlerContext ctx, WebSocketFrame msg) throws Exception { // unused } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java index 889e9164801..41345661cb3 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketExtensionUtilTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,21 +19,67 @@ import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpHeaderValues; import io.netty.handler.codec.http.HttpHeaders; -import org.junit.Test; +import org.junit.jupiter.api.Test; -import static org.junit.Assert.*; +import java.util.List; + +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionUtil.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertTrue; public class WebSocketExtensionUtilTest { @Test public void testIsWebsocketUpgrade() { HttpHeaders headers = new DefaultHttpHeaders(); - assertFalse(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertFalse(isWebsocketUpgrade(headers)); headers.add(HttpHeaderNames.UPGRADE, HttpHeaderValues.WEBSOCKET); - assertFalse(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertFalse(isWebsocketUpgrade(headers)); headers.add(HttpHeaderNames.CONNECTION, "Keep-Alive, Upgrade"); - assertTrue(WebSocketExtensionUtil.isWebsocketUpgrade(headers)); + assertTrue(isWebsocketUpgrade(headers)); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenNoUserDefinedHeader() { + List extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue(null, extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + 
"x-webkit-deflate-frame", newHeaderValue); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenNoConflictingUserDefinedHeader() { + List extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue("foo, bar", extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame," + + "foo," + + "bar", newHeaderValue); + } + + @Test + public void computeMergeExtensionsHeaderValueWhenConflictingUserDefinedHeader() { + List extras = extractExtensions("permessage-deflate; client_max_window_bits," + + "permessage-deflate; client_no_context_takeover; client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame"); + String newHeaderValue = computeMergeExtensionsHeaderValue("permessage-deflate; client_max_window_bits", extras); + assertEquals("permessage-deflate;client_max_window_bits," + + "permessage-deflate;client_no_context_takeover;client_max_window_bits," + + "deflate-frame," + + "x-webkit-deflate-frame", newHeaderValue); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandlerTest.java index c02853f38db..0ff3d8a663a 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/WebSocketServerExtensionHandlerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,15 +19,28 @@ import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; +import org.junit.jupiter.api.Test; import java.util.Collections; import java.util.List; -import org.junit.Test; - -import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.*; -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.Dummy2Decoder; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.Dummy2Encoder; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.DummyDecoder; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.DummyEncoder; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.newUpgradeRequest; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.newUpgradeResponse; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.webSocketExtensionDataMatcher; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import 
static org.mockito.Mockito.when; public class WebSocketServerExtensionHandlerTest { @@ -54,16 +67,17 @@ public void testMainSuccess() { thenReturn(null); when(mainExtensionMock.rsv()).thenReturn(WebSocketExtension.RSV1); - when(mainExtensionMock.newReponseData()).thenReturn( - new WebSocketExtensionData("main", Collections.emptyMap())); + when(mainExtensionMock.newResponseData()).thenReturn( + new WebSocketExtensionData("main", Collections.emptyMap())); when(mainExtensionMock.newExtensionEncoder()).thenReturn(new DummyEncoder()); when(mainExtensionMock.newExtensionDecoder()).thenReturn(new DummyDecoder()); when(fallbackExtensionMock.rsv()).thenReturn(WebSocketExtension.RSV1); // execute - EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler( - mainHandshakerMock, fallbackHandshakerMock)); + WebSocketServerExtensionHandler extensionHandler = + new WebSocketServerExtensionHandler(mainHandshakerMock, fallbackHandshakerMock); + EmbeddedChannel ch = new EmbeddedChannel(extensionHandler); HttpRequest req = newUpgradeRequest("main, fallback"); ch.writeInbound(req); @@ -76,6 +90,7 @@ public void testMainSuccess() { res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); // test + assertNull(ch.pipeline().context(extensionHandler)); assertEquals(1, resExts.size()); assertEquals("main", resExts.get(0).name()); assertTrue(resExts.get(0).parameters().isEmpty()); @@ -87,7 +102,7 @@ public void testMainSuccess() { verify(fallbackHandshakerMock, atLeastOnce()).handshakeExtension(webSocketExtensionDataMatcher("fallback")); verify(mainExtensionMock, atLeastOnce()).rsv(); - verify(mainExtensionMock).newReponseData(); + verify(mainExtensionMock).newResponseData(); verify(mainExtensionMock).newExtensionEncoder(); verify(mainExtensionMock).newExtensionDecoder(); verify(fallbackExtensionMock, atLeastOnce()).rsv(); @@ -107,20 +122,21 @@ public void testCompatibleExtensionTogetherSuccess() { thenReturn(null); 
when(mainExtensionMock.rsv()).thenReturn(WebSocketExtension.RSV1); - when(mainExtensionMock.newReponseData()).thenReturn( - new WebSocketExtensionData("main", Collections.emptyMap())); + when(mainExtensionMock.newResponseData()).thenReturn( + new WebSocketExtensionData("main", Collections.emptyMap())); when(mainExtensionMock.newExtensionEncoder()).thenReturn(new DummyEncoder()); when(mainExtensionMock.newExtensionDecoder()).thenReturn(new DummyDecoder()); when(fallbackExtensionMock.rsv()).thenReturn(WebSocketExtension.RSV2); - when(fallbackExtensionMock.newReponseData()).thenReturn( - new WebSocketExtensionData("fallback", Collections.emptyMap())); + when(fallbackExtensionMock.newResponseData()).thenReturn( + new WebSocketExtensionData("fallback", Collections.emptyMap())); when(fallbackExtensionMock.newExtensionEncoder()).thenReturn(new Dummy2Encoder()); when(fallbackExtensionMock.newExtensionDecoder()).thenReturn(new Dummy2Decoder()); // execute - EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler( - mainHandshakerMock, fallbackHandshakerMock)); + WebSocketServerExtensionHandler extensionHandler = + new WebSocketServerExtensionHandler(mainHandshakerMock, fallbackHandshakerMock); + EmbeddedChannel ch = new EmbeddedChannel(extensionHandler); HttpRequest req = newUpgradeRequest("main, fallback"); ch.writeInbound(req); @@ -133,6 +149,7 @@ public void testCompatibleExtensionTogetherSuccess() { res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); // test + assertNull(ch.pipeline().context(extensionHandler)); assertEquals(2, resExts.size()); assertEquals("main", resExts.get(0).name()); assertEquals("fallback", resExts.get(1).name()); @@ -145,13 +162,13 @@ public void testCompatibleExtensionTogetherSuccess() { verify(mainHandshakerMock).handshakeExtension(webSocketExtensionDataMatcher("fallback")); verify(fallbackHandshakerMock).handshakeExtension(webSocketExtensionDataMatcher("fallback")); verify(mainExtensionMock, times(2)).rsv(); - 
verify(mainExtensionMock).newReponseData(); + verify(mainExtensionMock).newResponseData(); verify(mainExtensionMock).newExtensionEncoder(); verify(mainExtensionMock).newExtensionDecoder(); verify(fallbackExtensionMock, times(2)).rsv(); - verify(fallbackExtensionMock).newReponseData(); + verify(fallbackExtensionMock).newResponseData(); verify(fallbackExtensionMock).newExtensionEncoder(); verify(fallbackExtensionMock).newExtensionDecoder(); } @@ -170,8 +187,9 @@ public void testNoneExtensionMatchingSuccess() { thenReturn(null); // execute - EmbeddedChannel ch = new EmbeddedChannel(new WebSocketServerExtensionHandler( - mainHandshakerMock, fallbackHandshakerMock)); + WebSocketServerExtensionHandler extensionHandler = + new WebSocketServerExtensionHandler(mainHandshakerMock, fallbackHandshakerMock); + EmbeddedChannel ch = new EmbeddedChannel(extensionHandler); HttpRequest req = newUpgradeRequest("unknown, unknown2"); ch.writeInbound(req); @@ -182,6 +200,7 @@ public void testNoneExtensionMatchingSuccess() { HttpResponse res2 = ch.readOutbound(); // test + assertNull(ch.pipeline().context(extensionHandler)); assertFalse(res2.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); verify(mainHandshakerMock).handshakeExtension(webSocketExtensionDataMatcher("unknown")); diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshakerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshakerTest.java index d3ee5a4c1f5..e46e0854c71 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshakerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameClientExtensionHandshakerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the 
License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,11 @@ import static io.netty.handler.codec.http.websocketx.extensions.compression. DeflateFrameServerExtensionHandshaker.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + import io.netty.handler.codec.http.websocketx.extensions.WebSocketClientExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; @@ -25,7 +29,7 @@ import java.util.HashMap; import java.util.Map; -import org.junit.Test; +import org.junit.jupiter.api.Test; public class DeflateFrameClientExtensionHandshakerTest { @@ -57,7 +61,7 @@ public void testNormalHandshake() { new DeflateFrameClientExtensionHandshaker(false); WebSocketClientExtension extension = handshaker.handshakeExtension( - new WebSocketExtensionData(DEFLATE_FRAME_EXTENSION, Collections.emptyMap())); + new WebSocketExtensionData(DEFLATE_FRAME_EXTENSION, Collections.emptyMap())); assertNotNull(extension); assertEquals(WebSocketClientExtension.RSV1, extension.rsv()); @@ -71,7 +75,7 @@ public void testFailedHandshake() { DeflateFrameClientExtensionHandshaker handshaker = new DeflateFrameClientExtensionHandshaker(false); - Map parameters = new HashMap(); + Map parameters = new HashMap<>(); parameters.put("invalid", "12"); // execute diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshakerTest.java 
b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshakerTest.java index 1d6f8ba5af0..64143cbe4ce 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshakerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/DeflateFrameServerExtensionHandshakerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,11 @@ import static io.netty.handler.codec.http.websocketx.extensions.compression. DeflateFrameServerExtensionHandshaker.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + import io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; @@ -25,7 +29,7 @@ import java.util.HashMap; import java.util.Map; -import org.junit.Test; +import org.junit.jupiter.api.Test; public class DeflateFrameServerExtensionHandshakerTest { @@ -37,7 +41,7 @@ public void testNormalHandshake() { // execute WebSocketServerExtension extension = handshaker.handshakeExtension( - new WebSocketExtensionData(DEFLATE_FRAME_EXTENSION, Collections.emptyMap())); + new WebSocketExtensionData(DEFLATE_FRAME_EXTENSION, Collections.emptyMap())); // test assertNotNull(extension); @@ -54,7 +58,7 @@ public void testWebkitHandshake() 
{ // execute WebSocketServerExtension extension = handshaker.handshakeExtension( - new WebSocketExtensionData(X_WEBKIT_DEFLATE_FRAME_EXTENSION, Collections.emptyMap())); + new WebSocketExtensionData(X_WEBKIT_DEFLATE_FRAME_EXTENSION, Collections.emptyMap())); // test assertNotNull(extension); @@ -70,7 +74,7 @@ public void testFailedHandshake() { new DeflateFrameServerExtensionHandshaker(); Map parameters; - parameters = new HashMap(); + parameters = new HashMap<>(); parameters.put("unknown", "11"); // execute diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoderTest.java index e59d3802870..121fd2011b7 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,6 @@ */ package io.netty.handler.codec.http.websocketx.extensions.compression; -import static org.junit.Assert.*; import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; @@ -23,11 +22,17 @@ import io.netty.handler.codec.compression.ZlibWrapper; import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; +import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Random; -import org.junit.Test; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension.*; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class PerFrameDeflateDecoderTest { @@ -43,27 +48,26 @@ public void testCompressedFrame() { byte[] payload = new byte[300]; random.nextBytes(payload); - encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload)); + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload))); ByteBuf compressedPayload = encoderChannel.readOutbound(); BinaryWebSocketFrame compressedFrame = new BinaryWebSocketFrame(true, - WebSocketExtension.RSV1 | WebSocketExtension.RSV3, + RSV1 | RSV3, compressedPayload.slice(0, compressedPayload.readableBytes() - 4)); // execute - decoderChannel.writeInbound(compressedFrame); + 
assertTrue(decoderChannel.writeInbound(compressedFrame)); BinaryWebSocketFrame uncompressedFrame = decoderChannel.readInbound(); // test assertNotNull(uncompressedFrame); assertNotNull(uncompressedFrame.content()); - assertTrue(uncompressedFrame instanceof BinaryWebSocketFrame); - assertEquals(WebSocketExtension.RSV3, uncompressedFrame.rsv()); + assertEquals(RSV3, uncompressedFrame.rsv()); assertEquals(300, uncompressedFrame.content().readableBytes()); byte[] finalPayload = new byte[300]; uncompressedFrame.content().readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); uncompressedFrame.release(); } @@ -76,22 +80,21 @@ public void testNormalFrame() { random.nextBytes(payload); BinaryWebSocketFrame frame = new BinaryWebSocketFrame(true, - WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload)); + RSV3, Unpooled.wrappedBuffer(payload)); // execute - decoderChannel.writeInbound(frame); + assertTrue(decoderChannel.writeInbound(frame)); BinaryWebSocketFrame newFrame = decoderChannel.readInbound(); // test assertNotNull(newFrame); assertNotNull(newFrame.content()); - assertTrue(newFrame instanceof BinaryWebSocketFrame); - assertEquals(WebSocketExtension.RSV3, newFrame.rsv()); + assertEquals(RSV3, newFrame.rsv()); assertEquals(300, newFrame.content().readableBytes()); byte[] finalPayload = new byte[300]; newFrame.content().readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); newFrame.release(); } @@ -102,22 +105,51 @@ public void testCompressedEmptyFrame() { ZlibCodecFactory.newZlibEncoder(ZlibWrapper.NONE, 9, 15, 8)); EmbeddedChannel decoderChannel = new EmbeddedChannel(new PerFrameDeflateDecoder(false)); - encoderChannel.writeOutbound(Unpooled.EMPTY_BUFFER); + assertTrue(encoderChannel.writeOutbound(Unpooled.EMPTY_BUFFER)); ByteBuf compressedPayload = encoderChannel.readOutbound(); BinaryWebSocketFrame compressedFrame = - new 
BinaryWebSocketFrame(true, WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedPayload); + new BinaryWebSocketFrame(true, RSV1 | RSV3, compressedPayload); // execute - decoderChannel.writeInbound(compressedFrame); + assertTrue(decoderChannel.writeInbound(compressedFrame)); BinaryWebSocketFrame uncompressedFrame = decoderChannel.readInbound(); // test assertNotNull(uncompressedFrame); assertNotNull(uncompressedFrame.content()); - assertTrue(uncompressedFrame instanceof BinaryWebSocketFrame); - assertEquals(WebSocketExtension.RSV3, uncompressedFrame.rsv()); + assertEquals(RSV3, uncompressedFrame.rsv()); assertEquals(0, uncompressedFrame.content().readableBytes()); uncompressedFrame.release(); } + @Test + public void testDecompressionSkip() { + EmbeddedChannel encoderChannel = new EmbeddedChannel( + ZlibCodecFactory.newZlibEncoder(ZlibWrapper.NONE, 9, 15, 8)); + EmbeddedChannel decoderChannel = new EmbeddedChannel(new PerFrameDeflateDecoder(false, ALWAYS_SKIP)); + + byte[] payload = new byte[300]; + random.nextBytes(payload); + + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload))); + ByteBuf compressedPayload = encoderChannel.readOutbound(); + + BinaryWebSocketFrame compressedBinaryFrame = new BinaryWebSocketFrame( + true, WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedPayload); + + assertTrue(decoderChannel.writeInbound(compressedBinaryFrame)); + + BinaryWebSocketFrame inboundBinaryFrame = decoderChannel.readInbound(); + + assertNotNull(inboundBinaryFrame); + assertNotNull(inboundBinaryFrame.content()); + assertEquals(compressedPayload, inboundBinaryFrame.content()); + assertEquals(5, inboundBinaryFrame.rsv()); + + assertTrue(inboundBinaryFrame.release()); + + assertTrue(encoderChannel.finishAndReleaseAll()); + assertFalse(decoderChannel.finish()); + } + } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoderTest.java 
b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoderTest.java index 5c085e9cee0..ceba96f8e71 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerFrameDeflateEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,8 +15,8 @@ */ package io.netty.handler.codec.http.websocketx.extensions.compression; -import static org.junit.Assert.*; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.compression.ZlibCodecFactory; @@ -24,11 +24,16 @@ import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame; import io.netty.handler.codec.http.websocketx.ContinuationWebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; +import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Random; -import org.junit.Test; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class PerFrameDeflateEncoderTest { @@ -47,23 +52,22 @@ public void testCompressedFrame() 
{ WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload)); // execute - encoderChannel.writeOutbound(frame); + assertTrue(encoderChannel.writeOutbound(frame)); BinaryWebSocketFrame compressedFrame = encoderChannel.readOutbound(); // test assertNotNull(compressedFrame); assertNotNull(compressedFrame.content()); - assertTrue(compressedFrame instanceof BinaryWebSocketFrame); assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame.rsv()); - decoderChannel.writeInbound(compressedFrame.content()); - decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL); + assertTrue(decoderChannel.writeInbound(compressedFrame.content())); + assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload = decoderChannel.readInbound(); assertEquals(300, uncompressedPayload.readableBytes()); byte[] finalPayload = new byte[300]; uncompressedPayload.readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); uncompressedPayload.release(); } @@ -79,19 +83,18 @@ public void testAlreadyCompressedFrame() { WebSocketExtension.RSV3 | WebSocketExtension.RSV1, Unpooled.wrappedBuffer(payload)); // execute - encoderChannel.writeOutbound(frame); + assertTrue(encoderChannel.writeOutbound(frame)); BinaryWebSocketFrame newFrame = encoderChannel.readOutbound(); // test assertNotNull(newFrame); assertNotNull(newFrame.content()); - assertTrue(newFrame instanceof BinaryWebSocketFrame); assertEquals(WebSocketExtension.RSV3 | WebSocketExtension.RSV1, newFrame.rsv()); assertEquals(300, newFrame.content().readableBytes()); byte[] finalPayload = new byte[300]; newFrame.content().readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); newFrame.release(); } @@ -117,9 +120,9 @@ public void testFramementedFrame() { WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload3)); // execute - encoderChannel.writeOutbound(frame1); 
- encoderChannel.writeOutbound(frame2); - encoderChannel.writeOutbound(frame3); + assertTrue(encoderChannel.writeOutbound(frame1)); + assertTrue(encoderChannel.writeOutbound(frame2)); + assertTrue(encoderChannel.writeOutbound(frame3)); BinaryWebSocketFrame compressedFrame1 = encoderChannel.readOutbound(); ContinuationWebSocketFrame compressedFrame2 = encoderChannel.readOutbound(); ContinuationWebSocketFrame compressedFrame3 = encoderChannel.readOutbound(); @@ -135,28 +138,52 @@ public void testFramementedFrame() { assertFalse(compressedFrame2.isFinalFragment()); assertTrue(compressedFrame3.isFinalFragment()); - decoderChannel.writeInbound(compressedFrame1.content()); - decoderChannel.writeInbound(Unpooled.wrappedBuffer(DeflateDecoder.FRAME_TAIL)); + assertTrue(decoderChannel.writeInbound(compressedFrame1.content())); + assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload1 = decoderChannel.readInbound(); byte[] finalPayload1 = new byte[100]; uncompressedPayload1.readBytes(finalPayload1); - assertTrue(Arrays.equals(finalPayload1, payload1)); + assertArrayEquals(finalPayload1, payload1); uncompressedPayload1.release(); - decoderChannel.writeInbound(compressedFrame2.content()); - decoderChannel.writeInbound(Unpooled.wrappedBuffer(DeflateDecoder.FRAME_TAIL)); + assertTrue(decoderChannel.writeInbound(compressedFrame2.content())); + assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload2 = decoderChannel.readInbound(); byte[] finalPayload2 = new byte[100]; uncompressedPayload2.readBytes(finalPayload2); - assertTrue(Arrays.equals(finalPayload2, payload2)); + assertArrayEquals(finalPayload2, payload2); uncompressedPayload2.release(); - decoderChannel.writeInbound(compressedFrame3.content()); - decoderChannel.writeInbound(Unpooled.wrappedBuffer(DeflateDecoder.FRAME_TAIL)); + assertTrue(decoderChannel.writeInbound(compressedFrame3.content())); + 
assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload3 = decoderChannel.readInbound(); byte[] finalPayload3 = new byte[100]; uncompressedPayload3.readBytes(finalPayload3); - assertTrue(Arrays.equals(finalPayload3, payload3)); + assertArrayEquals(finalPayload3, payload3); uncompressedPayload3.release(); } + + @Test + public void testCompressionSkip() { + EmbeddedChannel encoderChannel = new EmbeddedChannel( + new PerFrameDeflateEncoder(9, 15, false, ALWAYS_SKIP)); + byte[] payload = new byte[300]; + random.nextBytes(payload); + BinaryWebSocketFrame binaryFrame = new BinaryWebSocketFrame(true, + 0, Unpooled.wrappedBuffer(payload)); + + // execute + assertTrue(encoderChannel.writeOutbound(binaryFrame.copy())); + BinaryWebSocketFrame outboundFrame = encoderChannel.readOutbound(); + + // test + assertNotNull(outboundFrame); + assertNotNull(outboundFrame.content()); + assertArrayEquals(payload, ByteBufUtil.getBytes(outboundFrame.content())); + assertEquals(0, outboundFrame.rsv()); + assertTrue(outboundFrame.release()); + + assertFalse(encoderChannel.finish()); + } + } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshakerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshakerTest.java index 7b0fa4a3d3a..a30b19e3c56 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshakerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateClientExtensionHandshakerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,11 +15,19 @@ */ package io.netty.handler.codec.http.websocketx.extensions.compression; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension.RSV1; import static io.netty.handler.codec.http.websocketx.extensions.compression. PerMessageDeflateServerExtensionHandshaker.*; -import static org.junit.Assert.*; - +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.compression.ZlibCodecFactory; +import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketClientExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; @@ -27,7 +35,7 @@ import java.util.HashMap; import java.util.Map; -import org.junit.Test; +import org.junit.jupiter.api.Test; public class PerMessageDeflateClientExtensionHandshakerTest { @@ -63,10 +71,10 @@ public void testNormalHandshake() { new PerMessageDeflateClientExtensionHandshaker(); WebSocketClientExtension extension = handshaker.handshakeExtension( - new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, Collections.emptyMap())); + new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, Collections.emptyMap())); assertNotNull(extension); - assertEquals(WebSocketClientExtension.RSV1, extension.rsv()); + assertEquals(RSV1, extension.rsv()); 
assertTrue(extension.newExtensionDecoder() instanceof PerMessageDeflateDecoder); assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); } @@ -80,9 +88,9 @@ public void testCustomHandshake() { PerMessageDeflateClientExtensionHandshaker handshaker = new PerMessageDeflateClientExtensionHandshaker(6, true, 10, true, true); - parameters = new HashMap(); + parameters = new HashMap<>(); parameters.put(CLIENT_MAX_WINDOW, "12"); - parameters.put(SERVER_MAX_WINDOW, "10"); + parameters.put(SERVER_MAX_WINDOW, "8"); parameters.put(CLIENT_NO_CONTEXT, null); parameters.put(SERVER_NO_CONTEXT, null); @@ -92,12 +100,12 @@ public void testCustomHandshake() { // test assertNotNull(extension); - assertEquals(WebSocketClientExtension.RSV1, extension.rsv()); + assertEquals(RSV1, extension.rsv()); assertTrue(extension.newExtensionDecoder() instanceof PerMessageDeflateDecoder); assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); // initialize - parameters = new HashMap(); + parameters = new HashMap<>(); parameters.put(SERVER_MAX_WINDOW, "10"); parameters.put(SERVER_NO_CONTEXT, null); @@ -107,12 +115,12 @@ public void testCustomHandshake() { // test assertNotNull(extension); - assertEquals(WebSocketClientExtension.RSV1, extension.rsv()); + assertEquals(RSV1, extension.rsv()); assertTrue(extension.newExtensionDecoder() instanceof PerMessageDeflateDecoder); assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); // initialize - parameters = new HashMap(); + parameters = new HashMap<>(); // execute extension = handshaker.handshakeExtension( @@ -121,4 +129,118 @@ public void testCustomHandshake() { // test assertNull(extension); } + + @Test + public void testParameterValidation() { + WebSocketClientExtension extension; + Map parameters; + + PerMessageDeflateClientExtensionHandshaker handshaker = + new PerMessageDeflateClientExtensionHandshaker(6, true, 15, true, false); + + parameters = new HashMap(); + 
parameters.put(CLIENT_MAX_WINDOW, "15"); + parameters.put(SERVER_MAX_WINDOW, "8"); + extension = handshaker.handshakeExtension(new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, parameters)); + + // Test that handshake succeeds when parameters are valid + assertNotNull(extension); + assertEquals(RSV1, extension.rsv()); + assertTrue(extension.newExtensionDecoder() instanceof PerMessageDeflateDecoder); + assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); + + parameters = new HashMap(); + parameters.put(CLIENT_MAX_WINDOW, "15"); + parameters.put(SERVER_MAX_WINDOW, "7"); + + extension = handshaker.handshakeExtension(new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, parameters)); + + // Test that handshake fails when parameters are invalid + assertNull(extension); + } + + @Test + public void testServerNoContextTakeover() { + WebSocketClientExtension extension; + Map parameters; + + PerMessageDeflateClientExtensionHandshaker handshaker = + new PerMessageDeflateClientExtensionHandshaker(6, true, 15, true, false); + + parameters = new HashMap(); + parameters.put(SERVER_NO_CONTEXT, null); + extension = handshaker.handshakeExtension(new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, parameters)); + + // Test that handshake succeeds when server responds with `server_no_context_takeover` that we didn't offer + assertNotNull(extension); + assertEquals(RSV1, extension.rsv()); + assertTrue(extension.newExtensionDecoder() instanceof PerMessageDeflateDecoder); + assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); + + // initialize + handshaker = new PerMessageDeflateClientExtensionHandshaker(6, true, 15, true, true); + + parameters = new HashMap(); + extension = handshaker.handshakeExtension(new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, parameters)); + + // Test that handshake fails when client offers `server_no_context_takeover` but server doesn't support it + assertNull(extension); + } + + 
@Test + public void testDecoderNoClientContext() { + PerMessageDeflateClientExtensionHandshaker handshaker = + new PerMessageDeflateClientExtensionHandshaker(6, true, MAX_WINDOW_SIZE, true, false); + + byte[] firstPayload = new byte[] { + 76, -50, -53, 10, -62, 48, 20, 4, -48, 95, 41, 89, -37, 36, 77, 90, 31, -39, 41, -72, 112, 33, -120, 20, + 20, 119, -79, 70, 123, -95, 121, -48, 92, -116, 80, -6, -17, -58, -99, -37, -31, 12, 51, 19, 1, -9, -12, + 68, -111, -117, 25, 58, 111, 77, -127, -66, -64, -34, 20, 59, -64, -29, -2, 90, -100, -115, 30, 16, 114, + -68, 61, 29, 40, 89, -112, -73, 25, 35, 120, -105, -67, -32, -43, -70, -84, 120, -55, 69, 43, -124, 106, + -92, 18, -110, 114, -50, 111, 25, -3, 10, 17, -75, 13, 127, -84, 106, 90, -66, 84, -75, 84, 53, -89, + -75, 92, -3, -40, -61, 119, 49, -117, 30, 49, 68, -59, 88, 74, -119, -34, 1, -83, -7, -48, 124, -124, + -23, 16, 88, -118, 121, 54, -53, 1, 44, 32, 81, 19, 25, -115, -43, -32, -64, -67, -120, -110, -101, 121, + -2, 2 + }; + + byte[] secondPayload = new byte[] { + -86, 86, 42, 46, 77, 78, 78, 45, 6, 26, 83, 82, 84, -102, -86, 3, -28, 38, 21, 39, 23, 101, 38, -91, 2, + -51, -51, 47, 74, 73, 45, 114, -54, -49, -49, -10, 49, -78, -118, 112, 10, 9, 13, 118, 1, -102, 84, + -108, 90, 88, 10, 116, 27, -56, -84, 124, -112, -13, 16, 26, 116, -108, 18, -117, -46, -127, 6, 69, 99, + -45, 24, 91, 91, 11, 0 + }; + + Map parameters = Collections.singletonMap(CLIENT_NO_CONTEXT, null); + + WebSocketClientExtension extension = handshaker.handshakeExtension( + new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, parameters)); + assertNotNull(extension); + + EmbeddedChannel decoderChannel = new EmbeddedChannel(extension.newExtensionDecoder()); + assertTrue( + decoderChannel.writeInbound(new TextWebSocketFrame(true, RSV1, Unpooled.copiedBuffer(firstPayload)))); + TextWebSocketFrame firstFrameDecompressed = decoderChannel.readInbound(); + assertTrue( + decoderChannel.writeInbound(new TextWebSocketFrame(true, RSV1, 
Unpooled.copiedBuffer(secondPayload)))); + TextWebSocketFrame secondFrameDecompressed = decoderChannel.readInbound(); + + assertNotNull(firstFrameDecompressed); + assertNotNull(firstFrameDecompressed.content()); + assertTrue(firstFrameDecompressed instanceof TextWebSocketFrame); + assertEquals(firstFrameDecompressed.text(), + "{\"info\":\"Welcome to the BitMEX Realtime API.\",\"version\"" + + ":\"2018-10-02T22:53:23.000Z\",\"timestamp\":\"2018-10-15T06:43:40.437Z\"," + + "\"docs\":\"https://www.bitmex.com/app/wsAPI\",\"limit\":{\"remaining\":39}}"); + assertTrue(firstFrameDecompressed.release()); + + assertNotNull(secondFrameDecompressed); + assertNotNull(secondFrameDecompressed.content()); + assertTrue(secondFrameDecompressed instanceof TextWebSocketFrame); + assertEquals(secondFrameDecompressed.text(), + "{\"success\":true,\"subscribe\":\"orderBookL2:XBTUSD\"," + + "\"request\":{\"op\":\"subscribe\",\"args\":[\"orderBookL2:XBTUSD\"]}}"); + assertTrue(secondFrameDecompressed.release()); + + assertFalse(decoderChannel.finish()); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoderTest.java index d5e5868f0cd..7f354669ba1 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,20 +15,32 @@ */ package io.netty.handler.codec.http.websocketx.extensions.compression; -import static org.junit.Assert.*; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.DecoderException; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibWrapper; import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame; import io.netty.handler.codec.http.websocketx.ContinuationWebSocketFrame; +import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; +import io.netty.handler.codec.http.websocketx.WebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter; +import org.junit.jupiter.api.Test; -import java.util.Arrays; import java.util.Random; -import org.junit.Test; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter.*; +import static io.netty.handler.codec.http.websocketx.extensions.compression.DeflateDecoder.*; +import static io.netty.util.CharsetUtil.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class PerMessageDeflateDecoderTest { @@ -44,7 +56,7 @@ public void testCompressedFrame() { byte[] payload = 
new byte[300]; random.nextBytes(payload); - encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload)); + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload))); ByteBuf compressedPayload = encoderChannel.readOutbound(); BinaryWebSocketFrame compressedFrame = new BinaryWebSocketFrame(true, @@ -52,19 +64,18 @@ public void testCompressedFrame() { compressedPayload.slice(0, compressedPayload.readableBytes() - 4)); // execute - decoderChannel.writeInbound(compressedFrame); + assertTrue(decoderChannel.writeInbound(compressedFrame)); BinaryWebSocketFrame uncompressedFrame = decoderChannel.readInbound(); // test assertNotNull(uncompressedFrame); assertNotNull(uncompressedFrame.content()); - assertTrue(uncompressedFrame instanceof BinaryWebSocketFrame); assertEquals(WebSocketExtension.RSV3, uncompressedFrame.rsv()); assertEquals(300, uncompressedFrame.content().readableBytes()); byte[] finalPayload = new byte[300]; uncompressedFrame.content().readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); uncompressedFrame.release(); } @@ -80,24 +91,23 @@ public void testNormalFrame() { WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload)); // execute - decoderChannel.writeInbound(frame); + assertTrue(decoderChannel.writeInbound(frame)); BinaryWebSocketFrame newFrame = decoderChannel.readInbound(); // test assertNotNull(newFrame); assertNotNull(newFrame.content()); - assertTrue(newFrame instanceof BinaryWebSocketFrame); assertEquals(WebSocketExtension.RSV3, newFrame.rsv()); assertEquals(300, newFrame.content().readableBytes()); byte[] finalPayload = new byte[300]; newFrame.content().readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); newFrame.release(); } @Test - public void testFramementedFrame() { + public void testFragmentedFrame() { EmbeddedChannel encoderChannel = new EmbeddedChannel( 
ZlibCodecFactory.newZlibEncoder(ZlibWrapper.NONE, 9, 15, 8)); EmbeddedChannel decoderChannel = new EmbeddedChannel(new PerMessageDeflateDecoder(false)); @@ -106,7 +116,7 @@ public void testFramementedFrame() { byte[] payload = new byte[300]; random.nextBytes(payload); - encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload)); + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload))); ByteBuf compressedPayload = encoderChannel.readOutbound(); compressedPayload = compressedPayload.slice(0, compressedPayload.readableBytes() - 4); @@ -121,9 +131,9 @@ public void testFramementedFrame() { compressedPayload.readableBytes() - oneThird * 2)); // execute - decoderChannel.writeInbound(compressedFrame1.retain()); - decoderChannel.writeInbound(compressedFrame2.retain()); - decoderChannel.writeInbound(compressedFrame3); + assertTrue(decoderChannel.writeInbound(compressedFrame1.retain())); + assertTrue(decoderChannel.writeInbound(compressedFrame2.retain())); + assertTrue(decoderChannel.writeInbound(compressedFrame3)); BinaryWebSocketFrame uncompressedFrame1 = decoderChannel.readInbound(); ContinuationWebSocketFrame uncompressedFrame2 = decoderChannel.readInbound(); ContinuationWebSocketFrame uncompressedFrame3 = decoderChannel.readInbound(); @@ -142,7 +152,7 @@ public void testFramementedFrame() { byte[] finalPayload = new byte[300]; finalPayloadWrapped.readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); finalPayloadWrapped.release(); } @@ -158,9 +168,9 @@ public void testMultiCompressedPayloadWithinFrame() { byte[] payload2 = new byte[100]; random.nextBytes(payload2); - encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload1)); + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload1))); ByteBuf compressedPayload1 = encoderChannel.readOutbound(); - encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload2)); + 
assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload2))); ByteBuf compressedPayload2 = encoderChannel.readOutbound(); BinaryWebSocketFrame compressedFrame = new BinaryWebSocketFrame(true, @@ -170,23 +180,206 @@ public void testMultiCompressedPayloadWithinFrame() { compressedPayload2.slice(0, compressedPayload2.readableBytes() - 4))); // execute - decoderChannel.writeInbound(compressedFrame); + assertTrue(decoderChannel.writeInbound(compressedFrame)); BinaryWebSocketFrame uncompressedFrame = decoderChannel.readInbound(); // test assertNotNull(uncompressedFrame); assertNotNull(uncompressedFrame.content()); - assertTrue(uncompressedFrame instanceof BinaryWebSocketFrame); assertEquals(WebSocketExtension.RSV3, uncompressedFrame.rsv()); assertEquals(200, uncompressedFrame.content().readableBytes()); byte[] finalPayload1 = new byte[100]; uncompressedFrame.content().readBytes(finalPayload1); - assertTrue(Arrays.equals(finalPayload1, payload1)); + assertArrayEquals(finalPayload1, payload1); byte[] finalPayload2 = new byte[100]; uncompressedFrame.content().readBytes(finalPayload2); - assertTrue(Arrays.equals(finalPayload2, payload2)); + assertArrayEquals(finalPayload2, payload2); uncompressedFrame.release(); } + @Test + public void testDecompressionSkipForBinaryFrame() { + EmbeddedChannel encoderChannel = new EmbeddedChannel( + ZlibCodecFactory.newZlibEncoder(ZlibWrapper.NONE, 9, 15, 8)); + EmbeddedChannel decoderChannel = new EmbeddedChannel(new PerMessageDeflateDecoder(false, ALWAYS_SKIP)); + + byte[] payload = new byte[300]; + random.nextBytes(payload); + + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(payload))); + ByteBuf compressedPayload = encoderChannel.readOutbound(); + + BinaryWebSocketFrame compressedBinaryFrame = new BinaryWebSocketFrame(true, WebSocketExtension.RSV1, + compressedPayload); + assertTrue(decoderChannel.writeInbound(compressedBinaryFrame)); + + WebSocketFrame inboundFrame = decoderChannel.readInbound(); + + 
assertEquals(WebSocketExtension.RSV1, inboundFrame.rsv()); + assertEquals(compressedPayload, inboundFrame.content()); + assertTrue(inboundFrame.release()); + + assertTrue(encoderChannel.finishAndReleaseAll()); + assertFalse(decoderChannel.finish()); + } + + @Test + public void testSelectivityDecompressionSkip() { + WebSocketExtensionFilter selectivityDecompressionFilter = + frame -> frame instanceof TextWebSocketFrame && frame.content().readableBytes() < 100; + EmbeddedChannel encoderChannel = new EmbeddedChannel( + ZlibCodecFactory.newZlibEncoder(ZlibWrapper.NONE, 9, 15, 8)); + EmbeddedChannel decoderChannel = new EmbeddedChannel( + new PerMessageDeflateDecoder(false, selectivityDecompressionFilter)); + + String textPayload = "compressed payload"; + byte[] binaryPayload = new byte[300]; + random.nextBytes(binaryPayload); + + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(textPayload.getBytes(UTF_8)))); + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(binaryPayload))); + ByteBuf compressedTextPayload = encoderChannel.readOutbound(); + ByteBuf compressedBinaryPayload = encoderChannel.readOutbound(); + + TextWebSocketFrame compressedTextFrame = new TextWebSocketFrame(true, WebSocketExtension.RSV1, + compressedTextPayload); + BinaryWebSocketFrame compressedBinaryFrame = new BinaryWebSocketFrame(true, WebSocketExtension.RSV1, + compressedBinaryPayload); + + assertTrue(decoderChannel.writeInbound(compressedTextFrame)); + assertTrue(decoderChannel.writeInbound(compressedBinaryFrame)); + + TextWebSocketFrame inboundTextFrame = decoderChannel.readInbound(); + BinaryWebSocketFrame inboundBinaryFrame = decoderChannel.readInbound(); + + assertEquals(WebSocketExtension.RSV1, inboundTextFrame.rsv()); + assertEquals(compressedTextPayload, inboundTextFrame.content()); + assertTrue(inboundTextFrame.release()); + + assertEquals(0, inboundBinaryFrame.rsv()); + assertArrayEquals(binaryPayload, ByteBufUtil.getBytes(inboundBinaryFrame.content())); + 
assertTrue(inboundBinaryFrame.release()); + + assertTrue(encoderChannel.finishAndReleaseAll()); + assertFalse(decoderChannel.finish()); + } + + @Test + public void testIllegalStateWhenDecompressionInProgress() { + WebSocketExtensionFilter selectivityDecompressionFilter = frame -> frame.content().readableBytes() < 100; + + EmbeddedChannel encoderChannel = new EmbeddedChannel( + ZlibCodecFactory.newZlibEncoder(ZlibWrapper.NONE, 9, 15, 8)); + EmbeddedChannel decoderChannel = new EmbeddedChannel( + new PerMessageDeflateDecoder(false, selectivityDecompressionFilter)); + + byte[] firstPayload = new byte[200]; + random.nextBytes(firstPayload); + + byte[] finalPayload = new byte[50]; + random.nextBytes(finalPayload); + + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(firstPayload))); + assertTrue(encoderChannel.writeOutbound(Unpooled.wrappedBuffer(finalPayload))); + ByteBuf compressedFirstPayload = encoderChannel.readOutbound(); + ByteBuf compressedFinalPayload = encoderChannel.readOutbound(); + assertTrue(encoderChannel.finishAndReleaseAll()); + + BinaryWebSocketFrame firstPart = new BinaryWebSocketFrame(false, WebSocketExtension.RSV1, + compressedFirstPayload); + ContinuationWebSocketFrame finalPart = new ContinuationWebSocketFrame(true, WebSocketExtension.RSV1, + compressedFinalPayload); + assertTrue(decoderChannel.writeInbound(firstPart)); + + BinaryWebSocketFrame outboundFirstPart = decoderChannel.readInbound(); + //first part is decompressed + assertEquals(0, outboundFirstPart.rsv()); + assertArrayEquals(firstPayload, ByteBufUtil.getBytes(outboundFirstPart.content())); + assertTrue(outboundFirstPart.release()); + + //final part throwing exception + try { + assertThrows(DecoderException.class, () -> decoderChannel.writeInbound(finalPart)); + } finally { + assertTrue(finalPart.release()); + assertFalse(encoderChannel.finishAndReleaseAll()); + } + } + + @Test + public void testEmptyFrameDecompression() { + EmbeddedChannel decoderChannel = new 
EmbeddedChannel(new PerMessageDeflateDecoder(false)); + + TextWebSocketFrame emptyDeflateBlockFrame = new TextWebSocketFrame(true, WebSocketExtension.RSV1, + EMPTY_DEFLATE_BLOCK); + + assertTrue(decoderChannel.writeInbound(emptyDeflateBlockFrame)); + TextWebSocketFrame emptyBufferFrame = decoderChannel.readInbound(); + + assertFalse(emptyBufferFrame.content().isReadable()); + + // Composite empty buffer + assertTrue(emptyBufferFrame.release()); + assertFalse(decoderChannel.finish()); + } + + @Test + public void testFragmentedFrameWithLeftOverInLastFragment() { + String hexDump = "677170647a777a737574656b707a787a6f6a7561756578756f6b7868616371716c657a6d64697479766d726f6" + + "269746c6376777464776f6f72767a726f64667278676764687775786f6762766d776d706b76697773777a7072" + + "6a6a737279707a7078697a6c69616d7461656d646278626d786f66666e686e776a7a7461746d7a776668776b6" + + "f6f736e73746575637a6d727a7175707a6e74627578687871767771697a71766c64626d78726d6d7675756877" + + "62667963626b687a726d676e646263776e67797264706d6c6863626577616967706a78636a72697464756e627" + + "977616f79736475676f76736f7178746a7a7479626c64636b6b6778637768746c62"; + EmbeddedChannel encoderChannel = new EmbeddedChannel( + ZlibCodecFactory.newZlibEncoder(ZlibWrapper.NONE, 9, 15, 8)); + EmbeddedChannel decoderChannel = new EmbeddedChannel(new PerMessageDeflateDecoder(false)); + + ByteBuf originPayload = Unpooled.wrappedBuffer(ByteBufUtil.decodeHexDump(hexDump)); + assertTrue(encoderChannel.writeOutbound(originPayload.duplicate().retain())); + + ByteBuf compressedPayload = encoderChannel.readOutbound(); + compressedPayload = compressedPayload.slice(0, compressedPayload.readableBytes() - 4); + + int oneThird = compressedPayload.readableBytes() / 3; + + TextWebSocketFrame compressedFrame1 = new TextWebSocketFrame( + false, WebSocketExtension.RSV1, compressedPayload.slice(0, oneThird)); + ContinuationWebSocketFrame compressedFrame2 = new ContinuationWebSocketFrame( + false, WebSocketExtension.RSV3, 
compressedPayload.slice(oneThird, oneThird)); + ContinuationWebSocketFrame compressedFrame3 = new ContinuationWebSocketFrame( + false, WebSocketExtension.RSV3, compressedPayload.slice(oneThird * 2, oneThird)); + int offset = oneThird * 3; + ContinuationWebSocketFrame compressedFrameWithExtraData = new ContinuationWebSocketFrame( + true, WebSocketExtension.RSV3, compressedPayload.slice(offset, + compressedPayload.readableBytes() - offset)); + + // check that last fragment contains only one extra byte + assertEquals(1, compressedFrameWithExtraData.content().readableBytes()); + assertEquals(1, compressedFrameWithExtraData.content().getByte(0)); + + // write compressed frames + assertTrue(decoderChannel.writeInbound(compressedFrame1.retain())); + assertTrue(decoderChannel.writeInbound(compressedFrame2.retain())); + assertTrue(decoderChannel.writeInbound(compressedFrame3.retain())); + assertTrue(decoderChannel.writeInbound(compressedFrameWithExtraData)); + + // read uncompressed frames + TextWebSocketFrame uncompressedFrame1 = decoderChannel.readInbound(); + ContinuationWebSocketFrame uncompressedFrame2 = decoderChannel.readInbound(); + ContinuationWebSocketFrame uncompressedFrame3 = decoderChannel.readInbound(); + ContinuationWebSocketFrame uncompressedExtraData = decoderChannel.readInbound(); + assertFalse(uncompressedExtraData.content().isReadable()); + + ByteBuf uncompressedPayload = Unpooled.wrappedBuffer(uncompressedFrame1.content(), uncompressedFrame2.content(), + uncompressedFrame3.content(), uncompressedExtraData.content()); + assertEquals(originPayload, uncompressedPayload); + + assertTrue(originPayload.release()); + assertTrue(uncompressedPayload.release()); + + assertTrue(encoderChannel.finishAndReleaseAll()); + assertFalse(decoderChannel.finish()); + } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoderTest.java 
b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoderTest.java index 66ae9627e9b..97e41def27a 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,20 +15,33 @@ */ package io.netty.handler.codec.http.websocketx.extensions.compression; -import static org.junit.Assert.*; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.EncoderException; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibWrapper; import io.netty.handler.codec.http.websocketx.BinaryWebSocketFrame; import io.netty.handler.codec.http.websocketx.ContinuationWebSocketFrame; +import io.netty.handler.codec.http.websocketx.TextWebSocketFrame; +import io.netty.handler.codec.http.websocketx.WebSocketFrame; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtension; +import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter; +import org.junit.jupiter.api.Test; import java.util.Arrays; import java.util.Random; -import org.junit.Test; +import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionFilter.*; +import static io.netty.handler.codec.http.websocketx.extensions.compression.DeflateDecoder.*; +import 
static io.netty.util.CharsetUtil.*; +import static org.junit.jupiter.api.Assertions.assertArrayEquals; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertThrows; +import static org.junit.jupiter.api.Assertions.assertTrue; public class PerMessageDeflateEncoderTest { @@ -44,26 +57,25 @@ public void testCompressedFrame() { byte[] payload = new byte[300]; random.nextBytes(payload); BinaryWebSocketFrame frame = new BinaryWebSocketFrame(true, - WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload)); + WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload)); // execute - encoderChannel.writeOutbound(frame); + assertTrue(encoderChannel.writeOutbound(frame)); BinaryWebSocketFrame compressedFrame = encoderChannel.readOutbound(); // test assertNotNull(compressedFrame); assertNotNull(compressedFrame.content()); - assertTrue(compressedFrame instanceof BinaryWebSocketFrame); assertEquals(WebSocketExtension.RSV1 | WebSocketExtension.RSV3, compressedFrame.rsv()); - decoderChannel.writeInbound(compressedFrame.content()); - decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL); + assertTrue(decoderChannel.writeInbound(compressedFrame.content())); + assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload = decoderChannel.readInbound(); assertEquals(300, uncompressedPayload.readableBytes()); byte[] finalPayload = new byte[300]; uncompressedPayload.readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); uncompressedPayload.release(); } @@ -76,28 +88,29 @@ public void testAlreadyCompressedFrame() { random.nextBytes(payload); BinaryWebSocketFrame frame = new BinaryWebSocketFrame(true, - WebSocketExtension.RSV3 | WebSocketExtension.RSV1, Unpooled.wrappedBuffer(payload)); + 
WebSocketExtension.RSV3 | WebSocketExtension.RSV1, + Unpooled.wrappedBuffer(payload)); // execute - encoderChannel.writeOutbound(frame); + assertTrue(encoderChannel.writeOutbound(frame)); BinaryWebSocketFrame newFrame = encoderChannel.readOutbound(); // test assertNotNull(newFrame); assertNotNull(newFrame.content()); - assertTrue(newFrame instanceof BinaryWebSocketFrame); assertEquals(WebSocketExtension.RSV3 | WebSocketExtension.RSV1, newFrame.rsv()); assertEquals(300, newFrame.content().readableBytes()); byte[] finalPayload = new byte[300]; newFrame.content().readBytes(finalPayload); - assertTrue(Arrays.equals(finalPayload, payload)); + assertArrayEquals(finalPayload, payload); newFrame.release(); } @Test - public void testFramementedFrame() { - EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false)); + public void testFragmentedFrame() { + EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false, + NEVER_SKIP)); EmbeddedChannel decoderChannel = new EmbeddedChannel( ZlibCodecFactory.newZlibDecoder(ZlibWrapper.NONE)); @@ -110,16 +123,19 @@ public void testFramementedFrame() { random.nextBytes(payload3); BinaryWebSocketFrame frame1 = new BinaryWebSocketFrame(false, - WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload1)); + WebSocketExtension.RSV3, + Unpooled.wrappedBuffer(payload1)); ContinuationWebSocketFrame frame2 = new ContinuationWebSocketFrame(false, - WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload2)); + WebSocketExtension.RSV3, + Unpooled.wrappedBuffer(payload2)); ContinuationWebSocketFrame frame3 = new ContinuationWebSocketFrame(true, - WebSocketExtension.RSV3, Unpooled.wrappedBuffer(payload3)); + WebSocketExtension.RSV3, + Unpooled.wrappedBuffer(payload3)); // execute - encoderChannel.writeOutbound(frame1); - encoderChannel.writeOutbound(frame2); - encoderChannel.writeOutbound(frame3); + assertTrue(encoderChannel.writeOutbound(frame1)); + 
assertTrue(encoderChannel.writeOutbound(frame2)); + assertTrue(encoderChannel.writeOutbound(frame3)); BinaryWebSocketFrame compressedFrame1 = encoderChannel.readOutbound(); ContinuationWebSocketFrame compressedFrame2 = encoderChannel.readOutbound(); ContinuationWebSocketFrame compressedFrame3 = encoderChannel.readOutbound(); @@ -135,26 +151,154 @@ public void testFramementedFrame() { assertFalse(compressedFrame2.isFinalFragment()); assertTrue(compressedFrame3.isFinalFragment()); - decoderChannel.writeInbound(compressedFrame1.content()); + assertTrue(decoderChannel.writeInbound(compressedFrame1.content())); ByteBuf uncompressedPayload1 = decoderChannel.readInbound(); byte[] finalPayload1 = new byte[100]; uncompressedPayload1.readBytes(finalPayload1); - assertTrue(Arrays.equals(finalPayload1, payload1)); + assertArrayEquals(finalPayload1, payload1); uncompressedPayload1.release(); - decoderChannel.writeInbound(compressedFrame2.content()); + assertTrue(decoderChannel.writeInbound(compressedFrame2.content())); ByteBuf uncompressedPayload2 = decoderChannel.readInbound(); byte[] finalPayload2 = new byte[100]; uncompressedPayload2.readBytes(finalPayload2); - assertTrue(Arrays.equals(finalPayload2, payload2)); + assertArrayEquals(finalPayload2, payload2); uncompressedPayload2.release(); - decoderChannel.writeInbound(compressedFrame3.content()); - decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL); + assertTrue(decoderChannel.writeInbound(compressedFrame3.content())); + assertTrue(decoderChannel.writeInbound(DeflateDecoder.FRAME_TAIL.duplicate())); ByteBuf uncompressedPayload3 = decoderChannel.readInbound(); byte[] finalPayload3 = new byte[100]; uncompressedPayload3.readBytes(finalPayload3); - assertTrue(Arrays.equals(finalPayload3, payload3)); + assertArrayEquals(finalPayload3, payload3); uncompressedPayload3.release(); } + + @Test + public void testCompressionSkipForBinaryFrame() { + EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 
15, false, + ALWAYS_SKIP)); + byte[] payload = new byte[300]; + random.nextBytes(payload); + + WebSocketFrame binaryFrame = new BinaryWebSocketFrame(Unpooled.wrappedBuffer(payload)); + + assertTrue(encoderChannel.writeOutbound(binaryFrame.copy())); + WebSocketFrame outboundFrame = encoderChannel.readOutbound(); + + assertEquals(0, outboundFrame.rsv()); + assertArrayEquals(payload, ByteBufUtil.getBytes(outboundFrame.content())); + assertTrue(outboundFrame.release()); + + assertFalse(encoderChannel.finish()); + } + + @Test + public void testSelectivityCompressionSkip() { + WebSocketExtensionFilter selectivityCompressionFilter = + frame -> (frame instanceof TextWebSocketFrame || frame instanceof BinaryWebSocketFrame) + && frame.content().readableBytes() < 100; + EmbeddedChannel encoderChannel = new EmbeddedChannel( + new PerMessageDeflateEncoder(9, 15, false, selectivityCompressionFilter)); + EmbeddedChannel decoderChannel = new EmbeddedChannel( + ZlibCodecFactory.newZlibDecoder(ZlibWrapper.NONE)); + + String textPayload = "not compressed payload"; + byte[] binaryPayload = new byte[101]; + random.nextBytes(binaryPayload); + + WebSocketFrame textFrame = new TextWebSocketFrame(textPayload); + BinaryWebSocketFrame binaryFrame = new BinaryWebSocketFrame(Unpooled.wrappedBuffer(binaryPayload)); + + assertTrue(encoderChannel.writeOutbound(textFrame)); + assertTrue(encoderChannel.writeOutbound(binaryFrame)); + + WebSocketFrame outboundTextFrame = encoderChannel.readOutbound(); + + //compression skipped for textFrame + assertEquals(0, outboundTextFrame.rsv()); + assertEquals(textPayload, outboundTextFrame.content().toString(UTF_8)); + assertTrue(outboundTextFrame.release()); + + WebSocketFrame outboundBinaryFrame = encoderChannel.readOutbound(); + + //compression not skipped for binaryFrame + assertEquals(WebSocketExtension.RSV1, outboundBinaryFrame.rsv()); + + assertTrue(decoderChannel.writeInbound(outboundBinaryFrame.content().retain())); + ByteBuf uncompressedBinaryPayload 
= decoderChannel.readInbound(); + + assertArrayEquals(binaryPayload, ByteBufUtil.getBytes(uncompressedBinaryPayload)); + + assertTrue(outboundBinaryFrame.release()); + assertTrue(uncompressedBinaryPayload.release()); + + assertFalse(encoderChannel.finish()); + assertFalse(decoderChannel.finish()); + } + + @Test + public void testIllegalStateWhenCompressionInProgress() { + WebSocketExtensionFilter selectivityCompressionFilter = frame -> frame.content().readableBytes() < 100; + EmbeddedChannel encoderChannel = new EmbeddedChannel( + new PerMessageDeflateEncoder(9, 15, false, selectivityCompressionFilter)); + + byte[] firstPayload = new byte[200]; + random.nextBytes(firstPayload); + + byte[] finalPayload = new byte[90]; + random.nextBytes(finalPayload); + + BinaryWebSocketFrame firstPart = new BinaryWebSocketFrame(false, 0, Unpooled.wrappedBuffer(firstPayload)); + ContinuationWebSocketFrame finalPart = new ContinuationWebSocketFrame(true, 0, + Unpooled.wrappedBuffer(finalPayload)); + assertTrue(encoderChannel.writeOutbound(firstPart)); + + BinaryWebSocketFrame outboundFirstPart = encoderChannel.readOutbound(); + //first part is compressed + assertEquals(WebSocketExtension.RSV1, outboundFirstPart.rsv()); + assertFalse(Arrays.equals(firstPayload, ByteBufUtil.getBytes(outboundFirstPart.content()))); + assertTrue(outboundFirstPart.release()); + + //final part throwing exception + try { + assertThrows(EncoderException.class, () -> encoderChannel.writeOutbound(finalPart)); + } finally { + assertTrue(finalPart.release()); + assertFalse(encoderChannel.finishAndReleaseAll()); + } + } + + @Test + public void testEmptyFrameCompression() { + EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false)); + + TextWebSocketFrame emptyFrame = new TextWebSocketFrame(""); + + assertTrue(encoderChannel.writeOutbound(emptyFrame)); + TextWebSocketFrame emptyDeflateFrame = encoderChannel.readOutbound(); + + assertEquals(WebSocketExtension.RSV1, 
emptyDeflateFrame.rsv()); + assertTrue(ByteBufUtil.equals(EMPTY_DEFLATE_BLOCK, emptyDeflateFrame.content())); + // Unreleasable buffer + assertFalse(emptyDeflateFrame.release()); + + assertFalse(encoderChannel.finish()); + } + + @Test + public void testCodecExceptionForNotFinEmptyFrame() { + EmbeddedChannel encoderChannel = new EmbeddedChannel(new PerMessageDeflateEncoder(9, 15, false)); + + TextWebSocketFrame emptyNotFinFrame = new TextWebSocketFrame(false, 0, ""); + + try { + assertThrows(EncoderException.class, () -> encoderChannel.writeOutbound(emptyNotFinFrame)); + } finally { + // EmptyByteBuf buffer + assertFalse(emptyNotFinFrame.release()); + assertFalse(encoderChannel.finish()); + } + } + } diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshakerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshakerTest.java index a837c4993bc..8a23a0a828c 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshakerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/PerMessageDeflateServerExtensionHandshakerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -17,7 +17,11 @@ import static io.netty.handler.codec.http.websocketx.extensions.compression. 
PerMessageDeflateServerExtensionHandshaker.*; -import static org.junit.Assert.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; + import io.netty.handler.codec.http.websocketx.extensions.WebSocketServerExtension; import io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionData; @@ -25,7 +29,7 @@ import java.util.HashMap; import java.util.Map; -import org.junit.Test; +import org.junit.jupiter.api.Test; public class PerMessageDeflateServerExtensionHandshakerTest { @@ -41,7 +45,7 @@ public void testNormalHandshake() { // execute extension = handshaker.handshakeExtension( - new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, Collections.emptyMap())); + new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, Collections.emptyMap())); // test assertNotNull(extension); @@ -50,19 +54,19 @@ public void testNormalHandshake() { assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); // execute - data = extension.newReponseData(); + data = extension.newResponseData(); assertEquals(PERMESSAGE_DEFLATE_EXTENSION, data.name()); assertTrue(data.parameters().isEmpty()); // initialize - parameters = new HashMap(); + parameters = new HashMap<>(); parameters.put(CLIENT_MAX_WINDOW, null); parameters.put(CLIENT_NO_CONTEXT, null); // execute extension = handshaker.handshakeExtension( - new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, Collections.emptyMap())); + new WebSocketExtensionData(PERMESSAGE_DEFLATE_EXTENSION, Collections.emptyMap())); // test assertNotNull(extension); @@ -71,14 +75,14 @@ public void testNormalHandshake() { assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); // execute - data = extension.newReponseData(); + data = extension.newResponseData(); // test 
assertEquals(PERMESSAGE_DEFLATE_EXTENSION, data.name()); assertTrue(data.parameters().isEmpty()); // initialize - parameters = new HashMap(); + parameters = new HashMap<>(); parameters.put(SERVER_MAX_WINDOW, "12"); parameters.put(SERVER_NO_CONTEXT, null); @@ -100,7 +104,7 @@ public void testCustomHandshake() { PerMessageDeflateServerExtensionHandshaker handshaker = new PerMessageDeflateServerExtensionHandshaker(6, true, 10, true, true); - parameters = new HashMap(); + parameters = new HashMap<>(); parameters.put(CLIENT_MAX_WINDOW, null); parameters.put(SERVER_MAX_WINDOW, "12"); parameters.put(CLIENT_NO_CONTEXT, null); @@ -117,7 +121,7 @@ public void testCustomHandshake() { assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); // execute - data = extension.newReponseData(); + data = extension.newResponseData(); // test assertEquals(PERMESSAGE_DEFLATE_EXTENSION, data.name()); @@ -129,7 +133,7 @@ public void testCustomHandshake() { assertTrue(data.parameters().containsKey(SERVER_MAX_WINDOW)); // initialize - parameters = new HashMap(); + parameters = new HashMap<>(); parameters.put(SERVER_MAX_WINDOW, "12"); parameters.put(SERVER_NO_CONTEXT, null); @@ -144,7 +148,7 @@ public void testCustomHandshake() { assertTrue(extension.newExtensionEncoder() instanceof PerMessageDeflateEncoder); // execute - data = extension.newReponseData(); + data = extension.newResponseData(); // test assertEquals(PERMESSAGE_DEFLATE_EXTENSION, data.name()); @@ -154,7 +158,7 @@ public void testCustomHandshake() { assertTrue(data.parameters().containsKey(SERVER_NO_CONTEXT)); // initialize - parameters = new HashMap(); + parameters = new HashMap<>(); // execute extension = handshaker.handshakeExtension( @@ -163,7 +167,7 @@ public void testCustomHandshake() { assertNotNull(extension); // execute - data = extension.newReponseData(); + data = extension.newResponseData(); // test assertEquals(PERMESSAGE_DEFLATE_EXTENSION, data.name()); diff --git 
a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandlerTest.java index 1d383e9adcc..ad3005f5b26 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandlerTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/http/websocketx/extensions/compression/WebSocketServerCompressionHandlerTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -25,12 +25,16 @@ import java.util.List; -import org.junit.Assert; -import org.junit.Test; +import org.junit.jupiter.api.Test; import static io.netty.handler.codec.http.websocketx.extensions.compression. 
PerMessageDeflateServerExtensionHandshaker.*; import static io.netty.handler.codec.http.websocketx.extensions.WebSocketExtensionTestUtil.*; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertFalse; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; +import static org.junit.jupiter.api.Assertions.assertTrue; public class WebSocketServerCompressionHandlerTest { @@ -48,10 +52,10 @@ public void testNormalSuccess() { List exts = WebSocketExtensionUtil.extractExtensions( res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); - Assert.assertTrue(exts.get(0).parameters().isEmpty()); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); + assertTrue(exts.get(0).parameters().isEmpty()); + assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } @Test @@ -69,10 +73,10 @@ public void testClientWindowSizeSuccess() { List exts = WebSocketExtensionUtil.extractExtensions( res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); - Assert.assertEquals("10", exts.get(0).parameters().get(CLIENT_MAX_WINDOW)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); + assertEquals("10", exts.get(0).parameters().get(CLIENT_MAX_WINDOW)); + assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } @Test @@ -90,10 +94,10 @@ public 
void testClientWindowSizeUnavailable() { List exts = WebSocketExtensionUtil.extractExtensions( res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); - Assert.assertTrue(exts.get(0).parameters().isEmpty()); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); + assertTrue(exts.get(0).parameters().isEmpty()); + assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } @Test @@ -111,10 +115,10 @@ public void testServerWindowSizeSuccess() { List exts = WebSocketExtensionUtil.extractExtensions( res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); - Assert.assertEquals("10", exts.get(0).parameters().get(SERVER_MAX_WINDOW)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); + assertEquals("10", exts.get(0).parameters().get(SERVER_MAX_WINDOW)); + assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } @Test @@ -130,9 +134,9 @@ public void testServerWindowSizeDisable() { HttpResponse res2 = ch.readOutbound(); - Assert.assertFalse(res2.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertFalse(res2.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); + assertNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + 
assertNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } @Test @@ -147,9 +151,9 @@ public void testServerNoContext() { HttpResponse res2 = ch.readOutbound(); - Assert.assertFalse(res2.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertFalse(res2.headers().contains(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); + assertNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + assertNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } @Test @@ -166,10 +170,10 @@ public void testClientNoContext() { List exts = WebSocketExtensionUtil.extractExtensions( res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); - Assert.assertTrue(exts.get(0).parameters().isEmpty()); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); + assertTrue(exts.get(0).parameters().isEmpty()); + assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } @Test @@ -188,10 +192,10 @@ public void testServerWindowSizeDisableThenFallback() { List exts = WebSocketExtensionUtil.extractExtensions( res2.headers().get(HttpHeaderNames.SEC_WEBSOCKET_EXTENSIONS)); - Assert.assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); - Assert.assertTrue(exts.get(0).parameters().isEmpty()); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); - Assert.assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); + assertEquals(PERMESSAGE_DEFLATE_EXTENSION, exts.get(0).name()); + assertTrue(exts.get(0).parameters().isEmpty()); + assertNotNull(ch.pipeline().get(PerMessageDeflateDecoder.class)); + 
assertNotNull(ch.pipeline().get(PerMessageDeflateEncoder.class)); } } diff --git a/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspDecoderTest.java index d416720a25d..489491214f9 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspDecoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspDecoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,16 +15,17 @@ */ package io.netty.handler.codec.rtsp; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; import io.netty.buffer.Unpooled; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.FullHttpRequest; import io.netty.handler.codec.http.FullHttpResponse; import io.netty.handler.codec.http.HttpObject; import io.netty.handler.codec.http.HttpObjectAggregator; +import org.junit.jupiter.api.Test; -import org.junit.Test; + +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertTrue; /** * Test cases for RTSP decoder. 
@@ -59,13 +60,11 @@ public void testReceiveAnnounce() { Unpooled.wrappedBuffer(data2)); HttpObject res1 = ch.readInbound(); - System.out.println(res1); assertNotNull(res1); assertTrue(res1 instanceof FullHttpRequest); ((FullHttpRequest) res1).release(); HttpObject res2 = ch.readInbound(); - System.out.println(res2); assertNotNull(res2); assertTrue(res2 instanceof FullHttpResponse); ((FullHttpResponse) res2).release(); diff --git a/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspEncoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspEncoderTest.java index 00bf6aad1ba..4af61a1073a 100644 --- a/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspEncoderTest.java +++ b/codec-http/src/test/java/io/netty/handler/codec/rtsp/RtspEncoderTest.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,7 +15,6 @@ */ package io.netty.handler.codec.rtsp; -import static org.junit.Assert.assertEquals; import io.netty.buffer.ByteBuf; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.http.DefaultFullHttpRequest; @@ -27,8 +26,9 @@ import io.netty.handler.codec.http.HttpRequest; import io.netty.handler.codec.http.HttpResponse; import io.netty.util.CharsetUtil; +import org.junit.jupiter.api.Test; -import org.junit.Test; +import static org.junit.jupiter.api.Assertions.assertEquals; /** * Test cases for RTSP encoder. 
diff --git a/codec-http/src/test/java/io/netty/handler/codec/spdy/DefaultSpdyHeadersTest.java b/codec-http/src/test/java/io/netty/handler/codec/spdy/DefaultSpdyHeadersTest.java deleted file mode 100644 index c78fe5b8f97..00000000000 --- a/codec-http/src/test/java/io/netty/handler/codec/spdy/DefaultSpdyHeadersTest.java +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Copyright 2015 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; - -public class DefaultSpdyHeadersTest { - - @Test - public void testStringKeyRetrievedAsAsciiString() { - final SpdyHeaders headers = new DefaultSpdyHeaders(); - - // Test adding String key and retrieving it using a AsciiString key - final String method = "GET"; - headers.add(":method", method); - - final String value = headers.getAsString(SpdyHeaders.HttpNames.METHOD.toString()); - assertNotNull(value); - assertEquals(method, value); - - final String value2 = headers.getAsString(SpdyHeaders.HttpNames.METHOD); - assertNotNull(value2); - assertEquals(method, value2); - } - - @Test - public void testAsciiStringKeyRetrievedAsString() { - final SpdyHeaders headers = new DefaultSpdyHeaders(); - - // Test adding AsciiString key and retrieving it using a String key - final String path = "/"; - headers.add(SpdyHeaders.HttpNames.PATH, 
path); - - final String value = headers.getAsString(SpdyHeaders.HttpNames.PATH); - assertNotNull(value); - assertEquals(path, value); - - final String value2 = headers.getAsString(SpdyHeaders.HttpNames.PATH.toString()); - assertNotNull(value2); - assertEquals(path, value2); - } -} diff --git a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyFrameDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyFrameDecoderTest.java deleted file mode 100644 index d8f247e2c22..00000000000 --- a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyFrameDecoderTest.java +++ /dev/null @@ -1,1324 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import java.util.ArrayDeque; -import java.util.Queue; -import java.util.Random; - -import static io.netty.handler.codec.spdy.SpdyCodecUtil.SPDY_HEADER_SIZE; -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; - -public class SpdyFrameDecoderTest { - - private static final Random RANDOM = new Random(); - - private final SpdyFrameDecoderDelegate delegate = mock(SpdyFrameDecoderDelegate.class); - private final TestSpdyFrameDecoderDelegate testDelegate = new TestSpdyFrameDecoderDelegate(); - private SpdyFrameDecoder decoder; - - @Before - public void createDecoder() { - decoder = new SpdyFrameDecoder(SpdyVersion.SPDY_3_1, testDelegate); - } - - @After - public void releaseBuffers() { - testDelegate.releaseAll(); - } - - private final class TestSpdyFrameDecoderDelegate implements SpdyFrameDecoderDelegate { - private final Queue buffers = new ArrayDeque(); - - @Override - public void readDataFrame(int streamId, boolean last, ByteBuf data) { - delegate.readDataFrame(streamId, last, data); - buffers.add(data); - } - - @Override - public void readSynStreamFrame(int streamId, int associatedToStreamId, - byte priority, boolean last, boolean unidirectional) { - delegate.readSynStreamFrame(streamId, associatedToStreamId, priority, last, unidirectional); - } - - @Override - public void readSynReplyFrame(int streamId, boolean last) { - delegate.readSynReplyFrame(streamId, last); - } - - @Override - public void readRstStreamFrame(int streamId, int statusCode) { - delegate.readRstStreamFrame(streamId, statusCode); - } - - @Override - public void readSettingsFrame(boolean clearPersisted) { - delegate.readSettingsFrame(clearPersisted); - } - - @Override - public void readSetting(int id, int value, boolean persistValue, boolean persisted) { - delegate.readSetting(id, value, 
persistValue, persisted); - } - - @Override - public void readSettingsEnd() { - delegate.readSettingsEnd(); - } - - @Override - public void readPingFrame(int id) { - delegate.readPingFrame(id); - } - - @Override - public void readGoAwayFrame(int lastGoodStreamId, int statusCode) { - delegate.readGoAwayFrame(lastGoodStreamId, statusCode); - } - - @Override - public void readHeadersFrame(int streamId, boolean last) { - delegate.readHeadersFrame(streamId, last); - } - - @Override - public void readWindowUpdateFrame(int streamId, int deltaWindowSize) { - delegate.readWindowUpdateFrame(streamId, deltaWindowSize); - } - - @Override - public void readHeaderBlock(ByteBuf headerBlock) { - delegate.readHeaderBlock(headerBlock); - buffers.add(headerBlock); - } - - @Override - public void readHeaderBlockEnd() { - delegate.readHeaderBlockEnd(); - } - - @Override - public void readFrameError(String message) { - delegate.readFrameError(message); - } - - void releaseAll() { - for (;;) { - ByteBuf buf = buffers.poll(); - if (buf == null) { - return; - } - buf.release(); - } - } - } - - private static void encodeDataFrameHeader(ByteBuf buffer, int streamId, byte flags, int length) { - buffer.writeInt(streamId & 0x7FFFFFFF); - buffer.writeByte(flags); - buffer.writeMedium(length); - } - - private static void encodeControlFrameHeader(ByteBuf buffer, short type, byte flags, int length) { - buffer.writeShort(0x8000 | SpdyVersion.SPDY_3_1.getVersion()); - buffer.writeShort(type); - buffer.writeByte(flags); - buffer.writeMedium(length); - } - - @Test - public void testSpdyDataFrame() throws Exception { - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - byte flags = 0; - int length = 1024; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeDataFrameHeader(buf, streamId, flags, length); - for (int i = 0; i < 256; i ++) { - buf.writeInt(RANDOM.nextInt()); - } - decoder.decode(buf); - verify(delegate).readDataFrame(streamId, false, buf.slice(SPDY_HEADER_SIZE, 
length)); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testEmptySpdyDataFrame() throws Exception { - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - byte flags = 0; - int length = 0; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeDataFrameHeader(buf, streamId, flags, length); - - decoder.decode(buf); - verify(delegate).readDataFrame(streamId, false, Unpooled.EMPTY_BUFFER); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testLastSpdyDataFrame() throws Exception { - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - byte flags = 0x01; // FLAG_FIN - int length = 0; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeDataFrameHeader(buf, streamId, flags, length); - - decoder.decode(buf); - verify(delegate).readDataFrame(streamId, true, Unpooled.EMPTY_BUFFER); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdyDataFrameFlags() throws Exception { - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - byte flags = (byte) 0xFE; // should ignore any unknown flags - int length = 0; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeDataFrameHeader(buf, streamId, flags, length); - - decoder.decode(buf); - verify(delegate).readDataFrame(streamId, false, Unpooled.EMPTY_BUFFER); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testIllegalSpdyDataFrameStreamId() throws Exception { - int streamId = 0; // illegal stream identifier - byte flags = 0; - int length = 0; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeDataFrameHeader(buf, streamId, flags, length); - - decoder.decode(buf); - verify(delegate).readFrameError((String) any()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testPipelinedSpdyDataFrames() throws Exception { - int streamId1 = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int streamId2 = RANDOM.nextInt() & 
0x7FFFFFFF | 0x01; - byte flags = 0; - int length = 0; - - ByteBuf buf = Unpooled.buffer(2 * (SPDY_HEADER_SIZE + length)); - encodeDataFrameHeader(buf, streamId1, flags, length); - encodeDataFrameHeader(buf, streamId2, flags, length); - - decoder.decode(buf); - verify(delegate).readDataFrame(streamId1, false, Unpooled.EMPTY_BUFFER); - verify(delegate).readDataFrame(streamId2, false, Unpooled.EMPTY_BUFFER); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdySynStreamFrame() throws Exception { - short type = 1; - byte flags = 0; - int length = 10; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(associatedToStreamId); - buf.writeByte(priority << 5); - buf.writeByte(0); - - decoder.decode(buf); - verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testLastSpdySynStreamFrame() throws Exception { - short type = 1; - byte flags = 0x01; // FLAG_FIN - int length = 10; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(associatedToStreamId); - buf.writeByte(priority << 5); - buf.writeByte(0); - - decoder.decode(buf); - verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, true, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void 
testUnidirectionalSpdySynStreamFrame() throws Exception { - short type = 1; - byte flags = 0x02; // FLAG_UNIDIRECTIONAL - int length = 10; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(associatedToStreamId); - buf.writeByte(priority << 5); - buf.writeByte(0); - - decoder.decode(buf); - verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, true); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testIndependentSpdySynStreamFrame() throws Exception { - short type = 1; - byte flags = 0; - int length = 10; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = 0; // independent of all other streams - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(associatedToStreamId); - buf.writeByte(priority << 5); - buf.writeByte(0); - - decoder.decode(buf); - verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdySynStreamFrameFlags() throws Exception { - short type = 1; - byte flags = (byte) 0xFC; // undefined flags - int length = 10; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - 
buf.writeInt(associatedToStreamId); - buf.writeByte(priority << 5); - buf.writeByte(0); - - decoder.decode(buf); - verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testReservedSpdySynStreamFrameBits() throws Exception { - short type = 1; - byte flags = 0; - int length = 10; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId | 0x80000000); // should ignore reserved bit - buf.writeInt(associatedToStreamId | 0x80000000); // should ignore reserved bit - buf.writeByte(priority << 5 | 0x1F); // should ignore reserved bits - buf.writeByte(0xFF); // should ignore reserved bits - - decoder.decode(buf); - verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdySynStreamFrameLength() throws Exception { - short type = 1; - byte flags = 0; - int length = 8; // invalid length - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(associatedToStreamId); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testIllegalSpdySynStreamFrameStreamId() throws Exception { - short type = 1; - byte flags = 0; - int length = 10; - int streamId = 0; // invalid stream identifier - int associatedToStreamId 
= RANDOM.nextInt() & 0x7FFFFFFF; - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(associatedToStreamId); - buf.writeByte(priority << 5); - buf.writeByte(0); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdySynStreamFrameHeaderBlock() throws Exception { - short type = 1; - byte flags = 0; - int length = 10; - int headerBlockLength = 1024; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int associatedToStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - byte priority = (byte) (RANDOM.nextInt() & 0x07); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length + headerBlockLength); - encodeControlFrameHeader(buf, type, flags, length + headerBlockLength); - buf.writeInt(streamId); - buf.writeInt(associatedToStreamId); - buf.writeByte(priority << 5); - buf.writeByte(0); - - ByteBuf headerBlock = Unpooled.buffer(headerBlockLength); - for (int i = 0; i < 256; i ++) { - headerBlock.writeInt(RANDOM.nextInt()); - } - - decoder.decode(buf); - decoder.decode(headerBlock); - verify(delegate).readSynStreamFrame(streamId, associatedToStreamId, priority, false, false); - verify(delegate).readHeaderBlock(headerBlock.slice(0, headerBlock.writerIndex())); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - assertFalse(headerBlock.isReadable()); - buf.release(); - headerBlock.release(); - } - - @Test - public void testSpdySynReplyFrame() throws Exception { - short type = 2; - byte flags = 0; - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readSynReplyFrame(streamId, false); - 
verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testLastSpdySynReplyFrame() throws Exception { - short type = 2; - byte flags = 0x01; // FLAG_FIN - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readSynReplyFrame(streamId, true); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdySynReplyFrameFlags() throws Exception { - short type = 2; - byte flags = (byte) 0xFE; // undefined flags - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readSynReplyFrame(streamId, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testReservedSpdySynReplyFrameBits() throws Exception { - short type = 2; - byte flags = 0; - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId | 0x80000000); // should ignore reserved bit - - decoder.decode(buf); - verify(delegate).readSynReplyFrame(streamId, false); - verify(delegate).readHeaderBlockEnd(); - - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdySynReplyFrameLength() throws Exception { - short type = 2; - byte flags = 0; - int length = 0; // invalid length - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - - decoder.decode(buf); - 
verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testIllegalSpdySynReplyFrameStreamId() throws Exception { - short type = 2; - byte flags = 0; - int length = 4; - int streamId = 0; // invalid stream identifier - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdySynReplyFrameHeaderBlock() throws Exception { - short type = 2; - byte flags = 0; - int length = 4; - int headerBlockLength = 1024; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length + headerBlockLength); - encodeControlFrameHeader(buf, type, flags, length + headerBlockLength); - buf.writeInt(streamId); - - ByteBuf headerBlock = Unpooled.buffer(headerBlockLength); - for (int i = 0; i < 256; i ++) { - headerBlock.writeInt(RANDOM.nextInt()); - } - - decoder.decode(buf); - decoder.decode(headerBlock); - verify(delegate).readSynReplyFrame(streamId, false); - verify(delegate).readHeaderBlock(headerBlock.slice(0, headerBlock.writerIndex())); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - assertFalse(headerBlock.isReadable()); - buf.release(); - headerBlock.release(); - } - - @Test - public void testSpdyRstStreamFrame() throws Exception { - short type = 3; - byte flags = 0; - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readRstStreamFrame(streamId, statusCode); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - 
public void testReservedSpdyRstStreamFrameBits() throws Exception { - short type = 3; - byte flags = 0; - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId | 0x80000000); // should ignore reserved bit - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readRstStreamFrame(streamId, statusCode); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdyRstStreamFrameFlags() throws Exception { - short type = 3; - byte flags = (byte) 0xFF; // invalid flags - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdyRstStreamFrameLength() throws Exception { - short type = 3; - byte flags = 0; - int length = 12; // invalid length - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testIllegalSpdyRstStreamFrameStreamId() throws Exception { - short type = 3; - byte flags = 0; - int length = 8; - int streamId = 0; // invalid stream identifier - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - 
encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testIllegalSpdyRstStreamFrameStatusCode() throws Exception { - short type = 3; - byte flags = 0; - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - int statusCode = 0; // invalid status code - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdySettingsFrame() throws Exception { - short type = 4; - byte flags = 0; - int numSettings = 2; - int length = 8 * numSettings + 4; - byte idFlags = 0; - int id = RANDOM.nextInt() & 0x00FFFFFF; - int value = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - for (int i = 0; i < numSettings; i++) { - buf.writeByte(idFlags); - buf.writeMedium(id); - buf.writeInt(value); - } - - delegate.readSettingsEnd(); - decoder.decode(buf); - verify(delegate).readSettingsFrame(false); - verify(delegate, times(numSettings)).readSetting(id, value, false, false); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testEmptySpdySettingsFrame() throws Exception { - short type = 4; - byte flags = 0; - int numSettings = 0; - int length = 8 * numSettings + 4; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - - decoder.decode(buf); - verify(delegate).readSettingsFrame(false); - verify(delegate).readSettingsEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - 
@Test - public void testSpdySettingsFrameClearFlag() throws Exception { - short type = 4; - byte flags = 0x01; // FLAG_SETTINGS_CLEAR_SETTINGS - int numSettings = 0; - int length = 8 * numSettings + 4; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - - decoder.decode(buf); - verify(delegate).readSettingsFrame(true); - verify(delegate).readSettingsEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdySettingsPersistValues() throws Exception { - short type = 4; - byte flags = 0; - int numSettings = 1; - int length = 8 * numSettings + 4; - byte idFlags = 0x01; // FLAG_SETTINGS_PERSIST_VALUE - int id = RANDOM.nextInt() & 0x00FFFFFF; - int value = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - for (int i = 0; i < numSettings; i++) { - buf.writeByte(idFlags); - buf.writeMedium(id); - buf.writeInt(value); - } - - delegate.readSettingsEnd(); - decoder.decode(buf); - verify(delegate).readSettingsFrame(false); - verify(delegate, times(numSettings)).readSetting(id, value, true, false); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdySettingsPersistedValues() throws Exception { - short type = 4; - byte flags = 0; - int numSettings = 1; - int length = 8 * numSettings + 4; - byte idFlags = 0x02; // FLAG_SETTINGS_PERSISTED - int id = RANDOM.nextInt() & 0x00FFFFFF; - int value = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - for (int i = 0; i < numSettings; i++) { - buf.writeByte(idFlags); - buf.writeMedium(id); - buf.writeInt(value); - } - - delegate.readSettingsEnd(); - decoder.decode(buf); - verify(delegate).readSettingsFrame(false); - verify(delegate, 
times(numSettings)).readSetting(id, value, false, true); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdySettingsFrameFlags() throws Exception { - short type = 4; - byte flags = (byte) 0xFE; // undefined flags - int numSettings = 0; - int length = 8 * numSettings + 4; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - - decoder.decode(buf); - verify(delegate).readSettingsFrame(false); - verify(delegate).readSettingsEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdySettingsFlags() throws Exception { - short type = 4; - byte flags = 0; - int numSettings = 1; - int length = 8 * numSettings + 4; - byte idFlags = (byte) 0xFC; // undefined flags - int id = RANDOM.nextInt() & 0x00FFFFFF; - int value = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - for (int i = 0; i < numSettings; i++) { - buf.writeByte(idFlags); - buf.writeMedium(id); - buf.writeInt(value); - } - - delegate.readSettingsEnd(); - decoder.decode(buf); - verify(delegate).readSettingsFrame(false); - verify(delegate, times(numSettings)).readSetting(id, value, false, false); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdySettingsFrameLength() throws Exception { - short type = 4; - byte flags = 0; - int numSettings = 2; - int length = 8 * numSettings + 8; // invalid length - byte idFlags = 0; - int id = RANDOM.nextInt() & 0x00FFFFFF; - int value = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(numSettings); - for (int i = 0; i < numSettings; i++) { - buf.writeByte(idFlags); - buf.writeMedium(id); - buf.writeInt(value); - } - - decoder.decode(buf); - 
verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdySettingsFrameNumSettings() throws Exception { - short type = 4; - byte flags = 0; - int numSettings = 2; - int length = 8 * numSettings + 4; - byte idFlags = 0; - int id = RANDOM.nextInt() & 0x00FFFFFF; - int value = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(0); // invalid num_settings - for (int i = 0; i < numSettings; i++) { - buf.writeByte(idFlags); - buf.writeMedium(id); - buf.writeInt(value); - } - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testDiscardUnknownFrame() throws Exception { - short type = 5; - byte flags = (byte) 0xFF; - int length = 8; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeLong(RANDOM.nextLong()); - - decoder.decode(buf); - verifyZeroInteractions(delegate); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testDiscardUnknownEmptyFrame() throws Exception { - short type = 5; - byte flags = (byte) 0xFF; - int length = 0; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - - decoder.decode(buf); - verifyZeroInteractions(delegate); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testProgressivelyDiscardUnknownEmptyFrame() throws Exception { - short type = 5; - byte flags = (byte) 0xFF; - int segment = 4; - int length = 2 * segment; - - ByteBuf header = Unpooled.buffer(SPDY_HEADER_SIZE); - ByteBuf segment1 = Unpooled.buffer(segment); - ByteBuf segment2 = Unpooled.buffer(segment); - encodeControlFrameHeader(header, type, flags, length); - segment1.writeInt(RANDOM.nextInt()); - 
segment2.writeInt(RANDOM.nextInt()); - - decoder.decode(header); - decoder.decode(segment1); - decoder.decode(segment2); - verifyZeroInteractions(delegate); - assertFalse(header.isReadable()); - assertFalse(segment1.isReadable()); - assertFalse(segment2.isReadable()); - header.release(); - segment1.release(); - segment2.release(); - } - - @Test - public void testSpdyPingFrame() throws Exception { - short type = 6; - byte flags = 0; - int length = 4; - int id = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(id); - - decoder.decode(buf); - verify(delegate).readPingFrame(id); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdyPingFrameFlags() throws Exception { - short type = 6; - byte flags = (byte) 0xFF; // undefined flags - int length = 4; - int id = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(id); - - decoder.decode(buf); - verify(delegate).readPingFrame(id); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdyPingFrameLength() throws Exception { - short type = 6; - byte flags = 0; - int length = 8; // invalid length - int id = RANDOM.nextInt(); - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(id); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdyGoAwayFrame() throws Exception { - short type = 7; - byte flags = 0; - int length = 8; - int lastGoodStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(lastGoodStreamId); - 
buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readGoAwayFrame(lastGoodStreamId, statusCode); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdyGoAwayFrameFlags() throws Exception { - short type = 7; - byte flags = (byte) 0xFF; // undefined flags - int length = 8; - int lastGoodStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(lastGoodStreamId); - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readGoAwayFrame(lastGoodStreamId, statusCode); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testReservedSpdyGoAwayFrameBits() throws Exception { - short type = 7; - byte flags = 0; - int length = 8; - int lastGoodStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(lastGoodStreamId | 0x80000000); // should ignore reserved bit - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readGoAwayFrame(lastGoodStreamId, statusCode); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdyGoAwayFrameLength() throws Exception { - short type = 7; - byte flags = 0; - int length = 12; // invalid length - int lastGoodStreamId = RANDOM.nextInt() & 0x7FFFFFFF; - int statusCode = RANDOM.nextInt() | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(lastGoodStreamId); - buf.writeInt(statusCode); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdyHeadersFrame() throws Exception { - short type = 8; - byte 
flags = 0; - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readHeadersFrame(streamId, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testLastSpdyHeadersFrame() throws Exception { - short type = 8; - byte flags = 0x01; // FLAG_FIN - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readHeadersFrame(streamId, true); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testUnknownSpdyHeadersFrameFlags() throws Exception { - short type = 8; - byte flags = (byte) 0xFE; // undefined flags - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readHeadersFrame(streamId, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testReservedSpdyHeadersFrameBits() throws Exception { - short type = 8; - byte flags = 0; - int length = 4; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId | 0x80000000); // should ignore reserved bit - - decoder.decode(buf); - verify(delegate).readHeadersFrame(streamId, false); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - 
public void testInvalidSpdyHeadersFrameLength() throws Exception { - short type = 8; - byte flags = 0; - int length = 0; // invalid length - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdyHeadersFrameStreamId() throws Exception { - short type = 8; - byte flags = 0; - int length = 4; - int streamId = 0; // invalid stream identifier - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testSpdyHeadersFrameHeaderBlock() throws Exception { - short type = 8; - byte flags = 0; - int length = 4; - int headerBlockLength = 1024; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length + headerBlockLength); - buf.writeInt(streamId); - - ByteBuf headerBlock = Unpooled.buffer(headerBlockLength); - for (int i = 0; i < 256; i ++) { - headerBlock.writeInt(RANDOM.nextInt()); - } - decoder.decode(buf); - decoder.decode(headerBlock); - verify(delegate).readHeadersFrame(streamId, false); - verify(delegate).readHeaderBlock(headerBlock.slice(0, headerBlock.writerIndex())); - verify(delegate).readHeaderBlockEnd(); - assertFalse(buf.isReadable()); - assertFalse(headerBlock.isReadable()); - buf.release(); - headerBlock.release(); - } - - @Test - public void testSpdyWindowUpdateFrame() throws Exception { - short type = 9; - byte flags = 0; - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF; - int deltaWindowSize = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + 
length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(deltaWindowSize); - - decoder.decode(buf); - verify(delegate).readWindowUpdateFrame(streamId, deltaWindowSize); - assertFalse(buf.isReadable()); - } - - @Test - public void testUnknownSpdyWindowUpdateFrameFlags() throws Exception { - short type = 9; - byte flags = (byte) 0xFF; // undefined flags - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF; - int deltaWindowSize = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(deltaWindowSize); - - decoder.decode(buf); - verify(delegate).readWindowUpdateFrame(streamId, deltaWindowSize); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testReservedSpdyWindowUpdateFrameBits() throws Exception { - short type = 9; - byte flags = 0; - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF; - int deltaWindowSize = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId | 0x80000000); // should ignore reserved bit - buf.writeInt(deltaWindowSize | 0x80000000); // should ignore reserved bit - - decoder.decode(buf); - verify(delegate).readWindowUpdateFrame(streamId, deltaWindowSize); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testInvalidSpdyWindowUpdateFrameLength() throws Exception { - short type = 9; - byte flags = 0; - int length = 12; // invalid length - int streamId = RANDOM.nextInt() & 0x7FFFFFFF; - int deltaWindowSize = RANDOM.nextInt() & 0x7FFFFFFF | 0x01; - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(deltaWindowSize); - - decoder.decode(buf); - 
verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } - - @Test - public void testIllegalSpdyWindowUpdateFrameDeltaWindowSize() throws Exception { - short type = 9; - byte flags = 0; - int length = 8; - int streamId = RANDOM.nextInt() & 0x7FFFFFFF; - int deltaWindowSize = 0; // invalid delta window size - - ByteBuf buf = Unpooled.buffer(SPDY_HEADER_SIZE + length); - encodeControlFrameHeader(buf, type, flags, length); - buf.writeInt(streamId); - buf.writeInt(deltaWindowSize); - - decoder.decode(buf); - verify(delegate).readFrameError(anyString()); - assertFalse(buf.isReadable()); - buf.release(); - } -} diff --git a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawDecoderTest.java deleted file mode 100644 index bc828397c61..00000000000 --- a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyHeaderBlockRawDecoderTest.java +++ /dev/null @@ -1,516 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.Unpooled; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class SpdyHeaderBlockRawDecoderTest { - - private static final int maxHeaderSize = 16; - - private static final String name = "name"; - private static final String value = "value"; - private static final byte[] nameBytes = name.getBytes(); - private static final byte[] valueBytes = value.getBytes(); - - private SpdyHeaderBlockRawDecoder decoder; - private SpdyHeadersFrame frame; - - @Before - public void setUp() { - decoder = new SpdyHeaderBlockRawDecoder(SpdyVersion.SPDY_3_1, maxHeaderSize); - frame = new DefaultSpdyHeadersFrame(1); - } - - @After - public void tearDown() { - decoder.end(); - } - - @Test - public void testEmptyHeaderBlock() throws Exception { - ByteBuf headerBlock = Unpooled.EMPTY_BUFFER; - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testZeroNameValuePairs() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(4); - headerBlock.writeInt(0); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertFalse(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testNegativeNameValuePairs() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(4); - headerBlock.writeInt(-1); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - 
assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testOneNameValuePair() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(21); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertFalse(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - headerBlock.release(); - } - - @Test - public void testMissingNameLength() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(4); - headerBlock.writeInt(1); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testZeroNameLength() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(8); - headerBlock.writeInt(1); - headerBlock.writeInt(0); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testNegativeNameLength() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(8); - headerBlock.writeInt(1); - headerBlock.writeInt(-1); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testMissingName() throws 
Exception { - ByteBuf headerBlock = Unpooled.buffer(8); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testIllegalNameOnlyNull() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(18); - headerBlock.writeInt(1); - headerBlock.writeInt(1); - headerBlock.writeByte(0); - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testMissingValueLength() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(12); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testZeroValueLength() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(16); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(0); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertFalse(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals("", frame.headers().get(name)); - headerBlock.release(); - } - - @Test - public void testNegativeValueLength() throws Exception 
{ - ByteBuf headerBlock = Unpooled.buffer(16); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(-1); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testMissingValue() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(16); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testIllegalValueOnlyNull() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(17); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(1); - headerBlock.writeByte(0); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testIllegalValueStartsWithNull() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(22); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(6); - headerBlock.writeByte(0); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testIllegalValueEndsWithNull() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(22); - 
headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(6); - headerBlock.writeBytes(valueBytes); - headerBlock.writeByte(0); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testMultipleValues() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(27); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(11); - headerBlock.writeBytes(valueBytes); - headerBlock.writeByte(0); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertFalse(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(2, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().getAll(name).get(0)); - assertEquals(value, frame.headers().getAll(name).get(1)); - headerBlock.release(); - } - - @Test - public void testMultipleValuesEndsWithNull() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(28); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(12); - headerBlock.writeBytes(valueBytes); - headerBlock.writeByte(0); - headerBlock.writeBytes(valueBytes); - headerBlock.writeByte(0); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - headerBlock.release(); - } - - @Test - public void 
testIllegalValueMultipleNulls() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(28); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(12); - headerBlock.writeBytes(valueBytes); - headerBlock.writeByte(0); - headerBlock.writeByte(0); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testMissingNextNameValuePair() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(21); - headerBlock.writeInt(2); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - headerBlock.release(); - } - - @Test - public void testMultipleNames() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(38); - headerBlock.writeInt(2); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, 
frame.headers().get(name)); - headerBlock.release(); - } - - @Test - public void testExtraData() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(22); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - headerBlock.writeByte(0); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - headerBlock.release(); - } - - @Test - public void testMultipleDecodes() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(21); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - - int readableBytes = headerBlock.readableBytes(); - for (int i = 0; i < readableBytes; i++) { - ByteBuf headerBlockSegment = headerBlock.slice(i, 1); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlockSegment, frame); - assertFalse(headerBlockSegment.isReadable()); - } - decoder.endHeaderBlock(frame); - - assertFalse(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - headerBlock.release(); - } - - @Test - public void testContinueAfterInvalidHeaders() throws Exception { - ByteBuf numHeaders = Unpooled.buffer(4); - numHeaders.writeInt(1); - - ByteBuf nameBlock = Unpooled.buffer(8); - nameBlock.writeInt(4); - nameBlock.writeBytes(nameBytes); - - ByteBuf valueBlock = Unpooled.buffer(9); - valueBlock.writeInt(5); - valueBlock.writeBytes(valueBytes); - - decoder.decode(ByteBufAllocator.DEFAULT, numHeaders, frame); - 
decoder.decode(ByteBufAllocator.DEFAULT, nameBlock, frame); - frame.setInvalid(); - decoder.decode(ByteBufAllocator.DEFAULT, valueBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(numHeaders.isReadable()); - assertFalse(nameBlock.isReadable()); - assertFalse(valueBlock.isReadable()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - numHeaders.release(); - nameBlock.release(); - valueBlock.release(); - } - - @Test - public void testTruncatedHeaderName() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(maxHeaderSize + 18); - headerBlock.writeInt(1); - headerBlock.writeInt(maxHeaderSize + 1); - for (int i = 0; i < maxHeaderSize + 1; i++) { - headerBlock.writeByte('a'); - } - headerBlock.writeInt(5); - headerBlock.writeBytes(valueBytes); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isTruncated()); - assertFalse(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } - - @Test - public void testTruncatedHeaderValue() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(maxHeaderSize + 13); - headerBlock.writeInt(1); - headerBlock.writeInt(4); - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(13); - for (int i = 0; i < maxHeaderSize - 3; i++) { - headerBlock.writeByte('a'); - } - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertTrue(frame.isTruncated()); - assertFalse(frame.isInvalid()); - assertEquals(0, frame.headers().names().size()); - headerBlock.release(); - } -} diff --git a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibDecoderTest.java 
b/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibDecoderTest.java deleted file mode 100644 index 404bbd9ec52..00000000000 --- a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdyHeaderBlockZlibDecoderTest.java +++ /dev/null @@ -1,225 +0,0 @@ -/* - * Copyright 2014 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package io.netty.handler.codec.spdy; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.ByteBufAllocator; -import io.netty.buffer.Unpooled; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; - -public class SpdyHeaderBlockZlibDecoderTest { - - // zlib header indicating 32K window size fastest deflate algorithm with SPDY dictionary - private static final byte[] zlibHeader = {0x78, 0x3f, (byte) 0xe3, (byte) 0xc6, (byte) 0xa7, (byte) 0xc2}; - private static final byte[] zlibSyncFlush = {0x00, 0x00, 0x00, (byte) 0xff, (byte) 0xff}; - - private static final int maxHeaderSize = 8192; - - private static final String name = "name"; - private static final String value = "value"; - private static final byte[] nameBytes = name.getBytes(); - private static final byte[] valueBytes = value.getBytes(); - - private SpdyHeaderBlockZlibDecoder decoder; - private SpdyHeadersFrame frame; - - @Before - public void 
setUp() { - decoder = new SpdyHeaderBlockZlibDecoder(SpdyVersion.SPDY_3_1, maxHeaderSize); - frame = new DefaultSpdyHeadersFrame(1); - } - - @After - public void tearDown() { - decoder.end(); - } - - @Test - public void testHeaderBlock() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(37); - headerBlock.writeBytes(zlibHeader); - headerBlock.writeByte(0); // Non-compressed block - headerBlock.writeByte(0x15); // little-endian length (21) - headerBlock.writeByte(0x00); // little-endian length (21) - headerBlock.writeByte(0xea); // one's compliment of length - headerBlock.writeByte(0xff); // one's compliment of length - headerBlock.writeInt(1); // number of Name/Value pairs - headerBlock.writeInt(4); // length of name - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); // length of value - headerBlock.writeBytes(valueBytes); - headerBlock.writeBytes(zlibSyncFlush); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertFalse(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - - headerBlock.release(); - } - - @Test - public void testHeaderBlockMultipleDecodes() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(37); - headerBlock.writeBytes(zlibHeader); - headerBlock.writeByte(0); // Non-compressed block - headerBlock.writeByte(0x15); // little-endian length (21) - headerBlock.writeByte(0x00); // little-endian length (21) - headerBlock.writeByte(0xea); // one's compliment of length - headerBlock.writeByte(0xff); // one's compliment of length - headerBlock.writeInt(1); // number of Name/Value pairs - headerBlock.writeInt(4); // length of name - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); // length of value - headerBlock.writeBytes(valueBytes); - 
headerBlock.writeBytes(zlibSyncFlush); - - int readableBytes = headerBlock.readableBytes(); - for (int i = 0; i < readableBytes; i++) { - ByteBuf headerBlockSegment = headerBlock.slice(i, 1); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlockSegment, frame); - assertFalse(headerBlockSegment.isReadable()); - } - decoder.endHeaderBlock(frame); - - assertFalse(frame.isInvalid()); - assertEquals(1, frame.headers().names().size()); - assertTrue(frame.headers().contains(name)); - assertEquals(1, frame.headers().getAll(name).size()); - assertEquals(value, frame.headers().get(name)); - - headerBlock.release(); - } - - @Test - public void testLargeHeaderName() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(8220); - headerBlock.writeBytes(zlibHeader); - headerBlock.writeByte(0); // Non-compressed block - headerBlock.writeByte(0x0c); // little-endian length (8204) - headerBlock.writeByte(0x20); // little-endian length (8204) - headerBlock.writeByte(0xf3); // one's compliment of length - headerBlock.writeByte(0xdf); // one's compliment of length - headerBlock.writeInt(1); // number of Name/Value pairs - headerBlock.writeInt(8192); // length of name - for (int i = 0; i < 8192; i++) { - headerBlock.writeByte('n'); - } - headerBlock.writeInt(0); // length of value - headerBlock.writeBytes(zlibSyncFlush); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertFalse(frame.isInvalid()); - assertFalse(frame.isTruncated()); - assertEquals(1, frame.headers().names().size()); - - headerBlock.release(); - } - - @Test - public void testLargeHeaderValue() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(8220); - headerBlock.writeBytes(zlibHeader); - headerBlock.writeByte(0); // Non-compressed block - headerBlock.writeByte(0x0c); // little-endian length (8204) - headerBlock.writeByte(0x20); // little-endian length (8204) - headerBlock.writeByte(0xf3); // one's compliment of 
length - headerBlock.writeByte(0xdf); // one's compliment of length - headerBlock.writeInt(1); // number of Name/Value pairs - headerBlock.writeInt(1); // length of name - headerBlock.writeByte('n'); - headerBlock.writeInt(8191); // length of value - for (int i = 0; i < 8191; i++) { - headerBlock.writeByte('v'); - } - headerBlock.writeBytes(zlibSyncFlush); - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - decoder.endHeaderBlock(frame); - - assertFalse(headerBlock.isReadable()); - assertFalse(frame.isInvalid()); - assertFalse(frame.isTruncated()); - assertEquals(1, frame.headers().names().size()); - assertEquals(8191, frame.headers().get("n").length()); - - headerBlock.release(); - } - - @Test(expected = SpdyProtocolException.class) - public void testHeaderBlockExtraData() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(37); - headerBlock.writeBytes(zlibHeader); - headerBlock.writeByte(0); // Non-compressed block - headerBlock.writeByte(0x15); // little-endian length (21) - headerBlock.writeByte(0x00); // little-endian length (21) - headerBlock.writeByte(0xea); // one's compliment of length - headerBlock.writeByte(0xff); // one's compliment of length - headerBlock.writeInt(1); // number of Name/Value pairs - headerBlock.writeInt(4); // length of name - headerBlock.writeBytes(nameBytes); - headerBlock.writeInt(5); // length of value - headerBlock.writeBytes(valueBytes); - headerBlock.writeByte(0x19); // adler-32 checksum - headerBlock.writeByte(0xa5); // adler-32 checksum - headerBlock.writeByte(0x03); // adler-32 checksum - headerBlock.writeByte(0xc9); // adler-32 checksum - headerBlock.writeByte(0); // Data following zlib stream - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - headerBlock.release(); - } - - @Test(expected = SpdyProtocolException.class) - public void testHeaderBlockInvalidDictionary() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(7); - headerBlock.writeByte(0x78); - 
headerBlock.writeByte(0x3f); - headerBlock.writeByte(0x01); // Unknown dictionary - headerBlock.writeByte(0x02); // Unknown dictionary - headerBlock.writeByte(0x03); // Unknown dictionary - headerBlock.writeByte(0x04); // Unknown dictionary - headerBlock.writeByte(0); // Non-compressed block - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - headerBlock.release(); - } - - @Test(expected = SpdyProtocolException.class) - public void testHeaderBlockInvalidDeflateBlock() throws Exception { - ByteBuf headerBlock = Unpooled.buffer(11); - headerBlock.writeBytes(zlibHeader); - headerBlock.writeByte(0); // Non-compressed block - headerBlock.writeByte(0x00); // little-endian length (0) - headerBlock.writeByte(0x00); // little-endian length (0) - headerBlock.writeByte(0x00); // invalid one's compliment - headerBlock.writeByte(0x00); // invalid one's compliment - decoder.decode(ByteBufAllocator.DEFAULT, headerBlock, frame); - - headerBlock.release(); - } -} diff --git a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdySessionHandlerTest.java b/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdySessionHandlerTest.java deleted file mode 100644 index 362628aa864..00000000000 --- a/codec-http/src/test/java/io/netty/handler/codec/spdy/SpdySessionHandlerTest.java +++ /dev/null @@ -1,389 +0,0 @@ -/* - * Copyright 2013 The Netty Project - * - * The Netty Project licenses this file to you under the Apache License, - * version 2.0 (the "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. 
- */ -package io.netty.handler.codec.spdy; - -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.util.internal.logging.InternalLogger; -import io.netty.util.internal.logging.InternalLoggerFactory; -import org.junit.Test; - -import java.util.List; -import java.util.Map; - -import static org.junit.Assert.*; - -public class SpdySessionHandlerTest { - - private static final InternalLogger logger = - InternalLoggerFactory.getInstance(SpdySessionHandlerTest.class); - - private static final int closeSignal = SpdyCodecUtil.SPDY_SETTINGS_MAX_ID; - private static final SpdySettingsFrame closeMessage = new DefaultSpdySettingsFrame(); - - static { - closeMessage.setValue(closeSignal, 0); - } - - private static void assertDataFrame(Object msg, int streamId, boolean last) { - assertNotNull(msg); - assertTrue(msg instanceof SpdyDataFrame); - SpdyDataFrame spdyDataFrame = (SpdyDataFrame) msg; - assertEquals(streamId, spdyDataFrame.streamId()); - assertEquals(last, spdyDataFrame.isLast()); - } - - private static void assertSynReply(Object msg, int streamId, boolean last, SpdyHeaders headers) { - assertNotNull(msg); - assertTrue(msg instanceof SpdySynReplyFrame); - assertHeaders(msg, streamId, last, headers); - } - - private static void assertRstStream(Object msg, int streamId, SpdyStreamStatus status) { - assertNotNull(msg); - assertTrue(msg instanceof SpdyRstStreamFrame); - SpdyRstStreamFrame spdyRstStreamFrame = (SpdyRstStreamFrame) msg; - assertEquals(streamId, spdyRstStreamFrame.streamId()); - assertEquals(status, spdyRstStreamFrame.status()); - } - - private static void assertPing(Object msg, int id) { - assertNotNull(msg); - assertTrue(msg instanceof SpdyPingFrame); - SpdyPingFrame spdyPingFrame = (SpdyPingFrame) msg; - assertEquals(id, spdyPingFrame.id()); - } - - private static void assertGoAway(Object msg, int lastGoodStreamId) { - assertNotNull(msg); - 
assertTrue(msg instanceof SpdyGoAwayFrame); - SpdyGoAwayFrame spdyGoAwayFrame = (SpdyGoAwayFrame) msg; - assertEquals(lastGoodStreamId, spdyGoAwayFrame.lastGoodStreamId()); - } - - private static void assertHeaders(Object msg, int streamId, boolean last, SpdyHeaders headers) { - assertNotNull(msg); - assertTrue(msg instanceof SpdyHeadersFrame); - SpdyHeadersFrame spdyHeadersFrame = (SpdyHeadersFrame) msg; - assertEquals(streamId, spdyHeadersFrame.streamId()); - assertEquals(last, spdyHeadersFrame.isLast()); - for (CharSequence name: headers.names()) { - List expectedValues = headers.getAll(name); - List receivedValues = spdyHeadersFrame.headers().getAll(name); - assertTrue(receivedValues.containsAll(expectedValues)); - receivedValues.removeAll(expectedValues); - assertTrue(receivedValues.isEmpty()); - spdyHeadersFrame.headers().remove(name); - } - assertTrue(spdyHeadersFrame.headers().isEmpty()); - } - - private static void testSpdySessionHandler(SpdyVersion version, boolean server) { - EmbeddedChannel sessionHandler = new EmbeddedChannel( - new SpdySessionHandler(version, server), new EchoHandler(closeSignal, server)); - - while (sessionHandler.readOutbound() != null) { - continue; - } - - int localStreamId = server ? 1 : 2; - int remoteStreamId = server ? 
2 : 1; - - SpdySynStreamFrame spdySynStreamFrame = - new DefaultSpdySynStreamFrame(localStreamId, 0, (byte) 0); - spdySynStreamFrame.headers().set("compression", "test"); - - SpdyDataFrame spdyDataFrame = new DefaultSpdyDataFrame(localStreamId); - spdyDataFrame.setLast(true); - - // Check if session handler returns INVALID_STREAM if it receives - // a data frame for a Stream-ID that is not open - sessionHandler.writeInbound(new DefaultSpdyDataFrame(localStreamId)); - assertRstStream(sessionHandler.readOutbound(), localStreamId, SpdyStreamStatus.INVALID_STREAM); - assertNull(sessionHandler.readOutbound()); - - // Check if session handler returns PROTOCOL_ERROR if it receives - // a data frame for a Stream-ID before receiving a SYN_REPLY frame - sessionHandler.writeInbound(new DefaultSpdyDataFrame(remoteStreamId)); - assertRstStream(sessionHandler.readOutbound(), remoteStreamId, SpdyStreamStatus.PROTOCOL_ERROR); - assertNull(sessionHandler.readOutbound()); - remoteStreamId += 2; - - // Check if session handler returns PROTOCOL_ERROR if it receives - // multiple SYN_REPLY frames for the same active Stream-ID - sessionHandler.writeInbound(new DefaultSpdySynReplyFrame(remoteStreamId)); - assertNull(sessionHandler.readOutbound()); - sessionHandler.writeInbound(new DefaultSpdySynReplyFrame(remoteStreamId)); - assertRstStream(sessionHandler.readOutbound(), remoteStreamId, SpdyStreamStatus.STREAM_IN_USE); - assertNull(sessionHandler.readOutbound()); - remoteStreamId += 2; - - // Check if frame codec correctly compresses/uncompresses headers - sessionHandler.writeInbound(spdySynStreamFrame); - assertSynReply(sessionHandler.readOutbound(), localStreamId, false, spdySynStreamFrame.headers()); - assertNull(sessionHandler.readOutbound()); - SpdyHeadersFrame spdyHeadersFrame = new DefaultSpdyHeadersFrame(localStreamId); - - spdyHeadersFrame.headers().add("header", "test1"); - spdyHeadersFrame.headers().add("header", "test2"); - - sessionHandler.writeInbound(spdyHeadersFrame); - 
assertHeaders(sessionHandler.readOutbound(), localStreamId, false, spdyHeadersFrame.headers()); - assertNull(sessionHandler.readOutbound()); - localStreamId += 2; - - // Check if session handler closed the streams using the number - // of concurrent streams and that it returns REFUSED_STREAM - // if it receives a SYN_STREAM frame it does not wish to accept - spdySynStreamFrame.setStreamId(localStreamId); - spdySynStreamFrame.setLast(true); - spdySynStreamFrame.setUnidirectional(true); - - sessionHandler.writeInbound(spdySynStreamFrame); - assertRstStream(sessionHandler.readOutbound(), localStreamId, SpdyStreamStatus.REFUSED_STREAM); - assertNull(sessionHandler.readOutbound()); - - // Check if session handler rejects HEADERS for closed streams - int testStreamId = spdyDataFrame.streamId(); - sessionHandler.writeInbound(spdyDataFrame); - assertDataFrame(sessionHandler.readOutbound(), testStreamId, spdyDataFrame.isLast()); - assertNull(sessionHandler.readOutbound()); - spdyHeadersFrame.setStreamId(testStreamId); - - sessionHandler.writeInbound(spdyHeadersFrame); - assertRstStream(sessionHandler.readOutbound(), testStreamId, SpdyStreamStatus.INVALID_STREAM); - assertNull(sessionHandler.readOutbound()); - - // Check if session handler drops active streams if it receives - // a RST_STREAM frame for that Stream-ID - sessionHandler.writeInbound(new DefaultSpdyRstStreamFrame(remoteStreamId, 3)); - assertNull(sessionHandler.readOutbound()); - //remoteStreamId += 2; - - // Check if session handler honors UNIDIRECTIONAL streams - spdySynStreamFrame.setLast(false); - sessionHandler.writeInbound(spdySynStreamFrame); - assertNull(sessionHandler.readOutbound()); - spdySynStreamFrame.setUnidirectional(false); - - // Check if session handler returns PROTOCOL_ERROR if it receives - // multiple SYN_STREAM frames for the same active Stream-ID - sessionHandler.writeInbound(spdySynStreamFrame); - assertRstStream(sessionHandler.readOutbound(), localStreamId, 
SpdyStreamStatus.PROTOCOL_ERROR); - assertNull(sessionHandler.readOutbound()); - localStreamId += 2; - - // Check if session handler returns PROTOCOL_ERROR if it receives - // a SYN_STREAM frame with an invalid Stream-ID - spdySynStreamFrame.setStreamId(localStreamId - 1); - sessionHandler.writeInbound(spdySynStreamFrame); - assertRstStream(sessionHandler.readOutbound(), localStreamId - 1, SpdyStreamStatus.PROTOCOL_ERROR); - assertNull(sessionHandler.readOutbound()); - spdySynStreamFrame.setStreamId(localStreamId); - - // Check if session handler returns PROTOCOL_ERROR if it receives - // an invalid HEADERS frame - spdyHeadersFrame.setStreamId(localStreamId); - - spdyHeadersFrame.setInvalid(); - sessionHandler.writeInbound(spdyHeadersFrame); - assertRstStream(sessionHandler.readOutbound(), localStreamId, SpdyStreamStatus.PROTOCOL_ERROR); - assertNull(sessionHandler.readOutbound()); - - sessionHandler.finish(); - } - - private static void testSpdySessionHandlerPing(SpdyVersion version, boolean server) { - EmbeddedChannel sessionHandler = new EmbeddedChannel( - new SpdySessionHandler(version, server), new EchoHandler(closeSignal, server)); - - while (sessionHandler.readOutbound() != null) { - continue; - } - - int localStreamId = server ? 1 : 2; - int remoteStreamId = server ? 
2 : 1; - - SpdyPingFrame localPingFrame = new DefaultSpdyPingFrame(localStreamId); - SpdyPingFrame remotePingFrame = new DefaultSpdyPingFrame(remoteStreamId); - - // Check if session handler returns identical local PINGs - sessionHandler.writeInbound(localPingFrame); - assertPing(sessionHandler.readOutbound(), localPingFrame.id()); - assertNull(sessionHandler.readOutbound()); - - // Check if session handler ignores un-initiated remote PINGs - sessionHandler.writeInbound(remotePingFrame); - assertNull(sessionHandler.readOutbound()); - - sessionHandler.finish(); - } - - private static void testSpdySessionHandlerGoAway(SpdyVersion version, boolean server) { - EmbeddedChannel sessionHandler = new EmbeddedChannel( - new SpdySessionHandler(version, server), new EchoHandler(closeSignal, server)); - - while (sessionHandler.readOutbound() != null) { - continue; - } - - int localStreamId = server ? 1 : 2; - - SpdySynStreamFrame spdySynStreamFrame = - new DefaultSpdySynStreamFrame(localStreamId, 0, (byte) 0); - spdySynStreamFrame.headers().set("compression", "test"); - - SpdyDataFrame spdyDataFrame = new DefaultSpdyDataFrame(localStreamId); - spdyDataFrame.setLast(true); - - // Send an initial request - sessionHandler.writeInbound(spdySynStreamFrame); - assertSynReply(sessionHandler.readOutbound(), localStreamId, false, spdySynStreamFrame.headers()); - assertNull(sessionHandler.readOutbound()); - sessionHandler.writeInbound(spdyDataFrame); - assertDataFrame(sessionHandler.readOutbound(), localStreamId, true); - assertNull(sessionHandler.readOutbound()); - - // Check if session handler sends a GOAWAY frame when closing - sessionHandler.writeInbound(closeMessage); - assertGoAway(sessionHandler.readOutbound(), localStreamId); - assertNull(sessionHandler.readOutbound()); - localStreamId += 2; - - // Check if session handler returns REFUSED_STREAM if it receives - // SYN_STREAM frames after sending a GOAWAY frame - spdySynStreamFrame.setStreamId(localStreamId); - 
sessionHandler.writeInbound(spdySynStreamFrame); - assertRstStream(sessionHandler.readOutbound(), localStreamId, SpdyStreamStatus.REFUSED_STREAM); - assertNull(sessionHandler.readOutbound()); - - // Check if session handler ignores Data frames after sending - // a GOAWAY frame - spdyDataFrame.setStreamId(localStreamId); - sessionHandler.writeInbound(spdyDataFrame); - assertNull(sessionHandler.readOutbound()); - - sessionHandler.finish(); - } - - @Test - public void testSpdyClientSessionHandler() { - logger.info("Running: testSpdyClientSessionHandler v3.1"); - testSpdySessionHandler(SpdyVersion.SPDY_3_1, false); - } - - @Test - public void testSpdyClientSessionHandlerPing() { - logger.info("Running: testSpdyClientSessionHandlerPing v3.1"); - testSpdySessionHandlerPing(SpdyVersion.SPDY_3_1, false); - } - - @Test - public void testSpdyClientSessionHandlerGoAway() { - logger.info("Running: testSpdyClientSessionHandlerGoAway v3.1"); - testSpdySessionHandlerGoAway(SpdyVersion.SPDY_3_1, false); - } - - @Test - public void testSpdyServerSessionHandler() { - logger.info("Running: testSpdyServerSessionHandler v3.1"); - testSpdySessionHandler(SpdyVersion.SPDY_3_1, true); - } - - @Test - public void testSpdyServerSessionHandlerPing() { - logger.info("Running: testSpdyServerSessionHandlerPing v3.1"); - testSpdySessionHandlerPing(SpdyVersion.SPDY_3_1, true); - } - - @Test - public void testSpdyServerSessionHandlerGoAway() { - logger.info("Running: testSpdyServerSessionHandlerGoAway v3.1"); - testSpdySessionHandlerGoAway(SpdyVersion.SPDY_3_1, true); - } - - // Echo Handler opens 4 half-closed streams on session connection - // and then sets the number of concurrent streams to 1 - private static class EchoHandler extends ChannelInboundHandlerAdapter { - private final int closeSignal; - private final boolean server; - - EchoHandler(int closeSignal, boolean server) { - this.closeSignal = closeSignal; - this.server = server; - } - - @Override - public void 
channelActive(ChannelHandlerContext ctx) throws Exception { - // Initiate 4 new streams - int streamId = server ? 2 : 1; - SpdySynStreamFrame spdySynStreamFrame = - new DefaultSpdySynStreamFrame(streamId, 0, (byte) 0); - spdySynStreamFrame.setLast(true); - ctx.writeAndFlush(spdySynStreamFrame); - spdySynStreamFrame.setStreamId(spdySynStreamFrame.streamId() + 2); - ctx.writeAndFlush(spdySynStreamFrame); - spdySynStreamFrame.setStreamId(spdySynStreamFrame.streamId() + 2); - ctx.writeAndFlush(spdySynStreamFrame); - spdySynStreamFrame.setStreamId(spdySynStreamFrame.streamId() + 2); - ctx.writeAndFlush(spdySynStreamFrame); - - // Limit the number of concurrent streams to 1 - SpdySettingsFrame spdySettingsFrame = new DefaultSpdySettingsFrame(); - spdySettingsFrame.setValue(SpdySettingsFrame.SETTINGS_MAX_CONCURRENT_STREAMS, 1); - ctx.writeAndFlush(spdySettingsFrame); - } - - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception { - if (msg instanceof SpdySynStreamFrame) { - - SpdySynStreamFrame spdySynStreamFrame = (SpdySynStreamFrame) msg; - if (!spdySynStreamFrame.isUnidirectional()) { - int streamId = spdySynStreamFrame.streamId(); - SpdySynReplyFrame spdySynReplyFrame = new DefaultSpdySynReplyFrame(streamId); - spdySynReplyFrame.setLast(spdySynStreamFrame.isLast()); - for (Map.Entry entry: spdySynStreamFrame.headers()) { - spdySynReplyFrame.headers().add(entry.getKey(), entry.getValue()); - } - - ctx.writeAndFlush(spdySynReplyFrame); - } - return; - } - - if (msg instanceof SpdySynReplyFrame) { - return; - } - - if (msg instanceof SpdyDataFrame || - msg instanceof SpdyPingFrame || - msg instanceof SpdyHeadersFrame) { - - ctx.writeAndFlush(msg); - return; - } - - if (msg instanceof SpdySettingsFrame) { - SpdySettingsFrame spdySettingsFrame = (SpdySettingsFrame) msg; - if (spdySettingsFrame.isSet(closeSignal)) { - ctx.close(); - } - } - } - } -} diff --git a/codec-http/src/test/resources/file-03.txt 
b/codec-http/src/test/resources/file-03.txt new file mode 100644 index 00000000000..b545f1b452b --- /dev/null +++ b/codec-http/src/test/resources/file-03.txt @@ -0,0 +1 @@ +File 03 diff --git a/codec-http2/pom.xml b/codec-http2/pom.xml index aad4a1dc8b3..fd47bcb8704 100644 --- a/codec-http2/pom.xml +++ b/codec-http2/pom.xml @@ -6,7 +6,7 @@ ~ version 2.0 (the "License"); you may not use this file except in compliance ~ with the License. You may obtain a copy of the License at: ~ - ~ http://www.apache.org/licenses/LICENSE-2.0 + ~ https://www.apache.org/licenses/LICENSE-2.0 ~ ~ Unless required by applicable law or agreed to in writing, software ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -14,13 +14,13 @@ ~ License for the specific language governing permissions and limitations ~ under the License. --> - + 4.0.0 io.netty netty-parent - 4.1.23.Final-SNAPSHOT + 5.0.0.Final-SNAPSHOT netty-codec-http2 @@ -30,12 +30,29 @@ io.netty.codec.http2 + + --add-exports java.base/sun.security.x509=ALL-UNNAMED ${project.groupId} - netty-codec-http + netty-common + ${project.version} + + + ${project.groupId} + netty-buffer + ${project.version} + + + ${project.groupId} + netty-transport + ${project.version} + + + ${project.groupId} + netty-codec ${project.version} @@ -43,6 +60,11 @@ netty-handler ${project.version} + + ${project.groupId} + netty-codec-http + ${project.version} + com.jcraft jzlib @@ -56,6 +78,28 @@ org.mockito mockito-core + + org.bouncycastle + bcpkix-jdk15on + test + + + ${project.groupId} + ${tcnative.artifactId} + ${tcnative.classifier} + test + true + + + com.aayushatharva.brotli4j + brotli4j + true + + + com.github.luben + zstd-jni + true + diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java index 7c52cd2b5c2..be21e2c6934 100644 --- 
a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,17 +16,15 @@ package io.netty.handler.codec.http2; +import io.netty.channel.Channel; import io.netty.handler.codec.http2.Http2HeadersEncoder.SensitivityDetector; import io.netty.util.internal.UnstableApi; import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_HEADER_LIST_SIZE; -import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_INITIAL_HUFFMAN_DECODE_CAPACITY; import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_MAX_RESERVED_STREAMS; -import static io.netty.util.internal.ObjectUtil.checkNotNull; -import static io.netty.util.internal.ObjectUtil.checkPositive; +import static io.netty.handler.codec.http2.Http2PromisedRequestVerifier.ALWAYS_VERIFY; import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; -import static java.util.concurrent.TimeUnit.MILLISECONDS; -import static java.util.concurrent.TimeUnit.SECONDS; +import static java.util.Objects.requireNonNull; /** * Abstract base class which defines commonly used features required to build {@link Http2ConnectionHandler} instances. @@ -64,7 +62,6 @@ *
  • {@link #headerSensitivityDetector(SensitivityDetector)}
  • *
  • {@link #encoderEnforceMaxConcurrentStreams(boolean)}
  • *
  • {@link #encoderIgnoreMaxHeaderListSize(boolean)}
  • - *
  • {@link #initialHuffmanDecodeCapacity(int)}
  • * * *

    Exposing necessary methods in a subclass

    @@ -84,9 +81,10 @@ public abstract class AbstractHttp2ConnectionHandlerBuilder 0) { + decoder = new Http2EmptyDataFrameConnectionDecoder(decoder, maxConsecutiveEmptyDataFrames); + } final T handler; try { // Call the abstract build method @@ -421,7 +551,7 @@ private T buildFromCodec(Http2ConnectionDecoder decoder, Http2ConnectionEncoder } catch (Throwable t) { encoder.close(); decoder.close(); - throw new IllegalStateException("failed to build a Http2ConnectionHandler", t); + throw new IllegalStateException("failed to build an Http2ConnectionHandler", t); } // Setup post build options diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java new file mode 100644 index 00000000000..22e3602d336 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamChannel.java @@ -0,0 +1,950 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http2; + +import io.netty.channel.Channel; +import io.netty.channel.ChannelConfig; +import io.netty.channel.ChannelHandler; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelId; +import io.netty.channel.ChannelMetadata; +import io.netty.channel.ChannelOutboundBuffer; +import io.netty.channel.ChannelPipeline; +import io.netty.channel.DefaultChannelConfig; +import io.netty.channel.DefaultChannelPipeline; +import io.netty.channel.EventLoop; +import io.netty.channel.MessageSizeEstimator; +import io.netty.channel.RecvByteBufAllocator; +import io.netty.channel.WriteBufferWaterMark; +import io.netty.handler.codec.http2.Http2FrameCodec.DefaultHttp2FrameStream; +import io.netty.util.DefaultAttributeMap; +import io.netty.util.ReferenceCountUtil; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; +import io.netty.util.internal.StringUtil; +import io.netty.util.internal.logging.InternalLogger; +import io.netty.util.internal.logging.InternalLoggerFactory; + +import java.io.IOException; +import java.net.SocketAddress; +import java.nio.channels.ClosedChannelException; +import java.util.ArrayDeque; +import java.util.Queue; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.atomic.AtomicIntegerFieldUpdater; +import java.util.concurrent.atomic.AtomicLongFieldUpdater; + +import static io.netty.handler.codec.http2.Http2CodecUtil.isStreamIdValid; +import static java.lang.Math.min; + +abstract class AbstractHttp2StreamChannel extends DefaultAttributeMap implements Http2StreamChannel { + + static final Http2FrameStreamVisitor WRITABLE_VISITOR = stream -> { + final AbstractHttp2StreamChannel childChannel = (AbstractHttp2StreamChannel) + ((DefaultHttp2FrameStream) stream).attachment; + childChannel.trySetWritable(); + return true; + }; + + private static final InternalLogger logger = InternalLoggerFactory.getInstance(AbstractHttp2StreamChannel.class); + + 
private static final ChannelMetadata METADATA = new ChannelMetadata(false, 16); + + /** + * Number of bytes to consider non-payload messages. 9 is arbitrary, but also the minimum size of an HTTP/2 frame. + * Primarily is non-zero. + */ + private static final int MIN_HTTP2_FRAME_SIZE = 9; + + /** + * Returns the flow-control size for DATA frames, and {@value MIN_HTTP2_FRAME_SIZE} for all other frames. + */ + private static final class FlowControlledFrameSizeEstimator implements MessageSizeEstimator { + + static final FlowControlledFrameSizeEstimator INSTANCE = new FlowControlledFrameSizeEstimator(); + + private static final Handle HANDLE_INSTANCE = msg -> { + return msg instanceof Http2DataFrame ? + // Guard against overflow. + (int) min(Integer.MAX_VALUE, ((Http2DataFrame) msg).initialFlowControlledBytes() + + (long) MIN_HTTP2_FRAME_SIZE) : MIN_HTTP2_FRAME_SIZE; + }; + + @Override + public Handle newHandle() { + return HANDLE_INSTANCE; + } + } + + private static final AtomicLongFieldUpdater TOTAL_PENDING_SIZE_UPDATER = + AtomicLongFieldUpdater.newUpdater(AbstractHttp2StreamChannel.class, "totalPendingSize"); + + private static final AtomicIntegerFieldUpdater UNWRITABLE_UPDATER = + AtomicIntegerFieldUpdater.newUpdater(AbstractHttp2StreamChannel.class, "unwritable"); + + private static void windowUpdateFrameWriteComplete(Channel streamChannel, Future future) { + Throwable cause = future.cause(); + if (cause != null) { + Throwable unwrappedCause; + // Unwrap if needed + if (cause instanceof Http2FrameStreamException && (unwrappedCause = cause.getCause()) != null) { + cause = unwrappedCause; + } + + // Notify the child-channel and close it. + streamChannel.pipeline().fireExceptionCaught(cause); + streamChannel.unsafe().close(streamChannel.newPromise()); + } + } + + /** + * The current status of the read-processing for a {@link AbstractHttp2StreamChannel}. 
+ */ + private enum ReadStatus { + /** + * No read in progress and no read was requested (yet) + */ + IDLE, + + /** + * Reading in progress + */ + IN_PROGRESS, + + /** + * A read operation was requested. + */ + REQUESTED + } + + private final Http2StreamChannelConfig config = new Http2StreamChannelConfig(this); + private final Http2ChannelUnsafe unsafe = new Http2ChannelUnsafe(); + private final ChannelId channelId; + private final ChannelPipeline pipeline; + private final DefaultHttp2FrameStream stream; + private final Promise closePromise; + + private volatile boolean registered; + + private volatile long totalPendingSize; + private volatile int unwritable; + + // Cached to reduce GC + private Runnable fireChannelWritabilityChangedTask; + + private boolean outboundClosed; + private int flowControlledBytes; + + /** + * This variable represents if a read is in progress for the current channel or was requested. + * Note that depending upon the {@link RecvByteBufAllocator} behavior a read may extend beyond the + * {@link Http2ChannelUnsafe#beginRead()} method scope. The {@link Http2ChannelUnsafe#beginRead()} loop may + * drain all pending data, and then if the parent channel is reading this channel may still accept frames. 
+ */ + private ReadStatus readStatus = ReadStatus.IDLE; + + private Queue inboundBuffer; + + /** {@code true} after the first HEADERS frame has been written **/ + private boolean firstFrameWritten; + private boolean readCompletePending; + + AbstractHttp2StreamChannel(DefaultHttp2FrameStream stream, int id, ChannelHandler inboundHandler) { + this.stream = stream; + stream.attachment = this; + pipeline = new DefaultChannelPipeline(this) { + @Override + protected void incrementPendingOutboundBytes(long size) { + AbstractHttp2StreamChannel.this.incrementPendingOutboundBytes(size, true); + } + + @Override + protected void decrementPendingOutboundBytes(long size) { + AbstractHttp2StreamChannel.this.decrementPendingOutboundBytes(size, true); + } + }; + + closePromise = pipeline.newPromise(); + channelId = new Http2StreamChannelId(parent().id(), id); + + if (inboundHandler != null) { + // Add the handler to the pipeline now that we are registered. + pipeline.addLast(inboundHandler); + } + } + + private void incrementPendingOutboundBytes(long size, boolean invokeLater) { + if (size == 0) { + return; + } + + long newWriteBufferSize = TOTAL_PENDING_SIZE_UPDATER.addAndGet(this, size); + if (newWriteBufferSize > config().getWriteBufferHighWaterMark()) { + setUnwritable(invokeLater); + } + } + + private void decrementPendingOutboundBytes(long size, boolean invokeLater) { + if (size == 0) { + return; + } + + long newWriteBufferSize = TOTAL_PENDING_SIZE_UPDATER.addAndGet(this, -size); + // Once the totalPendingSize dropped below the low water-mark we can mark the child channel + // as writable again. Before doing so we also need to ensure the parent channel is writable to + // prevent excessive buffering in the parent outbound buffer. If the parent is not writable + // we will mark the child channel as writable once the parent becomes writable by calling + // trySetWritable() later. 
+ if (newWriteBufferSize < config().getWriteBufferLowWaterMark() && parent().isWritable()) { + setWritable(invokeLater); + } + } + + final void trySetWritable() { + // The parent is writable again but the child channel itself may still not be writable. + // Lets try to set the child channel writable to match the state of the parent channel + // if (and only if) the totalPendingSize is smaller then the low water-mark. + // If this is not the case we will try again later once we drop under it. + if (totalPendingSize < config().getWriteBufferLowWaterMark()) { + setWritable(false); + } + } + + private void setWritable(boolean invokeLater) { + for (;;) { + final int oldValue = unwritable; + final int newValue = oldValue & ~1; + if (UNWRITABLE_UPDATER.compareAndSet(this, oldValue, newValue)) { + if (oldValue != 0 && newValue == 0) { + fireChannelWritabilityChanged(invokeLater); + } + break; + } + } + } + + private void setUnwritable(boolean invokeLater) { + for (;;) { + final int oldValue = unwritable; + final int newValue = oldValue | 1; + if (UNWRITABLE_UPDATER.compareAndSet(this, oldValue, newValue)) { + if (oldValue == 0) { + fireChannelWritabilityChanged(invokeLater); + } + break; + } + } + } + + private void fireChannelWritabilityChanged(boolean invokeLater) { + final ChannelPipeline pipeline = pipeline(); + if (invokeLater) { + Runnable task = fireChannelWritabilityChangedTask; + if (task == null) { + fireChannelWritabilityChangedTask = task = pipeline::fireChannelWritabilityChanged; + } + executor().execute(task); + } else { + pipeline.fireChannelWritabilityChanged(); + } + } + @Override + public Http2FrameStream stream() { + return stream; + } + + void closeOutbound() { + outboundClosed = true; + } + + void streamClosed() { + unsafe.readEOS(); + // Attempt to drain any queued data from the queue and deliver it to the application before closing this + // channel. 
+ unsafe.doBeginRead(); + } + + @Override + public ChannelMetadata metadata() { + return METADATA; + } + + @Override + public ChannelConfig config() { + return config; + } + + @Override + public boolean isOpen() { + return !closePromise.isDone(); + } + + @Override + public boolean isActive() { + return isOpen(); + } + + @Override + public boolean isWritable() { + return unwritable == 0; + } + + @Override + public ChannelId id() { + return channelId; + } + + @Override + public EventLoop executor() { + return parent().executor(); + } + + @Override + public Channel parent() { + return parentContext().channel(); + } + + @Override + public boolean isRegistered() { + return registered; + } + + @Override + public SocketAddress localAddress() { + return parent().localAddress(); + } + + @Override + public SocketAddress remoteAddress() { + return parent().remoteAddress(); + } + + @Override + public Future closeFuture() { + return closePromise; + } + + @Override + public long bytesBeforeUnwritable() { + long bytes = config().getWriteBufferHighWaterMark() - totalPendingSize; + // If bytes is negative we know we are not writable, but if bytes is non-negative we have to check + // writability. Note that totalPendingSize and isWritable() use different volatile variables that are not + // synchronized together. totalPendingSize will be updated before isWritable(). + if (bytes > 0) { + return isWritable() ? bytes : 0; + } + return 0; + } + + @Override + public long bytesBeforeWritable() { + long bytes = totalPendingSize - config().getWriteBufferLowWaterMark(); + // If bytes is negative we know we are writable, but if bytes is non-negative we have to check writability. + // Note that totalPendingSize and isWritable() use different volatile variables that are not synchronized + // together. totalPendingSize will be updated before isWritable(). + if (bytes > 0) { + return isWritable() ? 
0 : bytes; + } + return 0; + } + + @Override + public Unsafe unsafe() { + return unsafe; + } + + @Override + public ChannelPipeline pipeline() { + return pipeline; + } + + @Override + public int hashCode() { + return id().hashCode(); + } + + @Override + public boolean equals(Object o) { + return this == o; + } + + @Override + public int compareTo(Channel o) { + if (this == o) { + return 0; + } + + return id().compareTo(o.id()); + } + + @Override + public String toString() { + return parent().toString() + "(H2 - " + stream + ')'; + } + + /** + * Receive a read message. This does not notify handlers unless a read is in progress on the + * channel. + */ + void fireChildRead(Http2Frame frame) { + assert executor().inEventLoop(); + if (!isActive()) { + ReferenceCountUtil.release(frame); + } else if (readStatus != ReadStatus.IDLE) { + // If a read is in progress or has been requested, there cannot be anything in the queue, + // otherwise we would have drained it from the queue and processed it during the read cycle. + assert inboundBuffer == null || inboundBuffer.isEmpty(); + final RecvByteBufAllocator.Handle allocHandle = unsafe.recvBufAllocHandle(); + unsafe.doRead0(frame, allocHandle); + // We currently don't need to check for readEOS because the parent channel and child channel are limited + // to the same EventLoop thread. There are a limited number of frame types that may come after EOS is + // read (unknown, reset) and the trade off is less conditionals for the hot path (headers/data) at the + // cost of additional readComplete notifications on the rare path. 
+ if (allocHandle.continueReading()) { + maybeAddChannelToReadCompletePendingQueue(); + } else { + unsafe.notifyReadComplete(allocHandle, true); + } + } else { + if (inboundBuffer == null) { + inboundBuffer = new ArrayDeque<>(4); + } + inboundBuffer.add(frame); + } + } + + void fireChildReadComplete() { + assert executor().inEventLoop(); + assert readStatus != ReadStatus.IDLE || !readCompletePending; + unsafe.notifyReadComplete(unsafe.recvBufAllocHandle(), false); + } + + private final class Http2ChannelUnsafe implements Unsafe { + @SuppressWarnings("deprecation") + private RecvByteBufAllocator.Handle recvHandle; + private boolean writeDoneAndNoFlush; + private boolean closeInitiated; + private boolean readEOS; + + @Override + public void connect(final SocketAddress remoteAddress, + SocketAddress localAddress, Promise promise) { + if (!promise.setUncancellable()) { + return; + } + promise.setFailure(new UnsupportedOperationException()); + } + + @Override + public RecvByteBufAllocator.Handle recvBufAllocHandle() { + if (recvHandle == null) { + recvHandle = config().getRecvByteBufAllocator().newHandle(); + recvHandle.reset(config()); + } + return recvHandle; + } + + @Override + public SocketAddress localAddress() { + return parent().unsafe().localAddress(); + } + + @Override + public SocketAddress remoteAddress() { + return parent().unsafe().remoteAddress(); + } + + @Override + public void register(Promise promise) { + if (!promise.setUncancellable()) { + return; + } + if (registered) { + promise.setFailure(new UnsupportedOperationException("Re-register is not supported")); + return; + } + + registered = true; + + promise.setSuccess(null); + + pipeline().fireChannelRegistered(); + if (isActive()) { + pipeline().fireChannelActive(); + if (config().isAutoRead()) { + read(); + } + } + } + + @Override + public void bind(SocketAddress localAddress, Promise promise) { + if (!promise.setUncancellable()) { + return; + } + promise.setFailure(new 
UnsupportedOperationException()); + } + + @Override + public void disconnect(Promise promise) { + close(promise); + } + + @Override + public void close(final Promise promise) { + if (!promise.setUncancellable()) { + return; + } + if (closeInitiated) { + if (closePromise.isDone()) { + // Closed already. + promise.setSuccess(null); + } else { + // This means close() was called before so we just register a listener and return + closePromise.addListener(promise, (p, future) -> p.setSuccess(null)); + } + return; + } + closeInitiated = true; + // Just set to false as removing from an underlying queue would even be more expensive. + readCompletePending = false; + + final boolean wasActive = isActive(); + + // There is no need to update the local window as once the stream is closed all the pending bytes will be + // given back to the connection window by the controller itself. + + // Only ever send a reset frame if the connection is still alive and if the stream was created before + // as otherwise we may send a RST on a stream in an invalid state and cause a connection error. + if (parent().isActive() && !readEOS && isStreamIdValid(stream.id())) { + Http2StreamFrame resetFrame = new DefaultHttp2ResetFrame(Http2Error.CANCEL).stream(stream()); + write(resetFrame, newPromise()); + flush(); + } + + if (inboundBuffer != null) { + for (;;) { + Object msg = inboundBuffer.poll(); + if (msg == null) { + break; + } + ReferenceCountUtil.release(msg); + } + inboundBuffer = null; + } + + // The promise should be notified before we call fireChannelInactive(). 
+ outboundClosed = true; + closePromise.setSuccess(null); + promise.setSuccess(null); + + fireChannelInactiveAndDeregister(newPromise(), wasActive); + } + + @Override + public void closeForcibly() { + close(newPromise()); + } + + @Override + public void deregister(Promise promise) { + fireChannelInactiveAndDeregister(promise, false); + } + + private void fireChannelInactiveAndDeregister(Promise promise, + final boolean fireChannelInactive) { + if (!promise.setUncancellable()) { + return; + } + + if (!registered) { + promise.setSuccess(null); + return; + } + + // As a user may call deregister() from within any method while doing processing in the ChannelPipeline, + // we need to ensure we do the actual deregister operation later. This is necessary to preserve the + // behavior of the AbstractChannel, which always invokes channelUnregistered and channelInactive + // events 'later' to ensure the current events in the handler are completed before these events. + // + // See: + // https://github.com/netty/netty/issues/4435 + invokeLater(()-> { + if (fireChannelInactive) { + pipeline.fireChannelInactive(); + } + // The user can fire `deregister` events multiple times but we only want to fire the pipeline + // event if the channel was actually registered. + if (registered) { + registered = false; + pipeline.fireChannelUnregistered(); + } + safeSetSuccess(promise); + }); + } + + private void safeSetSuccess(Promise promise) { + if (!promise.trySuccess(null)) { + logger.warn("Failed to mark a promise as success because it is done already: {}", promise); + } + } + + private void invokeLater(Runnable task) { + try { + // This method is used by outbound operation implementations to trigger an inbound event later. + // They do not trigger an inbound event immediately because an outbound operation might have been + // triggered by another inbound event handler method. 
If fired immediately, the call stack + // will look like this for example: + // + // handlerA.inboundBufferUpdated() - (1) an inbound handler method closes a connection. + // -> handlerA.ctx.close() + // -> channel.unsafe.close() + // -> handlerA.channelInactive() - (2) another inbound handler method called while in (1) yet + // + // which means the execution of two inbound handler methods of the same handler overlap undesirably. + executor().execute(task); + } catch (RejectedExecutionException e) { + logger.warn("Can't invoke task later as EventLoop rejected it", e); + } + } + + @Override + public void beginRead() { + if (!isActive()) { + return; + } + updateLocalWindowIfNeeded(); + + switch (readStatus) { + case IDLE: + readStatus = ReadStatus.IN_PROGRESS; + doBeginRead(); + break; + case IN_PROGRESS: + readStatus = ReadStatus.REQUESTED; + break; + default: + break; + } + } + + private Object pollQueuedMessage() { + return inboundBuffer == null ? null : inboundBuffer.poll(); + } + + void doBeginRead() { + // Process messages until there are none left (or the user stopped requesting) and also handle EOS. + while (readStatus != ReadStatus.IDLE) { + Object message = pollQueuedMessage(); + if (message == null) { + if (readEOS) { + unsafe.closeForcibly(); + } + // We need to double check that there is nothing left to flush such as a + // window update frame. + flush(); + break; + } + final RecvByteBufAllocator.Handle allocHandle = recvBufAllocHandle(); + allocHandle.reset(config()); + boolean continueReading = false; + do { + doRead0((Http2Frame) message, allocHandle); + } while ((readEOS || (continueReading = allocHandle.continueReading())) + && (message = pollQueuedMessage()) != null); + + if (continueReading && isParentReadInProgress() && !readEOS) { + // Currently the parent and child channel are on the same EventLoop thread. If the parent is + // currently reading it is possible that more frames will be delivered to this child channel. 
In + // the case that this child channel still wants to read we delay the channelReadComplete on this + // child channel until the parent is done reading. + maybeAddChannelToReadCompletePendingQueue(); + } else { + notifyReadComplete(allocHandle, true); + } + } + } + + void readEOS() { + readEOS = true; + } + + private void updateLocalWindowIfNeeded() { + if (flowControlledBytes != 0) { + int bytes = flowControlledBytes; + flowControlledBytes = 0; + Future future = write0(parentContext(), new DefaultHttp2WindowUpdateFrame(bytes).stream(stream)); + // window update frames are commonly swallowed by the Http2FrameCodec and the promise is synchronously + // completed but the flow controller _may_ have generated a wire level WINDOW_UPDATE. Therefore we need, + // to assume there was a write done that needs to be flushed or we risk flow control starvation. + writeDoneAndNoFlush = true; + // Add a listener which will notify and teardown the stream + // when a window update fails if needed or check the result of the future directly if it was completed + // already. + // See https://github.com/netty/netty/issues/9663 + if (future.isDone()) { + windowUpdateFrameWriteComplete(AbstractHttp2StreamChannel.this, future); + } else { + future.addListener(AbstractHttp2StreamChannel.this, + AbstractHttp2StreamChannel::windowUpdateFrameWriteComplete); + } + } + } + + void notifyReadComplete(RecvByteBufAllocator.Handle allocHandle, boolean forceReadComplete) { + if (!readCompletePending && !forceReadComplete) { + return; + } + // Set to false just in case we added the channel multiple times before. + readCompletePending = false; + + if (readStatus == ReadStatus.REQUESTED) { + readStatus = ReadStatus.IN_PROGRESS; + } else { + readStatus = ReadStatus.IDLE; + } + + allocHandle.readComplete(); + pipeline().fireChannelReadComplete(); + if (config().isAutoRead()) { + read(); + } + + // Reading data may result in frames being written (e.g. WINDOW_UPDATE, RST, etc..). 
If the parent + // channel is not currently reading we need to force a flush at the child channel, because we cannot + // rely upon flush occurring in channelReadComplete on the parent channel. + flush(); + if (readEOS) { + unsafe.closeForcibly(); + } + } + + @SuppressWarnings("deprecation") + void doRead0(Http2Frame frame, RecvByteBufAllocator.Handle allocHandle) { + final int bytes; + if (frame instanceof Http2DataFrame) { + bytes = ((Http2DataFrame) frame).initialFlowControlledBytes(); + + // It is important that we increment the flowControlledBytes before we call fireChannelRead(...) + // as it may cause a read() that will call updateLocalWindowIfNeeded() and we need to ensure + // in this case that we accounted for it. + // + // See https://github.com/netty/netty/issues/9663 + flowControlledBytes += bytes; + } else { + bytes = MIN_HTTP2_FRAME_SIZE; + } + // Update before firing event through the pipeline to be consistent with other Channel implementation. + allocHandle.attemptedBytesRead(bytes); + allocHandle.lastBytesRead(bytes); + allocHandle.incMessagesRead(1); + + pipeline().fireChannelRead(frame); + } + + @Override + public void write(Object msg, Promise promise) { + // After this point its not possible to cancel a write anymore. 
+ if (!promise.setUncancellable()) { + ReferenceCountUtil.release(msg); + return; + } + + if (!isActive() || + // Once the outbound side was closed we should not allow header / data frames + outboundClosed && (msg instanceof Http2HeadersFrame || msg instanceof Http2DataFrame)) { + ReferenceCountUtil.release(msg); + promise.setFailure(new ClosedChannelException()); + return; + } + + try { + if (msg instanceof Http2StreamFrame) { + Http2StreamFrame frame = validateStreamFrame((Http2StreamFrame) msg).stream(stream()); + writeHttp2StreamFrame(frame, promise); + } else { + String msgStr = msg.toString(); + ReferenceCountUtil.release(msg); + promise.setFailure(new IllegalArgumentException( + "Message must be an " + StringUtil.simpleClassName(Http2StreamFrame.class) + + ": " + msgStr)); + } + } catch (Throwable t) { + promise.tryFailure(t); + } + } + + private void writeHttp2StreamFrame(Http2StreamFrame frame, Promise promise) { + if (!firstFrameWritten && !isStreamIdValid(stream().id()) && !(frame instanceof Http2HeadersFrame)) { + ReferenceCountUtil.release(frame); + promise.setFailure( + new IllegalArgumentException("The first frame must be a headers frame. 
Was: " + + frame.name())); + return; + } + + final boolean firstWrite; + if (firstFrameWritten) { + firstWrite = false; + } else { + firstWrite = firstFrameWritten = true; + } + + Future f = write0(parentContext(), frame); + if (f.isDone()) { + if (firstWrite) { + firstWriteComplete(f, promise); + } else { + writeComplete(f, promise); + } + } else { + final long bytes = FlowControlledFrameSizeEstimator.HANDLE_INSTANCE.size(frame); + incrementPendingOutboundBytes(bytes, false); + f.addListener(future -> { + if (firstWrite) { + firstWriteComplete(future, promise); + } else { + writeComplete(future, promise); + } + decrementPendingOutboundBytes(bytes, false); + }); + writeDoneAndNoFlush = true; + } + } + + private void firstWriteComplete(Future future, Promise promise) { + Throwable cause = future.cause(); + if (cause == null) { + promise.setSuccess(null); + } else { + // If the first write fails there is not much we can do, just close + closeForcibly(); + promise.setFailure(wrapStreamClosedError(cause)); + } + } + + private void writeComplete(Future future, Promise promise) { + Throwable cause = future.cause(); + if (cause == null) { + promise.setSuccess(null); + } else { + Throwable error = wrapStreamClosedError(cause); + // To make it more consistent with AbstractChannel we handle all IOExceptions here. + if (error instanceof IOException) { + if (config.isAutoClose()) { + // Close channel if needed. + closeForcibly(); + } else { + // TODO: Once Http2StreamChannel extends DuplexChannel we should call shutdownOutput(...) + outboundClosed = true; + } + } + promise.setFailure(error); + } + } + + private Throwable wrapStreamClosedError(Throwable cause) { + // If the error was caused by STREAM_CLOSED we should use a ClosedChannelException to better + // mimic other transports and make it easier to reason about what exceptions to expect. 
+ if (cause instanceof Http2Exception && ((Http2Exception) cause).error() == Http2Error.STREAM_CLOSED) { + return new ClosedChannelException().initCause(cause); + } + return cause; + } + + private Http2StreamFrame validateStreamFrame(Http2StreamFrame frame) { + if (frame.stream() != null && frame.stream() != stream) { + String msgString = frame.toString(); + ReferenceCountUtil.release(frame); + throw new IllegalArgumentException( + "Stream " + frame.stream() + " must not be set on the frame: " + msgString); + } + return frame; + } + + @Override + public void flush() { + // If we are currently in the parent channel's read loop we should just ignore the flush. + // We will ensure we trigger ctx.flush() after we processed all Channels later on and + // so aggregate the flushes. This is done as ctx.flush() is expensive when as it may trigger an + // write(...) or writev(...) operation on the socket. + if (!writeDoneAndNoFlush || isParentReadInProgress()) { + // There is nothing to flush so this is a NOOP. + return; + } + // We need to set this to false before we call flush0(...) as FutureListener may produce more data + // that are explicit flushed. + writeDoneAndNoFlush = false; + flush0(parentContext()); + } + + @Override + public ChannelOutboundBuffer outboundBuffer() { + // Always return null as we not use the ChannelOutboundBuffer and not even support it. + return null; + } + } + + /** + * {@link ChannelConfig} so that the high and low writebuffer watermarks can reflect the outbound flow control + * window, without having to create a new {@link WriteBufferWaterMark} object whenever the flow control window + * changes. 
+ */ + private static final class Http2StreamChannelConfig extends DefaultChannelConfig { + Http2StreamChannelConfig(Channel channel) { + super(channel); + } + + @Override + public MessageSizeEstimator getMessageSizeEstimator() { + return FlowControlledFrameSizeEstimator.INSTANCE; + } + + @Override + public ChannelConfig setMessageSizeEstimator(MessageSizeEstimator estimator) { + throw new UnsupportedOperationException(); + } + + @Override + public ChannelConfig setRecvByteBufAllocator(RecvByteBufAllocator allocator) { + if (!(allocator.newHandle() instanceof RecvByteBufAllocator.ExtendedHandle)) { + throw new IllegalArgumentException("allocator.newHandle() must return an object of type: " + + RecvByteBufAllocator.ExtendedHandle.class); + } + super.setRecvByteBufAllocator(allocator); + return this; + } + } + + private void maybeAddChannelToReadCompletePendingQueue() { + if (!readCompletePending) { + readCompletePending = true; + addChannelToReadCompletePendingQueue(); + } + } + + protected void flush0(ChannelHandlerContext ctx) { + ctx.flush(); + } + + protected Future write0(ChannelHandlerContext ctx, Object msg) { + return ctx.write(msg); + } + + protected abstract boolean isParentReadInProgress(); + protected abstract void addChannelToReadCompletePendingQueue(); + protected abstract ChannelHandlerContext parentContext(); +} diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamFrame.java index a7ad700c2c7..1d5c1d01e24 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2StreamFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -45,7 +45,7 @@ public boolean equals(Object o) { return false; } Http2StreamFrame other = (Http2StreamFrame) o; - return stream == other.stream() || (stream != null && stream.equals(other.stream())); + return stream == other.stream() || stream != null && stream.equals(other.stream()); } @Override diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractInboundHttp2ToHttpAdapterBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractInboundHttp2ToHttpAdapterBuilder.java index 9549740d8d1..a6e820341cf 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractInboundHttp2ToHttpAdapterBuilder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractInboundHttp2ToHttpAdapterBuilder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -17,7 +17,7 @@ import io.netty.handler.codec.TooLongFrameException; import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * A skeletal builder implementation of {@link InboundHttp2ToHttpAdapter} and its subtypes. 
@@ -38,7 +38,7 @@ public abstract class AbstractInboundHttp2ToHttpAdapterBuilder< * for the current connection */ protected AbstractInboundHttp2ToHttpAdapterBuilder(Http2Connection connection) { - this.connection = checkNotNull(connection, "connection"); + this.connection = requireNonNull(connection, "connection"); } @SuppressWarnings("unchecked") diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/CharSequenceMap.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/CharSequenceMap.java index ee7575a52d1..21371b49e43 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/CharSequenceMap.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/CharSequenceMap.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -33,7 +33,7 @@ public CharSequenceMap() { } public CharSequenceMap(boolean caseSensitive) { - this(caseSensitive, UnsupportedValueConverter.instance()); + this(caseSensitive, UnsupportedValueConverter.instance()); } public CharSequenceMap(boolean caseSensitive, ValueConverter valueConverter) { diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/CleartextHttp2ServerUpgradeHandler.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/CleartextHttp2ServerUpgradeHandler.java index c70b343920b..2e83f666d05 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/CleartextHttp2ServerUpgradeHandler.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/CleartextHttp2ServerUpgradeHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,19 +18,16 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.ByteBufUtil; import io.netty.channel.ChannelHandler; -import io.netty.channel.ChannelHandlerAdapter; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.ByteToMessageDecoder; import io.netty.handler.codec.http.HttpServerCodec; import io.netty.handler.codec.http.HttpServerUpgradeHandler; import io.netty.util.internal.UnstableApi; -import java.util.List; - import static io.netty.buffer.Unpooled.unreleasableBuffer; import static io.netty.handler.codec.http2.Http2CodecUtil.connectionPrefaceBuf; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * Performing cleartext upgrade, by h2c HTTP upgrade or Prior Knowledge. @@ -39,7 +36,7 @@ * prior knowledge or not. 
*/ @UnstableApi -public final class CleartextHttp2ServerUpgradeHandler extends ChannelHandlerAdapter { +public final class CleartextHttp2ServerUpgradeHandler extends ByteToMessageDecoder { private static final ByteBuf CONNECTION_PREFACE = unreleasableBuffer(connectionPrefaceBuf()); private final HttpServerCodec httpServerCodec; @@ -58,44 +55,41 @@ public final class CleartextHttp2ServerUpgradeHandler extends ChannelHandlerAdap public CleartextHttp2ServerUpgradeHandler(HttpServerCodec httpServerCodec, HttpServerUpgradeHandler httpServerUpgradeHandler, ChannelHandler http2ServerHandler) { - this.httpServerCodec = checkNotNull(httpServerCodec, "httpServerCodec"); - this.httpServerUpgradeHandler = checkNotNull(httpServerUpgradeHandler, "httpServerUpgradeHandler"); - this.http2ServerHandler = checkNotNull(http2ServerHandler, "http2ServerHandler"); + this.httpServerCodec = requireNonNull(httpServerCodec, "httpServerCodec"); + this.httpServerUpgradeHandler = requireNonNull(httpServerUpgradeHandler, "httpServerUpgradeHandler"); + this.http2ServerHandler = requireNonNull(http2ServerHandler, "http2ServerHandler"); } @Override - public void handlerAdded(ChannelHandlerContext ctx) throws Exception { + public void handlerAdded0(ChannelHandlerContext ctx) throws Exception { ctx.pipeline() - .addBefore(ctx.name(), null, new PriorKnowledgeHandler()) - .addBefore(ctx.name(), null, httpServerCodec) - .replace(this, null, httpServerUpgradeHandler); + .addAfter(ctx.name(), null, httpServerUpgradeHandler) + .addAfter(ctx.name(), null, httpServerCodec); } /** * Peek inbound message to determine current connection wants to start HTTP/2 * by HTTP upgrade or prior knowledge */ - private final class PriorKnowledgeHandler extends ByteToMessageDecoder { - @Override - protected void decode(ChannelHandlerContext ctx, ByteBuf in, List out) throws Exception { - int prefaceLength = CONNECTION_PREFACE.readableBytes(); - int bytesRead = Math.min(in.readableBytes(), prefaceLength); + @Override + 
protected void decode(ChannelHandlerContext ctx, ByteBuf in) throws Exception { + int prefaceLength = CONNECTION_PREFACE.readableBytes(); + int bytesRead = Math.min(in.readableBytes(), prefaceLength); - if (!ByteBufUtil.equals(CONNECTION_PREFACE, CONNECTION_PREFACE.readerIndex(), - in, in.readerIndex(), bytesRead)) { - ctx.pipeline().remove(this); - } else if (bytesRead == prefaceLength) { - // Full h2 preface match, removed source codec, using http2 codec to handle - // following network traffic - ctx.pipeline() - .remove(httpServerCodec) - .remove(httpServerUpgradeHandler); + if (!ByteBufUtil.equals(CONNECTION_PREFACE, CONNECTION_PREFACE.readerIndex(), + in, in.readerIndex(), bytesRead)) { + ctx.pipeline().remove(this); + } else if (bytesRead == prefaceLength) { + // Full h2 preface match, removed source codec, using http2 codec to handle + // following network traffic + ctx.pipeline() + .remove(httpServerCodec) + .remove(httpServerUpgradeHandler); - ctx.pipeline().addAfter(ctx.name(), null, http2ServerHandler); - ctx.pipeline().remove(this); + ctx.pipeline().addAfter(ctx.name(), null, http2ServerHandler); + ctx.fireUserEventTriggered(PriorKnowledgeUpgradeEvent.INSTANCE); - ctx.fireUserEventTriggered(PriorKnowledgeUpgradeEvent.INSTANCE); - } + ctx.pipeline().remove(this); } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/CompressorHttp2ConnectionEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/CompressorHttp2ConnectionEncoder.java index 3137da21ed2..4436bd872bb 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/CompressorHttp2ConnectionEncoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/CompressorHttp2ConnectionEncoder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -16,23 +16,34 @@ import io.netty.buffer.ByteBuf; import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.compression.BrotliEncoder; +import io.netty.handler.codec.compression.BrotliOptions; +import io.netty.handler.codec.compression.CompressionOptions; +import io.netty.handler.codec.compression.DeflateOptions; +import io.netty.handler.codec.compression.GzipOptions; +import io.netty.handler.codec.compression.StandardCompressionOptions; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibWrapper; +import io.netty.handler.codec.compression.ZstdEncoder; +import io.netty.handler.codec.compression.ZstdOptions; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.Promise; import io.netty.util.concurrent.PromiseCombiner; +import io.netty.util.internal.ObjectUtil; import io.netty.util.internal.UnstableApi; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_ENCODING; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpHeaderValues.BR; import static io.netty.handler.codec.http.HttpHeaderValues.DEFLATE; import static io.netty.handler.codec.http.HttpHeaderValues.GZIP; import static io.netty.handler.codec.http.HttpHeaderValues.IDENTITY; import static io.netty.handler.codec.http.HttpHeaderValues.X_DEFLATE; import static 
io.netty.handler.codec.http.HttpHeaderValues.X_GZIP; +import static io.netty.handler.codec.http.HttpHeaderValues.ZSTD; /** * A decorating HTTP2 encoder that will compress data frames according to the {@code content-encoding} header for each @@ -40,34 +51,83 @@ */ @UnstableApi public class CompressorHttp2ConnectionEncoder extends DecoratingHttp2ConnectionEncoder { + // We cannot remove this because it'll be breaking change public static final int DEFAULT_COMPRESSION_LEVEL = 6; public static final int DEFAULT_WINDOW_BITS = 15; public static final int DEFAULT_MEM_LEVEL = 8; - private final int compressionLevel; - private final int windowBits; - private final int memLevel; + private int compressionLevel; + private int windowBits; + private int memLevel; private final Http2Connection.PropertyKey propertyKey; + private final boolean supportsCompressionOptions; + + private BrotliOptions brotliOptions; + private GzipOptions gzipCompressionOptions; + private DeflateOptions deflateOptions; + private ZstdOptions zstdOptions; + + /** + * Create a new {@link CompressorHttp2ConnectionEncoder} instance + * with default implementation of {@link StandardCompressionOptions} + */ public CompressorHttp2ConnectionEncoder(Http2ConnectionEncoder delegate) { - this(delegate, DEFAULT_COMPRESSION_LEVEL, DEFAULT_WINDOW_BITS, DEFAULT_MEM_LEVEL); + this(delegate, StandardCompressionOptions.brotli(), StandardCompressionOptions.gzip(), + StandardCompressionOptions.deflate()); } + /** + * Create a new {@link CompressorHttp2ConnectionEncoder} instance + */ + @Deprecated public CompressorHttp2ConnectionEncoder(Http2ConnectionEncoder delegate, int compressionLevel, int windowBits, int memLevel) { super(delegate); - if (compressionLevel < 0 || compressionLevel > 9) { - throw new IllegalArgumentException("compressionLevel: " + compressionLevel + " (expected: 0-9)"); - } - if (windowBits < 9 || windowBits > 15) { - throw new IllegalArgumentException("windowBits: " + windowBits + " (expected: 9-15)"); - 
} - if (memLevel < 1 || memLevel > 9) { - throw new IllegalArgumentException("memLevel: " + memLevel + " (expected: 1-9)"); + this.compressionLevel = ObjectUtil.checkInRange(compressionLevel, 0, 9, "compressionLevel"); + this.windowBits = ObjectUtil.checkInRange(windowBits, 9, 15, "windowBits"); + this.memLevel = ObjectUtil.checkInRange(memLevel, 1, 9, "memLevel"); + + propertyKey = connection().newKey(); + connection().addListener(new Http2ConnectionAdapter() { + @Override + public void onStreamRemoved(Http2Stream stream) { + final EmbeddedChannel compressor = stream.getProperty(propertyKey); + if (compressor != null) { + cleanup(stream, compressor); + } + } + }); + + supportsCompressionOptions = false; + } + + /** + * Create a new {@link CompressorHttp2ConnectionEncoder} with + * specified {@link StandardCompressionOptions} + */ + public CompressorHttp2ConnectionEncoder(Http2ConnectionEncoder delegate, + CompressionOptions... compressionOptionsArgs) { + super(delegate); + ObjectUtil.checkNotNull(compressionOptionsArgs, "CompressionOptions"); + ObjectUtil.deepCheckNotNull("CompressionOptions", compressionOptionsArgs); + + for (CompressionOptions compressionOptions : compressionOptionsArgs) { + if (compressionOptions instanceof BrotliOptions) { + brotliOptions = (BrotliOptions) compressionOptions; + } else if (compressionOptions instanceof GzipOptions) { + gzipCompressionOptions = (GzipOptions) compressionOptions; + } else if (compressionOptions instanceof DeflateOptions) { + deflateOptions = (DeflateOptions) compressionOptions; + } else if (compressionOptions instanceof ZstdOptions) { + zstdOptions = (ZstdOptions) compressionOptions; + } else { + throw new IllegalArgumentException("Unsupported " + CompressionOptions.class.getSimpleName() + + ": " + compressionOptions); + } } - this.compressionLevel = compressionLevel; - this.windowBits = windowBits; - this.memLevel = memLevel; + + supportsCompressionOptions = true; propertyKey = connection().newKey(); 
connection().addListener(new Http2ConnectionAdapter() { @@ -82,13 +142,13 @@ public void onStreamRemoved(Http2Stream stream) { } @Override - public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding, - final boolean endOfStream, ChannelPromise promise) { + public Future writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding, + final boolean endOfStream) { final Http2Stream stream = connection().stream(streamId); final EmbeddedChannel channel = stream == null ? null : (EmbeddedChannel) stream.getProperty(propertyKey); if (channel == null) { // The compressor may be null if no compatible encoding type was found in this stream's headers - return super.writeData(ctx, streamId, data, padding, endOfStream, promise); + return super.writeData(ctx, streamId, data, padding, endOfStream); } try { @@ -101,14 +161,14 @@ public ChannelFuture writeData(final ChannelHandlerContext ctx, final int stream buf = nextReadableBuf(channel); } return super.writeData(ctx, streamId, buf == null ? Unpooled.EMPTY_BUFFER : buf, padding, - true, promise); + true); } // END_STREAM is not set and the assumption is data is still forthcoming. 
- promise.setSuccess(); - return promise; + return ctx.newSucceededFuture(); } - PromiseCombiner combiner = new PromiseCombiner(); + Promise promise = ctx.newPromise(); + PromiseCombiner combiner = new PromiseCombiner(ctx.executor()); for (;;) { ByteBuf nextBuf = nextReadableBuf(channel); boolean compressedEndOfStream = nextBuf == null && endOfStream; @@ -117,9 +177,8 @@ public ChannelFuture writeData(final ChannelHandlerContext ctx, final int stream compressedEndOfStream = nextBuf == null; } - ChannelPromise bufPromise = ctx.newPromise(); - combiner.add(bufPromise); - super.writeData(ctx, streamId, buf, padding, compressedEndOfStream, bufPromise); + Future future = super.writeData(ctx, streamId, buf, padding, compressedEndOfStream); + combiner.add(future); if (nextBuf == null) { break; } @@ -128,56 +187,54 @@ public ChannelFuture writeData(final ChannelHandlerContext ctx, final int stream buf = nextBuf; } combiner.finish(promise); + return promise; } catch (Throwable cause) { - promise.tryFailure(cause); + return ctx.newFailedFuture(cause); } finally { if (endOfStream) { cleanup(stream, channel); } } - return promise; } @Override - public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, - boolean endStream, ChannelPromise promise) { + public Future writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, + boolean endStream) { try { // Determine if compression is required and sanitize the headers. EmbeddedChannel compressor = newCompressor(ctx, headers, endStream); // Write the headers and create the stream object. - ChannelFuture future = super.writeHeaders(ctx, streamId, headers, padding, endStream, promise); + Future future = super.writeHeaders(ctx, streamId, headers, padding, endStream); // After the stream object has been created, then attach the compressor as a property for data compression. 
bindCompressorToStream(compressor, streamId); return future; } catch (Throwable e) { - promise.tryFailure(e); + return ctx.newFailedFuture(e); } - return promise; } @Override - public ChannelFuture writeHeaders(final ChannelHandlerContext ctx, final int streamId, final Http2Headers headers, + public Future writeHeaders(final ChannelHandlerContext ctx, final int streamId, final Http2Headers headers, final int streamDependency, final short weight, final boolean exclusive, final int padding, - final boolean endOfStream, final ChannelPromise promise) { + final boolean endOfStream) { try { // Determine if compression is required and sanitize the headers. EmbeddedChannel compressor = newCompressor(ctx, headers, endOfStream); // Write the headers and create the stream object. - ChannelFuture future = super.writeHeaders(ctx, streamId, headers, streamDependency, weight, exclusive, - padding, endOfStream, promise); + Future future = super.writeHeaders(ctx, streamId, headers, streamDependency, weight, exclusive, + padding, endOfStream); // After the stream object has been created, then attach the compressor as a property for data compression. 
bindCompressorToStream(compressor, streamId); return future; } catch (Throwable e) { - promise.tryFailure(e); + return ctx.newFailedFuture(e); } - return promise; } /** @@ -198,6 +255,15 @@ protected EmbeddedChannel newContentCompressor(ChannelHandlerContext ctx, CharSe if (DEFLATE.contentEqualsIgnoreCase(contentEncoding) || X_DEFLATE.contentEqualsIgnoreCase(contentEncoding)) { return newCompressionChannel(ctx, ZlibWrapper.ZLIB); } + if (brotliOptions != null && BR.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new BrotliEncoder(brotliOptions.parameters())); + } + if (zstdOptions != null && ZSTD.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new ZstdEncoder(zstdOptions.compressionLevel(), + zstdOptions.blockSize(), zstdOptions.maxEncodeSize())); + } // 'identity' or unsupported return null; } @@ -220,9 +286,25 @@ protected CharSequence getTargetContentEncoding(CharSequence contentEncoding) th * @param wrapper Defines what type of encoder should be used */ private EmbeddedChannel newCompressionChannel(final ChannelHandlerContext ctx, ZlibWrapper wrapper) { - return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), - ctx.channel().config(), ZlibCodecFactory.newZlibEncoder(wrapper, compressionLevel, windowBits, - memLevel)); + if (supportsCompressionOptions) { + if (wrapper == ZlibWrapper.GZIP && gzipCompressionOptions != null) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibEncoder(wrapper, + gzipCompressionOptions.compressionLevel(), gzipCompressionOptions.windowBits(), + gzipCompressionOptions.memLevel())); + } else if (wrapper == ZlibWrapper.ZLIB && deflateOptions != null) { + return new 
EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibEncoder(wrapper, + deflateOptions.compressionLevel(), deflateOptions.windowBits(), + deflateOptions.memLevel())); + } else { + throw new IllegalArgumentException("Unsupported ZlibWrapper: " + wrapper); + } + } else { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), ZlibCodecFactory.newZlibEncoder(wrapper, compressionLevel, windowBits, + memLevel)); + } } /** @@ -284,16 +366,7 @@ private void bindCompressorToStream(EmbeddedChannel compressor, int streamId) { * @param compressor The compressor for {@code stream} */ void cleanup(Http2Stream stream, EmbeddedChannel compressor) { - if (compressor.finish()) { - for (;;) { - final ByteBuf buf = compressor.readOutbound(); - if (buf == null) { - break; - } - - buf.release(); - } - } + compressor.finishAndReleaseAll(); stream.removeProperty(propertyKey); } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionDecoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionDecoder.java index 4a1ae29d575..e84b27f3185 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionDecoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionDecoder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -14,14 +14,12 @@ */ package io.netty.handler.codec.http2; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; import io.netty.util.internal.UnstableApi; -import java.util.List; - /** * Decorator around another {@link Http2ConnectionDecoder} instance. */ @@ -30,7 +28,7 @@ public class DecoratingHttp2ConnectionDecoder implements Http2ConnectionDecoder private final Http2ConnectionDecoder delegate; public DecoratingHttp2ConnectionDecoder(Http2ConnectionDecoder delegate) { - this.delegate = checkNotNull(delegate, "delegate"); + this.delegate = requireNonNull(delegate, "delegate"); } @Override @@ -59,8 +57,8 @@ public Http2FrameListener frameListener() { } @Override - public void decodeFrame(ChannelHandlerContext ctx, ByteBuf in, List out) throws Http2Exception { - delegate.decodeFrame(ctx, in, out); + public void decodeFrame(ChannelHandlerContext ctx, ByteBuf in) throws Http2Exception { + delegate.decodeFrame(ctx, in); } @Override diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionEncoder.java index 9d591ab9e68..43e2791f44e 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionEncoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2ConnectionEncoder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -16,18 +16,19 @@ import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * A decorator around another {@link Http2ConnectionEncoder} instance. */ @UnstableApi -public class DecoratingHttp2ConnectionEncoder extends DecoratingHttp2FrameWriter implements Http2ConnectionEncoder { +public class DecoratingHttp2ConnectionEncoder extends DecoratingHttp2FrameWriter implements Http2ConnectionEncoder, + Http2SettingsReceivedConsumer { private final Http2ConnectionEncoder delegate; public DecoratingHttp2ConnectionEncoder(Http2ConnectionEncoder delegate) { super(delegate); - this.delegate = checkNotNull(delegate, "delegate"); + this.delegate = requireNonNull(delegate, "delegate"); } @Override @@ -59,4 +60,14 @@ public Http2Settings pollSentSettings() { public void remoteSettings(Http2Settings settings) throws Http2Exception { delegate.remoteSettings(settings); } + + @Override + public void consumeReceivedSettings(Http2Settings settings) { + if (delegate instanceof Http2SettingsReceivedConsumer) { + ((Http2SettingsReceivedConsumer) delegate).consumeReceivedSettings(settings); + } else { + throw new IllegalStateException("delegate " + delegate + " is not an instance of " + + Http2SettingsReceivedConsumer.class); + } + } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2FrameWriter.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2FrameWriter.java index f7caa48ddad..b8b0d3e9387 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2FrameWriter.java +++ 
b/codec-http2/src/main/java/io/netty/handler/codec/http2/DecoratingHttp2FrameWriter.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -15,12 +15,11 @@ package io.netty.handler.codec.http2; import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; +import io.netty.util.concurrent.Future; import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * Decorator around another {@link Http2FrameWriter} instance. 
@@ -30,78 +29,75 @@ public class DecoratingHttp2FrameWriter implements Http2FrameWriter { private final Http2FrameWriter delegate; public DecoratingHttp2FrameWriter(Http2FrameWriter delegate) { - this.delegate = checkNotNull(delegate, "delegate"); + this.delegate = requireNonNull(delegate, "delegate"); } @Override - public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, - boolean endStream, ChannelPromise promise) { - return delegate.writeData(ctx, streamId, data, padding, endStream, promise); + public Future writeData(ChannelHandlerContext ctx, int streamId, ByteBuf data, int padding, + boolean endStream) { + return delegate.writeData(ctx, streamId, data, padding, endStream); } @Override - public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, - boolean endStream, ChannelPromise promise) { - return delegate.writeHeaders(ctx, streamId, headers, padding, endStream, promise); + public Future writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, + boolean endStream) { + return delegate.writeHeaders(ctx, streamId, headers, padding, endStream); } @Override - public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, - int streamDependency, short weight, boolean exclusive, int padding, - boolean endStream, ChannelPromise promise) { + public Future writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, + int streamDependency, short weight, boolean exclusive, int padding, + boolean endStream) { return delegate - .writeHeaders(ctx, streamId, headers, streamDependency, weight, exclusive, padding, endStream, promise); + .writeHeaders(ctx, streamId, headers, streamDependency, weight, exclusive, padding, endStream); } @Override - public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, - boolean exclusive, ChannelPromise promise) { - 
return delegate.writePriority(ctx, streamId, streamDependency, weight, exclusive, promise); + public Future writePriority(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, + boolean exclusive) { + return delegate.writePriority(ctx, streamId, streamDependency, weight, exclusive); } @Override - public ChannelFuture writeRstStream(ChannelHandlerContext ctx, int streamId, long errorCode, - ChannelPromise promise) { - return delegate.writeRstStream(ctx, streamId, errorCode, promise); + public Future writeRstStream(ChannelHandlerContext ctx, int streamId, long errorCode) { + return delegate.writeRstStream(ctx, streamId, errorCode); } @Override - public ChannelFuture writeSettings(ChannelHandlerContext ctx, Http2Settings settings, ChannelPromise promise) { - return delegate.writeSettings(ctx, settings, promise); + public Future writeSettings(ChannelHandlerContext ctx, Http2Settings settings) { + return delegate.writeSettings(ctx, settings); } @Override - public ChannelFuture writeSettingsAck(ChannelHandlerContext ctx, ChannelPromise promise) { - return delegate.writeSettingsAck(ctx, promise); + public Future writeSettingsAck(ChannelHandlerContext ctx) { + return delegate.writeSettingsAck(ctx); } @Override - public ChannelFuture writePing(ChannelHandlerContext ctx, boolean ack, long data, ChannelPromise promise) { - return delegate.writePing(ctx, ack, data, promise); + public Future writePing(ChannelHandlerContext ctx, boolean ack, long data) { + return delegate.writePing(ctx, ack, data); } @Override - public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, int promisedStreamId, - Http2Headers headers, int padding, ChannelPromise promise) { - return delegate.writePushPromise(ctx, streamId, promisedStreamId, headers, padding, promise); + public Future writePushPromise(ChannelHandlerContext ctx, int streamId, int promisedStreamId, + Http2Headers headers, int padding) { + return delegate.writePushPromise(ctx, streamId, 
promisedStreamId, headers, padding); } @Override - public ChannelFuture writeGoAway(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData, - ChannelPromise promise) { - return delegate.writeGoAway(ctx, lastStreamId, errorCode, debugData, promise); + public Future writeGoAway(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData) { + return delegate.writeGoAway(ctx, lastStreamId, errorCode, debugData); } @Override - public ChannelFuture writeWindowUpdate(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement, - ChannelPromise promise) { - return delegate.writeWindowUpdate(ctx, streamId, windowSizeIncrement, promise); + public Future writeWindowUpdate(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement) { + return delegate.writeWindowUpdate(ctx, streamId, windowSizeIncrement); } @Override - public ChannelFuture writeFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags, - ByteBuf payload, ChannelPromise promise) { - return delegate.writeFrame(ctx, frameType, streamId, flags, payload, promise); + public Future writeFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags, + ByteBuf payload) { + return delegate.writeFrame(ctx, frameType, streamId, flags, payload); } @Override diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Connection.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Connection.java index 12815c225c8..82d03cc93cf 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Connection.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Connection.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -16,16 +16,13 @@ package io.netty.handler.codec.http2; import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http2.Http2Stream.State; import io.netty.util.collection.IntObjectHashMap; import io.netty.util.collection.IntObjectMap; import io.netty.util.collection.IntObjectMap.PrimitiveEntry; -import io.netty.util.concurrent.Future; import io.netty.util.concurrent.Promise; import io.netty.util.concurrent.UnaryPromiseNotifier; import io.netty.util.internal.EmptyArrays; -import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.UnstableApi; import io.netty.util.internal.logging.InternalLogger; import io.netty.util.internal.logging.InternalLoggerFactory; @@ -54,9 +51,9 @@ import static io.netty.handler.codec.http2.Http2Stream.State.OPEN; import static io.netty.handler.codec.http2.Http2Stream.State.RESERVED_LOCAL; import static io.netty.handler.codec.http2.Http2Stream.State.RESERVED_REMOTE; -import static io.netty.util.internal.ObjectUtil.checkNotNull; import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; import static java.lang.Integer.MAX_VALUE; +import static java.util.Objects.requireNonNull; /** * Simple implementation of {@link Http2Connection}. @@ -79,7 +76,7 @@ public class DefaultHttp2Connection implements Http2Connection { * (local/remote flow controller and {@link StreamByteDistributor}) and we leave room for 1 extra. * We could be more aggressive but the ArrayList resize will double the size if we are too small. 
*/ - final List listeners = new ArrayList(4); + final List listeners = new ArrayList<>(4); final ActiveStreams activeStreams; Promise closePromise; @@ -103,8 +100,8 @@ public DefaultHttp2Connection(boolean server, int maxReservedStreams) { // in response to any locally enforced limits being exceeded [2]. // [1] https://tools.ietf.org/html/rfc7540#section-5.1.2 // [2] https://tools.ietf.org/html/rfc7540#section-8.2.2 - localEndpoint = new DefaultEndpoint(server, server ? MAX_VALUE : maxReservedStreams); - remoteEndpoint = new DefaultEndpoint(!server, maxReservedStreams); + localEndpoint = new DefaultEndpoint<>(server, server ? MAX_VALUE : maxReservedStreams); + remoteEndpoint = new DefaultEndpoint<>(!server, maxReservedStreams); // Add the connection stream to the map. streamMap.put(connectionStream.id(), connectionStream); @@ -118,24 +115,22 @@ final boolean isClosed() { } @Override - public Future close(final Promise promise) { - checkNotNull(promise, "promise"); + public void close(final Promise promise) { + requireNonNull(promise, "promise"); // Since we allow this method to be called multiple times, we must make sure that all the promises are notified // when all streams are removed and the close operation completes. 
if (closePromise != null) { if (closePromise == promise) { // Do nothing - } else if ((promise instanceof ChannelPromise) && ((ChannelPromise) closePromise).isVoid()) { - closePromise = promise; } else { - closePromise.addListener(new UnaryPromiseNotifier(promise)); + closePromise.addListener(new UnaryPromiseNotifier<>(promise)); } } else { closePromise = promise; } if (isStreamMapEmpty()) { promise.trySuccess(null); - return promise; + return; } Iterator> itr = streamMap.entries().iterator(); @@ -166,7 +161,6 @@ public Future close(final Promise promise) { } } } - return closePromise; } @Override @@ -225,7 +219,12 @@ public boolean goAwayReceived() { } @Override - public void goAwayReceived(final int lastKnownStream, long errorCode, ByteBuf debugData) { + public void goAwayReceived(final int lastKnownStream, long errorCode, ByteBuf debugData) throws Http2Exception { + if (localEndpoint.lastStreamKnownByPeer() >= 0 && localEndpoint.lastStreamKnownByPeer() < lastKnownStream) { + throw connectionError(PROTOCOL_ERROR, "lastStreamId MUST NOT increase. 
Current value: %d new value: %d", + localEndpoint.lastStreamKnownByPeer(), lastKnownStream); + } + localEndpoint.lastStreamKnownByPeer(lastKnownStream); for (int i = 0; i < listeners.size(); ++i) { try { @@ -235,19 +234,7 @@ public void goAwayReceived(final int lastKnownStream, long errorCode, ByteBuf de } } - try { - forEachActiveStream(new Http2StreamVisitor() { - @Override - public boolean visit(Http2Stream stream) { - if (stream.id() > lastKnownStream && localEndpoint.isValidStreamId(stream.id())) { - stream.close(); - } - return true; - } - }); - } catch (Http2Exception e) { - PlatformDependent.throwException(e); - } + closeStreamsGreaterThanLastKnownStreamId(lastKnownStream, localEndpoint); } @Override @@ -256,7 +243,20 @@ public boolean goAwaySent() { } @Override - public void goAwaySent(final int lastKnownStream, long errorCode, ByteBuf debugData) { + public boolean goAwaySent(final int lastKnownStream, long errorCode, ByteBuf debugData) throws Http2Exception { + if (remoteEndpoint.lastStreamKnownByPeer() >= 0) { + // Protect against re-entrancy. Could happen if writing the frame fails, and error handling + // treating this is a connection handler and doing a graceful shutdown... 
+ if (lastKnownStream == remoteEndpoint.lastStreamKnownByPeer()) { + return false; + } + if (lastKnownStream > remoteEndpoint.lastStreamKnownByPeer()) { + throw connectionError(PROTOCOL_ERROR, "Last stream identifier must not increase between " + + "sending multiple GOAWAY frames (was '%d', is '%d').", + remoteEndpoint.lastStreamKnownByPeer(), lastKnownStream); + } + } + remoteEndpoint.lastStreamKnownByPeer(lastKnownStream); for (int i = 0; i < listeners.size(); ++i) { try { @@ -266,19 +266,18 @@ public void goAwaySent(final int lastKnownStream, long errorCode, ByteBuf debugD } } - try { - forEachActiveStream(new Http2StreamVisitor() { - @Override - public boolean visit(Http2Stream stream) { - if (stream.id() > lastKnownStream && remoteEndpoint.isValidStreamId(stream.id())) { - stream.close(); - } - return true; - } - }); - } catch (Http2Exception e) { - PlatformDependent.throwException(e); - } + closeStreamsGreaterThanLastKnownStreamId(lastKnownStream, remoteEndpoint); + return true; + } + + private void closeStreamsGreaterThanLastKnownStreamId(final int lastKnownStream, + final DefaultEndpoint endpoint) throws Http2Exception { + forEachActiveStream(stream -> { + if (stream.id() > lastKnownStream && endpoint.isValidStreamId(stream.id())) { + stream.close(); + } + return true; + }); } /** @@ -366,7 +365,7 @@ public PropertyKey newKey() { * @throws IllegalArgumentException if the key was not created by this connection. 
*/ final DefaultPropertyKey verifyKey(PropertyKey key) { - return checkNotNull((DefaultPropertyKey) key, "key").verifyConnection(this); + return requireNonNull((DefaultPropertyKey) key, "key").verifyConnection(this); } /** @@ -478,11 +477,19 @@ public Http2Stream open(boolean halfClosed) throws Http2Exception { if (!createdBy().canOpenStream()) { throw connectionError(PROTOCOL_ERROR, "Maximum active streams violated for this endpoint."); } + activate(); return this; } void activate() { + // If the stream is opened in a half-closed state, the headers must have either + // been sent if this is a local stream, or received if it is a remote stream. + if (state == HALF_CLOSED_LOCAL) { + headersSent(/*isInformational*/ false); + } else if (state == HALF_CLOSED_REMOTE) { + headersReceived(/*isInformational*/ false); + } activeStreams.activate(this); } @@ -666,7 +673,7 @@ private final class DefaultEndpoint implements En */ private int nextReservationStreamId; private int lastStreamKnownByPeer = -1; - private boolean pushToAllowed = true; + private boolean pushToAllowed; private F flowController; private int maxStreams; private int maxActiveStreams; @@ -831,7 +838,7 @@ public int lastStreamKnownByPeer() { } private void lastStreamKnownByPeer(int lastKnownStream) { - this.lastStreamKnownByPeer = lastKnownStream; + lastStreamKnownByPeer = lastKnownStream; } @Override @@ -841,7 +848,7 @@ public F flowController() { @Override public void flowController(F flowController) { - this.flowController = checkNotNull(flowController, "flowController"); + this.flowController = requireNonNull(flowController, "flowController"); } @Override @@ -855,10 +862,10 @@ private void updateMaxStreams() { private void checkNewStreamAllowed(int streamId, State state) throws Http2Exception { assert state != IDLE; - if (goAwayReceived() && streamId > localEndpoint.lastStreamKnownByPeer()) { - throw connectionError(PROTOCOL_ERROR, "Cannot create stream %d since this endpoint has received a " + - "GOAWAY 
frame with last stream id %d.", streamId, - localEndpoint.lastStreamKnownByPeer()); + if (lastStreamKnownByPeer >= 0 && streamId > lastStreamKnownByPeer) { + throw streamError(streamId, REFUSED_STREAM, + "Cannot create stream %d greater than Last-Stream-ID %d from GOAWAY.", + streamId, lastStreamKnownByPeer); } if (!isValidStreamId(streamId)) { if (streamId < 0) { @@ -874,7 +881,10 @@ private void checkNewStreamAllowed(int streamId, State state) throws Http2Except streamId, nextStreamIdToCreate); } if (nextStreamIdToCreate <= 0) { - throw connectionError(REFUSED_STREAM, "Stream IDs are exhausted for this endpoint."); + // We exhausted the stream id space that we can use. Let's signal this back but also signal that + // we still may want to process active streams. + throw new Http2Exception(REFUSED_STREAM, "Stream IDs are exhausted for this endpoint.", + Http2Exception.ShutdownHint.GRACEFUL_SHUTDOWN); } boolean isReserved = state == RESERVED_LOCAL || state == RESERVED_REMOTE; if (!isReserved && !canOpenStream() || isReserved && numStreams >= maxStreams) { @@ -911,11 +921,11 @@ interface Event { */ private final class ActiveStreams { private final List listeners; - private final Queue pendingEvents = new ArrayDeque(4); - private final Set streams = new LinkedHashSet(); + private final Queue pendingEvents = new ArrayDeque<>(4); + private final Set streams = new LinkedHashSet<>(); private int pendingIterations; - public ActiveStreams(List listeners) { + ActiveStreams(List listeners) { this.listeners = listeners; } @@ -927,12 +937,7 @@ public void activate(final DefaultStream stream) { if (allowModifications()) { addToActiveStreams(stream); } else { - pendingEvents.add(new Event() { - @Override - public void process() { - addToActiveStreams(stream); - } - }); + pendingEvents.add(() -> addToActiveStreams(stream)); } } @@ -940,12 +945,7 @@ public void deactivate(final DefaultStream stream, final Iterator itr) { if (allowModifications() || itr != null) { 
removeFromActiveStreams(stream, itr); } else { - pendingEvents.add(new Event() { - @Override - public void process() { - removeFromActiveStreams(stream, itr); - } - }); + pendingEvents.add(() -> removeFromActiveStreams(stream, itr)); } } @@ -1040,7 +1040,7 @@ private final class PropertyKeyRegistry { * (local/remote flow controller and {@link StreamByteDistributor}) and we leave room for 1 extra. * We could be more aggressive but the ArrayList resize will double the size if we are too small. */ - final List keys = new ArrayList(4); + final List keys = new ArrayList<>(4); /** * Registers a new property key. diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionDecoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionDecoder.java index 49335e95e73..34d3faeea8a 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionDecoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionDecoder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -16,7 +16,9 @@ import io.netty.buffer.ByteBuf; import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http.HttpHeaderNames; import io.netty.handler.codec.http.HttpStatusClass; +import io.netty.handler.codec.http.HttpUtil; import io.netty.handler.codec.http2.Http2Connection.Endpoint; import io.netty.util.internal.UnstableApi; import io.netty.util.internal.logging.InternalLogger; @@ -34,9 +36,9 @@ import static io.netty.handler.codec.http2.Http2PromisedRequestVerifier.ALWAYS_VERIFY; import static io.netty.handler.codec.http2.Http2Stream.State.CLOSED; import static io.netty.handler.codec.http2.Http2Stream.State.HALF_CLOSED_REMOTE; -import static io.netty.util.internal.ObjectUtil.checkNotNull; import static java.lang.Integer.MAX_VALUE; import static java.lang.Math.min; +import static java.util.Objects.requireNonNull; /** * Provides the default implementation for processing inbound frame events and delegates to a @@ -57,6 +59,9 @@ public class DefaultHttp2ConnectionDecoder implements Http2ConnectionDecoder { private final Http2FrameReader frameReader; private Http2FrameListener listener; private final Http2PromisedRequestVerifier requestVerifier; + private final Http2SettingsReceivedConsumer settingsReceivedConsumer; + private final boolean autoAckPing; + private final Http2Connection.PropertyKey contentLengthKey; public DefaultHttp2ConnectionDecoder(Http2Connection connection, Http2ConnectionEncoder encoder, @@ -68,10 +73,65 @@ public DefaultHttp2ConnectionDecoder(Http2Connection connection, Http2ConnectionEncoder encoder, Http2FrameReader frameReader, Http2PromisedRequestVerifier requestVerifier) { - 
this.connection = checkNotNull(connection, "connection"); - this.frameReader = checkNotNull(frameReader, "frameReader"); - this.encoder = checkNotNull(encoder, "encoder"); - this.requestVerifier = checkNotNull(requestVerifier, "requestVerifier"); + this(connection, encoder, frameReader, requestVerifier, true); + } + + /** + * Create a new instance. + * @param connection The {@link Http2Connection} associated with this decoder. + * @param encoder The {@link Http2ConnectionEncoder} associated with this decoder. + * @param frameReader Responsible for reading/parsing the raw frames. As opposed to this object which applies + * h2 semantics on top of the frames. + * @param requestVerifier Determines if push promised streams are valid. + * @param autoAckSettings {@code false} to disable automatically applying and sending settings acknowledge frame. + * The {@code Http2ConnectionEncoder} is expected to be an instance of {@link Http2SettingsReceivedConsumer} and + * will apply the earliest received but not yet ACKed SETTINGS when writing the SETTINGS ACKs. + * {@code true} to enable automatically applying and sending settings acknowledge frame. + */ + public DefaultHttp2ConnectionDecoder(Http2Connection connection, + Http2ConnectionEncoder encoder, + Http2FrameReader frameReader, + Http2PromisedRequestVerifier requestVerifier, + boolean autoAckSettings) { + this(connection, encoder, frameReader, requestVerifier, autoAckSettings, true); + } + + /** + * Create a new instance. + * @param connection The {@link Http2Connection} associated with this decoder. + * @param encoder The {@link Http2ConnectionEncoder} associated with this decoder. + * @param frameReader Responsible for reading/parsing the raw frames. As opposed to this object which applies + * h2 semantics on top of the frames. + * @param requestVerifier Determines if push promised streams are valid. + * @param autoAckSettings {@code false} to disable automatically applying and sending settings acknowledge frame. 
+ * The {@code Http2ConnectionEncoder} is expected to be an instance of + * {@link Http2SettingsReceivedConsumer} and will apply the earliest received but not yet + * ACKed SETTINGS when writing the SETTINGS ACKs. {@code true} to enable automatically + * applying and sending settings acknowledge frame. + * @param autoAckPing {@code false} to disable automatically sending ping acknowledge frame. {@code true} to enable + * automatically sending ping ack frame. + */ + public DefaultHttp2ConnectionDecoder(Http2Connection connection, + Http2ConnectionEncoder encoder, + Http2FrameReader frameReader, + Http2PromisedRequestVerifier requestVerifier, + boolean autoAckSettings, + boolean autoAckPing) { + this.autoAckPing = autoAckPing; + if (autoAckSettings) { + settingsReceivedConsumer = null; + } else { + if (!(encoder instanceof Http2SettingsReceivedConsumer)) { + throw new IllegalArgumentException("disabling autoAckSettings requires the encoder to be a " + + Http2SettingsReceivedConsumer.class); + } + settingsReceivedConsumer = (Http2SettingsReceivedConsumer) encoder; + } + this.connection = requireNonNull(connection, "connection"); + contentLengthKey = this.connection.newKey(); + this.frameReader = requireNonNull(frameReader, "frameReader"); + this.encoder = requireNonNull(encoder, "encoder"); + this.requestVerifier = requireNonNull(requestVerifier, "requestVerifier"); if (connection.local().flowController() == null) { connection.local().flowController(new DefaultHttp2LocalFlowController(connection)); } @@ -80,7 +140,7 @@ public DefaultHttp2ConnectionDecoder(Http2Connection connection, @Override public void lifecycleManager(Http2LifecycleManager lifecycleManager) { - this.lifecycleManager = checkNotNull(lifecycleManager, "lifecycleManager"); + this.lifecycleManager = requireNonNull(lifecycleManager, "lifecycleManager"); } @Override @@ -95,7 +155,7 @@ public final Http2LocalFlowController flowController() { @Override public void frameListener(Http2FrameListener listener) 
{ - this.listener = checkNotNull(listener, "listener"); + this.listener = requireNonNull(listener, "listener"); } @Override @@ -103,18 +163,13 @@ public Http2FrameListener frameListener() { return listener; } - // Visible for testing - Http2FrameListener internalFrameListener() { - return internalFrameListener; - } - @Override public boolean prefaceReceived() { return FrameReadListener.class == internalFrameListener.getClass(); } @Override - public void decodeFrame(ChannelHandlerContext ctx, ByteBuf in, List out) throws Http2Exception { + public void decodeFrame(ChannelHandlerContext ctx, ByteBuf in) throws Http2Exception { frameReader.readFrame(ctx, in, internalFrameListener); } @@ -158,10 +213,6 @@ private int unconsumedBytes(Http2Stream stream) { void onGoAwayRead0(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData) throws Http2Exception { - if (connection.goAwayReceived() && connection.local().lastStreamKnownByPeer() < lastStreamId) { - throw connectionError(PROTOCOL_ERROR, "lastStreamId MUST NOT increase. Current value: %d new value: %d", - connection.local().lastStreamKnownByPeer(), lastStreamId); - } listener.onGoAwayRead(ctx, lastStreamId, errorCode, debugData); connection.goAwayReceived(lastStreamId, errorCode, debugData); } @@ -171,6 +222,20 @@ void onUnknownFrame0(ChannelHandlerContext ctx, byte frameType, int streamId, Ht listener.onUnknownFrame(ctx, frameType, streamId, flags, payload); } + // See https://tools.ietf.org/html/rfc7540#section-8.1.2.6 + private void verifyContentLength(Http2Stream stream, int data, boolean isEnd) throws Http2Exception { + ContentLength contentLength = stream.getProperty(contentLengthKey); + if (contentLength != null) { + try { + contentLength.increaseReceivedBytes(connection.isServer(), stream.id(), data, isEnd); + } finally { + if (isEnd) { + stream.removeProperty(contentLengthKey); + } + } + } + } + /** * Handles all inbound frames from the network. 
*/ @@ -180,7 +245,8 @@ public int onDataRead(final ChannelHandlerContext ctx, int streamId, ByteBuf dat boolean endOfStream) throws Http2Exception { Http2Stream stream = connection.stream(streamId); Http2LocalFlowController flowController = flowController(); - int bytesToReturn = data.readableBytes() + padding; + int readable = data.readableBytes(); + int bytesToReturn = readable + padding; final boolean shouldIgnore; try { @@ -207,7 +273,6 @@ public int onDataRead(final ChannelHandlerContext ctx, int streamId, ByteBuf dat // All bytes have been consumed. return bytesToReturn; } - Http2Exception error = null; switch (stream.state()) { case OPEN: @@ -235,18 +300,18 @@ public int onDataRead(final ChannelHandlerContext ctx, int streamId, ByteBuf dat throw error; } + verifyContentLength(stream, readable, endOfStream); + // Call back the application and retrieve the number of bytes that have been // immediately processed. bytesToReturn = listener.onDataRead(ctx, streamId, data, padding, endOfStream); + + if (endOfStream) { + lifecycleManager.closeStreamRemote(stream, ctx.newSucceededFuture()); + } + return bytesToReturn; - } catch (Http2Exception e) { - // If an exception happened during delivery, the listener may have returned part - // of the bytes before the error occurred. If that's the case, subtract that from - // the total processed bytes so that we don't return too many bytes. - int delta = unconsumedBytes - unconsumedBytes(stream); - bytesToReturn -= delta; - throw e; - } catch (RuntimeException e) { + } catch (Http2Exception | RuntimeException e) { // If an exception happened during delivery, the listener may have returned part // of the bytes before the error occurred. If that's the case, subtract that from // the total processed bytes so that we don't return too many bytes. @@ -256,10 +321,6 @@ public int onDataRead(final ChannelHandlerContext ctx, int streamId, ByteBuf dat } finally { // If appropriate, return the processed bytes to the flow controller. 
flowController.consumeBytes(stream, bytesToReturn); - - if (endOfStream) { - lifecycleManager.closeStreamRemote(stream, ctx.newSucceededFuture()); - } } } @@ -274,10 +335,13 @@ public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers short weight, boolean exclusive, int padding, boolean endOfStream) throws Http2Exception { Http2Stream stream = connection.stream(streamId); boolean allowHalfClosedRemote = false; + boolean isTrailers = false; if (stream == null && !connection.streamMayHaveExisted(streamId)) { stream = connection.remote().createStream(streamId, endOfStream); // Allow the state to be HALF_CLOSE_REMOTE if we're creating it in that state. allowHalfClosedRemote = stream.state() == HALF_CLOSED_REMOTE; + } else if (stream != null) { + isTrailers = stream.isHeadersReceived(); } if (shouldIgnoreHeadersOrDataFrame(ctx, streamId, stream, "HEADERS")) { @@ -315,11 +379,28 @@ public void onHeadersRead(ChannelHandlerContext ctx, int streamId, Http2Headers stream.state()); } + if (!isTrailers) { + // extract the content-length header + List contentLength = headers.getAll(HttpHeaderNames.CONTENT_LENGTH); + if (contentLength != null && !contentLength.isEmpty()) { + try { + long cLength = HttpUtil.normalizeAndGetContentLength(contentLength, false, true); + if (cLength != -1) { + headers.setLong(HttpHeaderNames.CONTENT_LENGTH, cLength); + stream.setProperty(contentLengthKey, new ContentLength(cLength)); + } + } catch (IllegalArgumentException e) { + throw streamError(stream.id(), PROTOCOL_ERROR, e, + "Multiple content-length headers received"); + } + } + } + stream.headersReceived(isInformational); + verifyContentLength(stream, 0, endOfStream); encoder.flowController().updateDependencyTree(streamId, streamDependency, weight, exclusive); - - listener.onHeadersRead(ctx, streamId, headers, streamDependency, weight, exclusive, padding, endOfStream); - + listener.onHeadersRead(ctx, streamId, headers, streamDependency, + weight, exclusive, padding, 
endOfStream); // If the headers completes this stream, close it. if (endOfStream) { lifecycleManager.closeStreamRemote(stream, ctx.newSucceededFuture()); @@ -412,23 +493,27 @@ private void applyLocalSettings(Http2Settings settings) throws Http2Exception { } @Override - public void onSettingsRead(ChannelHandlerContext ctx, Http2Settings settings) throws Http2Exception { - // Acknowledge receipt of the settings. We should do this before we process the settings to ensure our - // remote peer applies these settings before any subsequent frames that we may send which depend upon these - // new settings. See https://github.com/netty/netty/issues/6520. - encoder.writeSettingsAck(ctx, ctx.newPromise()); - - encoder.remoteSettings(settings); + public void onSettingsRead(final ChannelHandlerContext ctx, Http2Settings settings) throws Http2Exception { + if (settingsReceivedConsumer == null) { + // Acknowledge receipt of the settings. We should do this before we process the settings to ensure our + // remote peer applies these settings before any subsequent frames that we may send which depend upon + // these new settings. See https://github.com/netty/netty/issues/6520. + encoder.writeSettingsAck(ctx); + + encoder.remoteSettings(settings); + } else { + settingsReceivedConsumer.consumeReceivedSettings(settings); + } listener.onSettingsRead(ctx, settings); } @Override public void onPingRead(ChannelHandlerContext ctx, long data) throws Http2Exception { - // Send an ack back to the remote client. - // Need to retain the buffer here since it will be released after the write completes. - encoder.writePing(ctx, true, data, ctx.newPromise()); - + if (autoAckPing) { + // Send an ack back to the remote client. 
+ encoder.writePing(ctx, true, data); + } listener.onPingRead(ctx, data); } @@ -451,10 +536,6 @@ public void onPushPromiseRead(ChannelHandlerContext ctx, int streamId, int promi return; } - if (parentStream == null) { - throw connectionError(PROTOCOL_ERROR, "Stream %d does not exist", streamId); - } - switch (parentStream.state()) { case OPEN: case HALF_CLOSED_LOCAL: @@ -529,18 +610,30 @@ private boolean shouldIgnoreHeadersOrDataFrame(ChannelHandlerContext ctx, int st ctx.channel(), frameName, streamId); return true; } + + // Make sure it's not an out-of-order frame, like a rogue DATA frame, for a stream that could + // never have existed. + verifyStreamMayHaveExisted(streamId); + // Its possible that this frame would result in stream ID out of order creation (PROTOCOL ERROR) and its // also possible that this frame is received on a CLOSED stream (STREAM_CLOSED after a RST_STREAM is // sent). We don't have enough information to know for sure, so we choose the lesser of the two errors. throw streamError(streamId, STREAM_CLOSED, "Received %s frame for an unknown stream %d", frameName, streamId); - } else if (stream.isResetSent() || streamCreatedAfterGoAwaySent(streamId)) { + } + if (stream.isResetSent() || streamCreatedAfterGoAwaySent(streamId)) { + // If we have sent a reset stream it is assumed the stream will be closed after the write completes. + // If we have not sent a reset, but the stream was created after a GoAway this is not supported by + // DefaultHttp2Connection and if a custom Http2Connection is used it is assumed the lifetime is managed + // elsewhere so we don't close the stream or otherwise modify the stream's state. + if (logger.isInfoEnabled()) { - logger.info("{} ignoring {} frame for stream {} {}", ctx.channel(), frameName, + logger.info("{} ignoring {} frame for stream {}", ctx.channel(), frameName, stream.isResetSent() ? "RST_STREAM sent." : - ("Stream created after GOAWAY sent. 
Last known stream by peer " + - connection.remote().lastStreamKnownByPeer())); + "Stream created after GOAWAY sent. Last known stream by peer " + + connection.remote().lastStreamKnownByPeer()); } + return true; } return false; @@ -673,4 +766,40 @@ public void onUnknownFrame(ChannelHandlerContext ctx, byte frameType, int stream onUnknownFrame0(ctx, frameType, streamId, flags, payload); } } + + private static final class ContentLength { + private final long expected; + private long seen; + + ContentLength(long expected) { + this.expected = expected; + } + + void increaseReceivedBytes(boolean server, int streamId, int bytes, boolean isEnd) throws Http2Exception { + seen += bytes; + // Check for overflow + if (seen < 0) { + throw streamError(streamId, PROTOCOL_ERROR, + "Received amount of data did overflow and so not match content-length header %d", expected); + } + // Check if we received more data then what was advertised via the content-length header. + if (seen > expected) { + throw streamError(streamId, PROTOCOL_ERROR, + "Received amount of data %d does not match content-length header %d", seen, expected); + } + + if (isEnd) { + if (seen == 0 && !server) { + // This may be a response to a HEAD request, let's just allow it. + return; + } + + // Check that we really saw what was told via the content-length header. 
+ if (expected > seen) { + throw streamError(streamId, PROTOCOL_ERROR, + "Received amount of data %d does not match content-length header %d", seen, expected); + } + } + } + } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java index d0c5944cb9f..63b9d837cce 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ConnectionEncoder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -15,39 +15,44 @@ package io.netty.handler.codec.http2; import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelFuture; -import io.netty.channel.ChannelFutureListener; +import io.netty.channel.Channel; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import io.netty.channel.CoalescingBufferQueue; import io.netty.handler.codec.http.HttpStatusClass; +import io.netty.handler.codec.http2.Http2CodecUtil.SimpleChannelPromiseAggregator; +import io.netty.util.concurrent.Future; +import io.netty.util.concurrent.FutureListener; +import io.netty.util.concurrent.Promise; import io.netty.util.internal.UnstableApi; import java.util.ArrayDeque; +import java.util.Queue; import static io.netty.handler.codec.http.HttpStatusClass.INFORMATIONAL; -import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_PRIORITY_WEIGHT; +import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR; import static 
io.netty.handler.codec.http2.Http2Error.PROTOCOL_ERROR; import static io.netty.handler.codec.http2.Http2Exception.connectionError; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; import static java.lang.Integer.MAX_VALUE; import static java.lang.Math.min; +import static java.util.Objects.requireNonNull; /** * Default implementation of {@link Http2ConnectionEncoder}. */ @UnstableApi -public class DefaultHttp2ConnectionEncoder implements Http2ConnectionEncoder { +public class DefaultHttp2ConnectionEncoder implements Http2ConnectionEncoder, Http2SettingsReceivedConsumer { private final Http2FrameWriter frameWriter; private final Http2Connection connection; private Http2LifecycleManager lifecycleManager; // We prefer ArrayDeque to LinkedList because later will produce more GC. // This initial capacity is plenty for SETTINGS traffic. - private final ArrayDeque outstandingLocalSettingsQueue = new ArrayDeque(4); + private final Queue outstandingLocalSettingsQueue = new ArrayDeque<>(4); + private Queue outstandingRemoteSettingsQueue; public DefaultHttp2ConnectionEncoder(Http2Connection connection, Http2FrameWriter frameWriter) { - this.connection = checkNotNull(connection, "connection"); - this.frameWriter = checkNotNull(frameWriter, "frameWriter"); + this.connection = requireNonNull(connection, "connection"); + this.frameWriter = requireNonNull(frameWriter, "frameWriter"); if (connection.remote().flowController() == null) { connection.remote().flowController(new DefaultHttp2RemoteFlowController(connection)); } @@ -55,7 +60,7 @@ public DefaultHttp2ConnectionEncoder(Http2Connection connection, Http2FrameWrite @Override public void lifecycleManager(Http2LifecycleManager lifecycleManager) { - this.lifecycleManager = checkNotNull(lifecycleManager, "lifecycleManager"); + this.lifecycleManager = requireNonNull(lifecycleManager, "lifecycleManager"); } @Override @@ -114,8 +119,8 @@ public void 
remoteSettings(Http2Settings settings) throws Http2Exception { } @Override - public ChannelFuture writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding, - final boolean endOfStream, ChannelPromise promise) { + public Future writeData(final ChannelHandlerContext ctx, final int streamId, ByteBuf data, int padding, + final boolean endOfStream) { final Http2Stream stream; try { stream = requireStream(streamId); @@ -131,19 +136,20 @@ public ChannelFuture writeData(final ChannelHandlerContext ctx, final int stream } } catch (Throwable e) { data.release(); - return promise.setFailure(e); + return ctx.newFailedFuture(e); } + Promise promise = ctx.newPromise(); // Hand control of the frame to the flow controller. flowController().addFlowControlled(stream, - new FlowControlledData(stream, data, padding, endOfStream, promise)); + new FlowControlledData(stream, data, padding, endOfStream, promise, ctx.channel())); return promise; } @Override - public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, - boolean endStream, ChannelPromise promise) { - return writeHeaders(ctx, streamId, headers, 0, DEFAULT_PRIORITY_WEIGHT, false, padding, endStream, promise); + public Future writeHeaders(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, + boolean endStream) { + return writeHeaders0(ctx, streamId, headers, false, 0, (short) 0, false, padding, endStream); } private static boolean validateHeadersSentState(Http2Stream stream, Http2Headers headers, boolean isServer, @@ -156,18 +162,48 @@ private static boolean validateHeadersSentState(Http2Stream stream, Http2Headers } @Override - public ChannelFuture writeHeaders(final ChannelHandlerContext ctx, final int streamId, - final Http2Headers headers, final int streamDependency, final short weight, - final boolean exclusive, final int padding, final boolean endOfStream, ChannelPromise promise) { + public Future writeHeaders(final 
ChannelHandlerContext ctx, final int streamId, + final Http2Headers headers, final int streamDependency, final short weight, + final boolean exclusive, final int padding, final boolean endOfStream) { + return writeHeaders0(ctx, streamId, headers, true, streamDependency, + weight, exclusive, padding, endOfStream); + } + + /** + * Write headers via {@link Http2FrameWriter}. If {@code hasPriority} is {@code false} it will ignore the + * {@code streamDependency}, {@code weight} and {@code exclusive} parameters. + */ + private static Future sendHeaders(Http2FrameWriter frameWriter, ChannelHandlerContext ctx, int streamId, + Http2Headers headers, final boolean hasPriority, + int streamDependency, final short weight, + boolean exclusive, final int padding, + boolean endOfStream) { + if (hasPriority) { + return frameWriter.writeHeaders(ctx, streamId, headers, streamDependency, + weight, exclusive, padding, endOfStream); + } + return frameWriter.writeHeaders(ctx, streamId, headers, padding, endOfStream); + } + + private Future writeHeaders0(final ChannelHandlerContext ctx, final int streamId, + final Http2Headers headers, final boolean hasPriority, + final int streamDependency, final short weight, + final boolean exclusive, final int padding, + final boolean endOfStream) { try { Http2Stream stream = connection.stream(streamId); if (stream == null) { try { - stream = connection.local().createStream(streamId, endOfStream); + // We don't create the stream in a `halfClosed` state because if this is an initial + // HEADERS frame we don't want the connection state to signify that the HEADERS have + // been sent until after they have been encoded and placed in the outbound buffer. + // Therefore, we let the `LifeCycleManager` will take care of transitioning the state + // as appropriate. 
+ stream = connection.local().createStream(streamId, /*endOfStream*/ false); } catch (Http2Exception cause) { if (connection.remote().mayHaveCreatedStream(streamId)) { - promise.tryFailure(new IllegalStateException("Stream no longer exists: " + streamId, cause)); - return promise; + return ctx.newFailedFuture( + new IllegalStateException("Stream no longer exists: " + streamId, cause)); } throw cause; } @@ -190,23 +226,16 @@ public ChannelFuture writeHeaders(final ChannelHandlerContext ctx, final int str // for this stream. Http2RemoteFlowController flowController = flowController(); if (!endOfStream || !flowController.hasFlowControlled(stream)) { + // The behavior here should mirror that in FlowControlledHeaders + boolean isInformational = validateHeadersSentState(stream, headers, connection.isServer(), endOfStream); - if (endOfStream) { - final Http2Stream finalStream = stream; - final ChannelFutureListener closeStreamLocalListener = new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - lifecycleManager.closeStreamLocal(finalStream, future); - } - }; - promise = promise.unvoid().addListener(closeStreamLocalListener); - } - ChannelFuture future = frameWriter.writeHeaders(ctx, streamId, headers, streamDependency, - weight, exclusive, padding, endOfStream, promise); + Future future = sendHeaders(frameWriter, ctx, streamId, headers, hasPriority, streamDependency, + weight, exclusive, padding, endOfStream); + // Writing headers may fail during the encode state if they violate HPACK limits. - Throwable failureCause = future.cause(); - if (failureCause == null) { + + if (future.isSuccess() || !future.isDone()) { // Synchronously set the headersSent flag to ensure that we do not subsequently write // other headers containing pseudo-header fields. 
// @@ -219,40 +248,46 @@ public void operationComplete(ChannelFuture future) throws Exception { notifyLifecycleManagerOnError(future, ctx); } } else { + Throwable failureCause = future.cause(); lifecycleManager.onError(ctx, true, failureCause); } + if (endOfStream) { + // Must handle calling onError before calling closeStreamLocal, otherwise the error handler will + // incorrectly think the stream no longer exists and so may not send RST_STREAM or perform similar + // appropriate action. + lifecycleManager.closeStreamLocal(stream, future); + } + return future; } else { + Promise promise = ctx.newPromise(); // Pass headers to the flow-controller so it can maintain their sequence relative to DATA frames. flowController.addFlowControlled(stream, - new FlowControlledHeaders(stream, headers, streamDependency, weight, exclusive, padding, - true, promise)); + new FlowControlledHeaders(stream, headers, hasPriority, streamDependency, + weight, exclusive, padding, true, promise)); return promise; } } catch (Throwable t) { lifecycleManager.onError(ctx, true, t); - promise.tryFailure(t); - return promise; + return ctx.newFailedFuture(t); } } @Override - public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, - boolean exclusive, ChannelPromise promise) { - return frameWriter.writePriority(ctx, streamId, streamDependency, weight, exclusive, promise); + public Future writePriority(ChannelHandlerContext ctx, int streamId, int streamDependency, short weight, + boolean exclusive) { + return frameWriter.writePriority(ctx, streamId, streamDependency, weight, exclusive); } @Override - public ChannelFuture writeRstStream(ChannelHandlerContext ctx, int streamId, long errorCode, - ChannelPromise promise) { + public Future writeRstStream(ChannelHandlerContext ctx, int streamId, long errorCode) { // Delegate to the lifecycle manager for proper updating of connection state. 
- return lifecycleManager.resetStream(ctx, streamId, errorCode, promise); + return lifecycleManager.resetStream(ctx, streamId, errorCode); } @Override - public ChannelFuture writeSettings(ChannelHandlerContext ctx, Http2Settings settings, - ChannelPromise promise) { + public Future writeSettings(ChannelHandlerContext ctx, Http2Settings settings) { outstandingLocalSettingsQueue.add(settings); try { Boolean pushEnabled = settings.pushEnabled(); @@ -260,25 +295,50 @@ public ChannelFuture writeSettings(ChannelHandlerContext ctx, Http2Settings sett throw connectionError(PROTOCOL_ERROR, "Server sending SETTINGS frame with ENABLE_PUSH specified"); } } catch (Throwable e) { - return promise.setFailure(e); + return ctx.newFailedFuture(e); } - return frameWriter.writeSettings(ctx, settings, promise); + return frameWriter.writeSettings(ctx, settings); } @Override - public ChannelFuture writeSettingsAck(ChannelHandlerContext ctx, ChannelPromise promise) { - return frameWriter.writeSettingsAck(ctx, promise); + public Future writeSettingsAck(ChannelHandlerContext ctx) { + if (outstandingRemoteSettingsQueue == null) { + return frameWriter.writeSettingsAck(ctx); + } + Http2Settings settings = outstandingRemoteSettingsQueue.poll(); + if (settings == null) { + return ctx.newFailedFuture(new Http2Exception(INTERNAL_ERROR, "attempted to write a SETTINGS ACK with no " + + " pending SETTINGS")); + } + SimpleChannelPromiseAggregator aggregator = + new SimpleChannelPromiseAggregator(ctx.newPromise(), ctx.executor()); + // Acknowledge receipt of the settings. We should do this before we process the settings to ensure our + // remote peer applies these settings before any subsequent frames that we may send which depend upon + // these new settings. See https://github.com/netty/netty/issues/6520. 
+ frameWriter.writeSettingsAck(ctx).cascadeTo(aggregator.newPromise()); + + // We create a "new promise" to make sure that status from both the write and the application are taken into + // account independently. + Promise applySettingsPromise = aggregator.newPromise(); + try { + remoteSettings(settings); + applySettingsPromise.setSuccess(null); + } catch (Throwable e) { + applySettingsPromise.setFailure(e); + lifecycleManager.onError(ctx, true, e); + } + return aggregator.doneAllocatingPromises(); } @Override - public ChannelFuture writePing(ChannelHandlerContext ctx, boolean ack, long data, ChannelPromise promise) { - return frameWriter.writePing(ctx, ack, data, promise); + public Future writePing(ChannelHandlerContext ctx, boolean ack, long data) { + return frameWriter.writePing(ctx, ack, data); } @Override - public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, int promisedStreamId, - Http2Headers headers, int padding, ChannelPromise promise) { + public Future writePushPromise(ChannelHandlerContext ctx, int streamId, int promisedStreamId, + Http2Headers headers, int padding) { try { if (connection.goAwayReceived()) { throw connectionError(PROTOCOL_ERROR, "Sending PUSH_PROMISE after GO_AWAY received."); @@ -288,11 +348,9 @@ public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, i // Reserve the promised stream. connection.local().reservePushStream(promisedStreamId, stream); - ChannelFuture future = frameWriter.writePushPromise(ctx, streamId, promisedStreamId, headers, padding, - promise); + Future future = frameWriter.writePushPromise(ctx, streamId, promisedStreamId, headers, padding); // Writing headers may fail during the encode state if they violate HPACK limits. 
- Throwable failureCause = future.cause(); - if (failureCause == null) { + if (future.isSuccess() || !future.isDone()) { // This just sets internal stream state which is used elsewhere in the codec and doesn't // necessarily mean the write will complete successfully. stream.pushPromiseSent(); @@ -302,33 +360,31 @@ public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, i notifyLifecycleManagerOnError(future, ctx); } } else { + Throwable failureCause = future.cause(); lifecycleManager.onError(ctx, true, failureCause); } return future; } catch (Throwable t) { lifecycleManager.onError(ctx, true, t); - promise.tryFailure(t); - return promise; + return ctx.newFailedFuture(t); } } @Override - public ChannelFuture writeGoAway(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData, - ChannelPromise promise) { - return lifecycleManager.goAway(ctx, lastStreamId, errorCode, debugData, promise); + public Future writeGoAway(ChannelHandlerContext ctx, int lastStreamId, long errorCode, ByteBuf debugData) { + return lifecycleManager.goAway(ctx, lastStreamId, errorCode, debugData); } @Override - public ChannelFuture writeWindowUpdate(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement, - ChannelPromise promise) { - return promise.setFailure(new UnsupportedOperationException("Use the Http2[Inbound|Outbound]FlowController" + - " objects to control window sizes")); + public Future writeWindowUpdate(ChannelHandlerContext ctx, int streamId, int windowSizeIncrement) { + return ctx.newFailedFuture(new UnsupportedOperationException("Use the Http2[Inbound|Outbound]FlowController" + + " objects to control window sizes")); } @Override - public ChannelFuture writeFrame(ChannelHandlerContext ctx, byte frameType, int streamId, Http2Flags flags, - ByteBuf payload, ChannelPromise promise) { - return frameWriter.writeFrame(ctx, frameType, streamId, flags, payload, promise); + public Future writeFrame(ChannelHandlerContext ctx, byte 
frameType, int streamId, Http2Flags flags, + ByteBuf payload) { + return frameWriter.writeFrame(ctx, frameType, streamId, flags, payload); } @Override @@ -360,6 +416,14 @@ private Http2Stream requireStream(int streamId) { return stream; } + @Override + public void consumeReceivedSettings(Http2Settings settings) { + if (outstandingRemoteSettingsQueue == null) { + outstandingRemoteSettingsQueue = new ArrayDeque<>(2); + } + outstandingRemoteSettingsQueue.add(settings); + } + /** * Wrap a DATA frame so it can be written subject to flow-control. Note that this implementation assumes it * only writes padding once for the entire payload as opposed to writing it once per-frame. This makes the @@ -374,9 +438,9 @@ private final class FlowControlledData extends FlowControlledBase { private int dataSize; FlowControlledData(Http2Stream stream, ByteBuf buf, int padding, boolean endOfStream, - ChannelPromise promise) { + Promise promise, Channel channel) { super(stream, padding, endOfStream, promise); - queue = new CoalescingBufferQueue(promise.channel()); + queue = new CoalescingBufferQueue(channel); queue.add(buf, promise); dataSize = queue.readableBytes(); } @@ -391,6 +455,9 @@ public void error(ChannelHandlerContext ctx, Throwable cause) { queue.releaseAndFailAll(cause); // Don't update dataSize because we need to ensure the size() method returns a consistent size even after // error so we don't invalidate flow control when returning bytes to flow control. + // + // That said we will set dataSize and padding to 0 in the write(...) method if we cleared the queue + // because of an error. lifecycleManager.onError(ctx, true, cause); } @@ -399,11 +466,22 @@ public void write(ChannelHandlerContext ctx, int allowedBytes) { int queuedData = queue.readableBytes(); if (!endOfStream) { if (queuedData == 0) { - // There's no need to write any data frames because there are only empty data frames in the queue - // and it is not end of stream yet. 
Just complete their promises by getting the buffer corresponding - // to 0 bytes and writing it to the channel (to preserve notification order). - ChannelPromise writePromise = ctx.newPromise().addListener(this); - ctx.write(queue.remove(0, writePromise), writePromise); + if (queue.isEmpty()) { + // When the queue is empty it means we did clear it because of an error(...) call + // (as otherwise we will have at least 1 entry in there), which will happen either when called + // explicit or when the write itself fails. In this case just set dataSize and padding to 0 + // which will signal back that the whole frame was consumed. + // + // See https://github.com/netty/netty/issues/8707. + padding = dataSize = 0; + } else { + // There's no need to write any data frames because there are only empty data frames in the + // queue and it is not end of stream yet. Just complete their promises by getting the buffer + // corresponding to 0 bytes and writing it to the channel (to preserve notification order). + Promise writePromise = ctx.newPromise(); + writePromise.addListener(this); + ctx.write(queue.remove(0, writePromise)).cascadeTo(writePromise); + } return; } @@ -414,7 +492,8 @@ public void write(ChannelHandlerContext ctx, int allowedBytes) { // Determine how much data to write. int writableData = min(queuedData, allowedBytes); - ChannelPromise writePromise = ctx.newPromise().addListener(this); + Promise writePromise = ctx.newPromise(); + writePromise.addListener(this); ByteBuf toWrite = queue.remove(writableData, writePromise); dataSize = queue.readableBytes(); @@ -424,7 +503,7 @@ public void write(ChannelHandlerContext ctx, int allowedBytes) { // Write the frame(s). 
frameWriter().writeData(ctx, stream.id(), toWrite, writablePadding, - endOfStream && size() == 0, writePromise); + endOfStream && size() == 0).cascadeTo(writePromise); } @Override @@ -443,14 +522,11 @@ public boolean merge(ChannelHandlerContext ctx, Http2RemoteFlowController.FlowCo } } - private void notifyLifecycleManagerOnError(ChannelFuture future, final ChannelHandlerContext ctx) { - future.addListener(new ChannelFutureListener() { - @Override - public void operationComplete(ChannelFuture future) throws Exception { - Throwable cause = future.cause(); - if (cause != null) { - lifecycleManager.onError(ctx, true, cause); - } + private void notifyLifecycleManagerOnError(Future future, final ChannelHandlerContext ctx) { + future.addListener(future1 -> { + Throwable cause = future1.cause(); + if (cause != null) { + lifecycleManager.onError(ctx, true, cause); } }); } @@ -462,14 +538,17 @@ public void operationComplete(ChannelFuture future) throws Exception { */ private final class FlowControlledHeaders extends FlowControlledBase { private final Http2Headers headers; + private final boolean hasPriority; private final int streamDependency; private final short weight; private final boolean exclusive; - FlowControlledHeaders(Http2Stream stream, Http2Headers headers, int streamDependency, short weight, - boolean exclusive, int padding, boolean endOfStream, ChannelPromise promise) { + FlowControlledHeaders(Http2Stream stream, Http2Headers headers, boolean hasPriority, + int streamDependency, short weight, boolean exclusive, + int padding, boolean endOfStream, Promise promise) { super(stream, padding, endOfStream, promise); this.headers = headers; + this.hasPriority = hasPriority; this.streamDependency = streamDependency; this.weight = weight; this.exclusive = exclusive; @@ -491,16 +570,15 @@ public void error(ChannelHandlerContext ctx, Throwable cause) { @Override public void write(ChannelHandlerContext ctx, int allowedBytes) { boolean isInformational = 
validateHeadersSentState(stream, headers, connection.isServer(), endOfStream); - if (promise.isVoid()) { - promise = ctx.newPromise(); - } + // The code is currently requiring adding this listener before writing, in order to call onError() before + // closeStreamLocal(). promise.addListener(this); - ChannelFuture f = frameWriter.writeHeaders(ctx, stream.id(), headers, streamDependency, weight, exclusive, - padding, endOfStream, promise); + Future f = sendHeaders(frameWriter, ctx, stream.id(), headers, hasPriority, streamDependency, + weight, exclusive, padding, endOfStream); + f.cascadeTo(promise); // Writing headers may fail during the encode state if they violate HPACK limits. - Throwable failureCause = f.cause(); - if (failureCause == null) { + if (!f.isFailed()) { // "not failed" means either not done, or completed successfully. // This just sets internal stream state which is used elsewhere in the codec and doesn't // necessarily mean the write will complete successfully. stream.headersSent(isInformational); @@ -516,18 +594,15 @@ public boolean merge(ChannelHandlerContext ctx, Http2RemoteFlowController.FlowCo /** * Common base type for payloads to deliver via flow-control. 
*/ - public abstract class FlowControlledBase implements Http2RemoteFlowController.FlowControlled, - ChannelFutureListener { + public abstract class FlowControlledBase implements Http2RemoteFlowController.FlowControlled, FutureListener { protected final Http2Stream stream; - protected ChannelPromise promise; + protected Promise promise; protected boolean endOfStream; protected int padding; FlowControlledBase(final Http2Stream stream, int padding, boolean endOfStream, - final ChannelPromise promise) { - if (padding < 0) { - throw new IllegalArgumentException("padding must be >= 0"); - } + final Promise promise) { + checkPositiveOrZero(padding, "padding"); this.padding = padding; this.endOfStream = endOfStream; this.stream = stream; @@ -542,8 +617,8 @@ public void writeComplete() { } @Override - public void operationComplete(ChannelFuture future) throws Exception { - if (!future.isSuccess()) { + public void operationComplete(Future future) { + if (future.isFailed()) { error(flowController().channelHandlerContext(), future.cause()); } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2DataFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2DataFrame.java index b5554379dfc..8856831ba3a 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2DataFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2DataFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,13 +16,13 @@ package io.netty.handler.codec.http2; import io.netty.buffer.ByteBuf; +import io.netty.buffer.ByteBufUtil; import io.netty.buffer.Unpooled; -import io.netty.util.IllegalReferenceCountException; import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; import static io.netty.handler.codec.http2.Http2CodecUtil.verifyPadding; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@link Http2DataFrame} implementation. @@ -71,7 +71,7 @@ public DefaultHttp2DataFrame(ByteBuf content, boolean endStream) { * 256 (inclusive). */ public DefaultHttp2DataFrame(ByteBuf content, boolean endStream, int padding) { - this.content = checkNotNull(content, "content"); + this.content = requireNonNull(content, "content"); this.endStream = endStream; verifyPadding(padding); this.padding = padding; @@ -104,10 +104,7 @@ public int padding() { @Override public ByteBuf content() { - if (content.refCnt() <= 0) { - throw new IllegalReferenceCountException(content.refCnt()); - } - return content; + return ByteBufUtil.ensureAccessible(content); } @Override diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameReader.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameReader.java index 63e184bc7b4..953b4b8bfd1 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameReader.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameReader.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -18,7 +18,6 @@ import io.netty.buffer.ByteBufAllocator; import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.http2.Http2FrameReader.Configuration; -import io.netty.util.internal.PlatformDependent; import io.netty.util.internal.UnstableApi; import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_MAX_FRAME_SIZE; @@ -27,7 +26,6 @@ import static io.netty.handler.codec.http2.Http2CodecUtil.PING_FRAME_PAYLOAD_LENGTH; import static io.netty.handler.codec.http2.Http2CodecUtil.PRIORITY_ENTRY_LENGTH; import static io.netty.handler.codec.http2.Http2CodecUtil.SETTINGS_INITIAL_WINDOW_SIZE; -import static io.netty.handler.codec.http2.Http2CodecUtil.SETTINGS_MAX_FRAME_SIZE; import static io.netty.handler.codec.http2.Http2CodecUtil.SETTING_ENTRY_LENGTH; import static io.netty.handler.codec.http2.Http2CodecUtil.headerListSizeExceeded; import static io.netty.handler.codec.http2.Http2CodecUtil.isMaxFrameSizeValid; @@ -166,12 +164,9 @@ public void readFrame(ChannelHandlerContext ctx, ByteBuf input, Http2FrameListen } catch (Http2Exception e) { readError = !Http2Exception.isStreamError(e); throw e; - } catch (RuntimeException e) { + } catch (Throwable e) { readError = true; throw e; - } catch (Throwable cause) { - readError = true; - PlatformDependent.throwException(cause); } } @@ -239,8 +234,8 @@ private void processPayloadState(ChannelHandlerContext ctx, ByteBuf in, Http2Fra return; } - // Get a view of the buffer for the size of the payload. - ByteBuf payload = in.readSlice(payloadLength); + // Only process up to payloadLength bytes. 
+ int payloadEndIndex = in.readerIndex() + payloadLength; // We have consumed the data, next time we read we will be expecting to read a frame header. readingHeaders = true; @@ -248,45 +243,45 @@ private void processPayloadState(ChannelHandlerContext ctx, ByteBuf in, Http2Fra // Read the payload and fire the frame event to the listener. switch (frameType) { case DATA: - readDataFrame(ctx, payload, listener); + readDataFrame(ctx, in, payloadEndIndex, listener); break; case HEADERS: - readHeadersFrame(ctx, payload, listener); + readHeadersFrame(ctx, in, payloadEndIndex, listener); break; case PRIORITY: - readPriorityFrame(ctx, payload, listener); + readPriorityFrame(ctx, in, listener); break; case RST_STREAM: - readRstStreamFrame(ctx, payload, listener); + readRstStreamFrame(ctx, in, listener); break; case SETTINGS: - readSettingsFrame(ctx, payload, listener); + readSettingsFrame(ctx, in, listener); break; case PUSH_PROMISE: - readPushPromiseFrame(ctx, payload, listener); + readPushPromiseFrame(ctx, in, payloadEndIndex, listener); break; case PING: - readPingFrame(ctx, payload.readLong(), listener); + readPingFrame(ctx, in.readLong(), listener); break; case GO_AWAY: - readGoAwayFrame(ctx, payload, listener); + readGoAwayFrame(ctx, in, payloadEndIndex, listener); break; case WINDOW_UPDATE: - readWindowUpdateFrame(ctx, payload, listener); + readWindowUpdateFrame(ctx, in, listener); break; case CONTINUATION: - readContinuationFrame(payload, listener); + readContinuationFrame(in, payloadEndIndex, listener); break; default: - readUnknownFrame(ctx, payload, listener); + readUnknownFrame(ctx, in, payloadEndIndex, listener); break; } + in.readerIndex(payloadEndIndex); } private void verifyDataFrame() throws Http2Exception { verifyAssociatedWithAStream(); verifyNotProcessingHeaders(); - verifyPayloadLength(payloadLength); if (payloadLength < flags.getPaddingPresenceFieldLength()) { throw streamError(streamId, FRAME_SIZE_ERROR, @@ -297,7 +292,6 @@ private void 
verifyDataFrame() throws Http2Exception { private void verifyHeadersFrame() throws Http2Exception { verifyAssociatedWithAStream(); verifyNotProcessingHeaders(); - verifyPayloadLength(payloadLength); int requiredLength = flags.getPaddingPresenceFieldLength() + flags.getNumPriorityBytes(); if (payloadLength < requiredLength) { @@ -327,7 +321,6 @@ private void verifyRstStreamFrame() throws Http2Exception { private void verifySettingsFrame() throws Http2Exception { verifyNotProcessingHeaders(); - verifyPayloadLength(payloadLength); if (streamId != 0) { throw connectionError(PROTOCOL_ERROR, "A stream ID must be zero."); } @@ -341,7 +334,6 @@ private void verifySettingsFrame() throws Http2Exception { private void verifyPushPromiseFrame() throws Http2Exception { verifyNotProcessingHeaders(); - verifyPayloadLength(payloadLength); // Subtract the length of the promised stream ID field, to determine the length of the // rest of the payload (header block fragment + payload). @@ -365,7 +357,6 @@ private void verifyPingFrame() throws Http2Exception { private void verifyGoAwayFrame() throws Http2Exception { verifyNotProcessingHeaders(); - verifyPayloadLength(payloadLength); if (streamId != 0) { throw connectionError(PROTOCOL_ERROR, "A stream ID must be zero."); @@ -386,7 +377,6 @@ private void verifyWindowUpdateFrame() throws Http2Exception { private void verifyContinuationFrame() throws Http2Exception { verifyAssociatedWithAStream(); - verifyPayloadLength(payloadLength); if (headersContinuation == null) { throw connectionError(PROTOCOL_ERROR, "Received %s frame but not currently processing headers.", @@ -408,21 +398,20 @@ private void verifyUnknownFrame() throws Http2Exception { verifyNotProcessingHeaders(); } - private void readDataFrame(ChannelHandlerContext ctx, ByteBuf payload, + private void readDataFrame(ChannelHandlerContext ctx, ByteBuf payload, int payloadEndIndex, Http2FrameListener listener) throws Http2Exception { int padding = readPadding(payload); 
verifyPadding(padding); // Determine how much data there is to read by removing the trailing // padding. - int dataLength = lengthWithoutTrailingPadding(payload.readableBytes(), padding); + int dataLength = lengthWithoutTrailingPadding(payloadEndIndex - payload.readerIndex(), padding); ByteBuf data = payload.readSlice(dataLength); listener.onDataRead(ctx, streamId, data, padding, flags.endOfStream()); - payload.skipBytes(payload.readableBytes()); } - private void readHeadersFrame(final ChannelHandlerContext ctx, ByteBuf payload, + private void readHeadersFrame(final ChannelHandlerContext ctx, ByteBuf payload, int payloadEndIndex, Http2FrameListener listener) throws Http2Exception { final int headersStreamId = streamId; final Http2Flags headersFlags = flags; @@ -439,7 +428,7 @@ private void readHeadersFrame(final ChannelHandlerContext ctx, ByteBuf payload, throw streamError(streamId, PROTOCOL_ERROR, "A stream cannot depend on itself."); } final short weight = (short) (payload.readUnsignedByte() + 1); - final ByteBuf fragment = payload.readSlice(lengthWithoutTrailingPadding(payload.readableBytes(), padding)); + final int lenToRead = lengthWithoutTrailingPadding(payloadEndIndex - payload.readerIndex(), padding); // Create a handler that invokes the listener when the header block is complete. 
headersContinuation = new HeadersContinuation() { @@ -449,10 +438,10 @@ public int getStreamId() { } @Override - public void processFragment(boolean endOfHeaders, ByteBuf fragment, + public void processFragment(boolean endOfHeaders, ByteBuf fragment, int len, Http2FrameListener listener) throws Http2Exception { final HeadersBlockBuilder hdrBlockBuilder = headersBlockBuilder(); - hdrBlockBuilder.addFragment(fragment, ctx.alloc(), endOfHeaders); + hdrBlockBuilder.addFragment(fragment, len, ctx.alloc(), endOfHeaders); if (endOfHeaders) { listener.onHeadersRead(ctx, headersStreamId, hdrBlockBuilder.headers(), streamDependency, weight, exclusive, padding, headersFlags.endOfStream()); @@ -461,7 +450,7 @@ public void processFragment(boolean endOfHeaders, ByteBuf fragment, }; // Process the initial fragment, invoking the listener's callback if end of headers. - headersContinuation.processFragment(flags.endOfHeaders(), fragment, listener); + headersContinuation.processFragment(flags.endOfHeaders(), payload, lenToRead, listener); resetHeadersContinuationIfEnd(flags.endOfHeaders()); return; } @@ -475,10 +464,10 @@ public int getStreamId() { } @Override - public void processFragment(boolean endOfHeaders, ByteBuf fragment, + public void processFragment(boolean endOfHeaders, ByteBuf fragment, int len, Http2FrameListener listener) throws Http2Exception { final HeadersBlockBuilder hdrBlockBuilder = headersBlockBuilder(); - hdrBlockBuilder.addFragment(fragment, ctx.alloc(), endOfHeaders); + hdrBlockBuilder.addFragment(fragment, len, ctx.alloc(), endOfHeaders); if (endOfHeaders) { listener.onHeadersRead(ctx, headersStreamId, hdrBlockBuilder.headers(), padding, headersFlags.endOfStream()); @@ -487,8 +476,8 @@ public void processFragment(boolean endOfHeaders, ByteBuf fragment, }; // Process the initial fragment, invoking the listener's callback if end of headers. 
- final ByteBuf fragment = payload.readSlice(lengthWithoutTrailingPadding(payload.readableBytes(), padding)); - headersContinuation.processFragment(flags.endOfHeaders(), fragment, listener); + int len = lengthWithoutTrailingPadding(payloadEndIndex - payload.readerIndex(), padding); + headersContinuation.processFragment(flags.endOfHeaders(), payload, len, listener); resetHeadersContinuationIfEnd(flags.endOfHeaders()); } @@ -529,21 +518,17 @@ private void readSettingsFrame(ChannelHandlerContext ctx, ByteBuf payload, try { settings.put(id, Long.valueOf(value)); } catch (IllegalArgumentException e) { - switch(id) { - case SETTINGS_MAX_FRAME_SIZE: - throw connectionError(PROTOCOL_ERROR, e, e.getMessage()); - case SETTINGS_INITIAL_WINDOW_SIZE: + if (id == SETTINGS_INITIAL_WINDOW_SIZE) { throw connectionError(FLOW_CONTROL_ERROR, e, e.getMessage()); - default: - throw connectionError(PROTOCOL_ERROR, e, e.getMessage()); } + throw connectionError(PROTOCOL_ERROR, e, e.getMessage()); } } listener.onSettingsRead(ctx, settings); } } - private void readPushPromiseFrame(final ChannelHandlerContext ctx, ByteBuf payload, + private void readPushPromiseFrame(final ChannelHandlerContext ctx, ByteBuf payload, int payloadEndIndex, Http2FrameListener listener) throws Http2Exception { final int pushPromiseStreamId = streamId; final int padding = readPadding(payload); @@ -558,9 +543,9 @@ public int getStreamId() { } @Override - public void processFragment(boolean endOfHeaders, ByteBuf fragment, + public void processFragment(boolean endOfHeaders, ByteBuf fragment, int len, Http2FrameListener listener) throws Http2Exception { - headersBlockBuilder().addFragment(fragment, ctx.alloc(), endOfHeaders); + headersBlockBuilder().addFragment(fragment, len, ctx.alloc(), endOfHeaders); if (endOfHeaders) { listener.onPushPromiseRead(ctx, pushPromiseStreamId, promisedStreamId, headersBlockBuilder().headers(), padding); @@ -569,8 +554,8 @@ public void processFragment(boolean endOfHeaders, ByteBuf 
fragment, }; // Process the initial fragment, invoking the listener's callback if end of headers. - final ByteBuf fragment = payload.readSlice(lengthWithoutTrailingPadding(payload.readableBytes(), padding)); - headersContinuation.processFragment(flags.endOfHeaders(), fragment, listener); + int len = lengthWithoutTrailingPadding(payloadEndIndex - payload.readerIndex(), padding); + headersContinuation.processFragment(flags.endOfHeaders(), payload, len, listener); resetHeadersContinuationIfEnd(flags.endOfHeaders()); } @@ -583,11 +568,11 @@ private void readPingFrame(ChannelHandlerContext ctx, long data, } } - private static void readGoAwayFrame(ChannelHandlerContext ctx, ByteBuf payload, + private static void readGoAwayFrame(ChannelHandlerContext ctx, ByteBuf payload, int payloadEndIndex, Http2FrameListener listener) throws Http2Exception { int lastStreamId = readUnsignedInt(payload); long errorCode = payload.readUnsignedInt(); - ByteBuf debugData = payload.readSlice(payload.readableBytes()); + ByteBuf debugData = payload.readSlice(payloadEndIndex - payload.readerIndex()); listener.onGoAwayRead(ctx, lastStreamId, errorCode, debugData); } @@ -601,18 +586,17 @@ private void readWindowUpdateFrame(ChannelHandlerContext ctx, ByteBuf payload, listener.onWindowUpdateRead(ctx, streamId, windowSizeIncrement); } - private void readContinuationFrame(ByteBuf payload, Http2FrameListener listener) + private void readContinuationFrame(ByteBuf payload, int payloadEndIndex, Http2FrameListener listener) throws Http2Exception { // Process the initial fragment, invoking the listener's callback if end of headers. 
- final ByteBuf continuationFragment = payload.readSlice(payload.readableBytes()); - headersContinuation.processFragment(flags.endOfHeaders(), continuationFragment, - listener); + headersContinuation.processFragment(flags.endOfHeaders(), payload, + payloadEndIndex - payload.readerIndex(), listener); resetHeadersContinuationIfEnd(flags.endOfHeaders()); } - private void readUnknownFrame(ChannelHandlerContext ctx, ByteBuf payload, Http2FrameListener listener) - throws Http2Exception { - payload = payload.readSlice(payload.readableBytes()); + private void readUnknownFrame(ChannelHandlerContext ctx, ByteBuf payload, + int payloadEndIndex, Http2FrameListener listener) throws Http2Exception { + payload = payload.readSlice(payloadEndIndex - payload.readerIndex()); listener.onUnknownFrame(ctx, frameType, streamId, flags, payload); } @@ -664,7 +648,7 @@ private abstract class HeadersContinuation { * @param fragment the fragment of the header block to be added. * @param listener the listener to be notified if the header block is completed. */ - abstract void processFragment(boolean endOfHeaders, ByteBuf fragment, + abstract void processFragment(boolean endOfHeaders, ByteBuf fragment, int len, Http2FrameListener listener) throws Http2Exception; final HeadersBlockBuilder headersBlockBuilder() { @@ -704,33 +688,32 @@ private void headerSizeExceeded() throws Http2Exception { * This is used for an optimization for when the first fragment is the full * block. In that case, the buffer is used directly without copying. 
*/ - final void addFragment(ByteBuf fragment, ByteBufAllocator alloc, boolean endOfHeaders) throws Http2Exception { + final void addFragment(ByteBuf fragment, int len, ByteBufAllocator alloc, + boolean endOfHeaders) throws Http2Exception { if (headerBlock == null) { - if (fragment.readableBytes() > headersDecoder.configuration().maxHeaderListSizeGoAway()) { + if (len > headersDecoder.configuration().maxHeaderListSizeGoAway()) { headerSizeExceeded(); } if (endOfHeaders) { // Optimization - don't bother copying, just use the buffer as-is. Need // to retain since we release when the header block is built. - headerBlock = fragment.retain(); + headerBlock = fragment.readRetainedSlice(len); } else { - headerBlock = alloc.buffer(fragment.readableBytes()); - headerBlock.writeBytes(fragment); + headerBlock = alloc.buffer(len).writeBytes(fragment, len); } return; } - if (headersDecoder.configuration().maxHeaderListSizeGoAway() - fragment.readableBytes() < + if (headersDecoder.configuration().maxHeaderListSizeGoAway() - len < headerBlock.readableBytes()) { headerSizeExceeded(); } - if (headerBlock.isWritable(fragment.readableBytes())) { + if (headerBlock.isWritable(len)) { // The buffer can hold the requested bytes, just write it directly. - headerBlock.writeBytes(fragment); + headerBlock.writeBytes(fragment, len); } else { // Allocate a new buffer that is big enough to hold the entire header block so far. 
- ByteBuf buf = alloc.buffer(headerBlock.readableBytes() + fragment.readableBytes()); - buf.writeBytes(headerBlock); - buf.writeBytes(fragment); + ByteBuf buf = alloc.buffer(headerBlock.readableBytes() + len); + buf.writeBytes(headerBlock).writeBytes(fragment, len); headerBlock.release(); headerBlock = buf; } @@ -773,12 +756,6 @@ private void verifyNotProcessingHeaders() throws Http2Exception { } } - private void verifyPayloadLength(int payloadLength) throws Http2Exception { - if (payloadLength > maxFrameSize) { - throw connectionError(PROTOCOL_ERROR, "Total payload length %d exceeds max frame length.", payloadLength); - } - } - private void verifyAssociatedWithAStream() throws Http2Exception { if (streamId == 0) { throw connectionError(PROTOCOL_ERROR, "Frame of type %s must be associated with a stream.", frameType); diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java index 3fa84137042..5672bc15436 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2FrameWriter.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -16,13 +16,11 @@ package io.netty.handler.codec.http2; import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelFuture; import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelPromise; import io.netty.handler.codec.http2.Http2CodecUtil.SimpleChannelPromiseAggregator; import io.netty.handler.codec.http2.Http2FrameWriter.Configuration; import io.netty.handler.codec.http2.Http2HeadersEncoder.SensitivityDetector; -import io.netty.util.internal.PlatformDependent; +import io.netty.util.concurrent.Future; import io.netty.util.internal.UnstableApi; import static io.netty.buffer.Unpooled.directBuffer; @@ -60,9 +58,11 @@ import static io.netty.handler.codec.http2.Http2FrameTypes.RST_STREAM; import static io.netty.handler.codec.http2.Http2FrameTypes.SETTINGS; import static io.netty.handler.codec.http2.Http2FrameTypes.WINDOW_UPDATE; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.util.internal.ObjectUtil.checkPositive; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; import static java.lang.Math.max; import static java.lang.Math.min; +import static java.util.Objects.requireNonNull; /** * A {@link Http2FrameWriter} that supports all frame types defined by the HTTP/2 specification. 
@@ -131,10 +131,10 @@ public int maxFrameSize() { public void close() { } @Override - public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf data, - int padding, boolean endStream, ChannelPromise promise) { + public Future writeData(ChannelHandlerContext ctx, int streamId, ByteBuf data, + int padding, boolean endStream) { final SimpleChannelPromiseAggregator promiseAggregator = - new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor()); + new SimpleChannelPromiseAggregator(ctx.newPromise(), ctx.executor()); ByteBuf frameHeader = null; try { verifyStreamId(streamId, STREAM_ID); @@ -150,10 +150,10 @@ public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf writeFrameHeaderInternal(frameHeader, maxFrameSize, DATA, flags, streamId); do { // Write the header. - ctx.write(frameHeader.retainedSlice(), promiseAggregator.newPromise()); + ctx.write(frameHeader.retainedSlice()).cascadeTo(promiseAggregator.newPromise()); // Write the payload. - ctx.write(data.readRetainedSlice(maxFrameSize), promiseAggregator.newPromise()); + ctx.write(data.readRetainedSlice(maxFrameSize)).cascadeTo(promiseAggregator.newPromise()); remainingData -= maxFrameSize; // Stop iterating if remainingData == maxFrameSize so we can take care of reference counts below. @@ -169,12 +169,12 @@ public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf ByteBuf frameHeader2 = ctx.alloc().buffer(FRAME_HEADER_LENGTH); flags.endOfStream(endStream); writeFrameHeaderInternal(frameHeader2, remainingData, DATA, flags, streamId); - ctx.write(frameHeader2, promiseAggregator.newPromise()); + ctx.write(frameHeader2).cascadeTo(promiseAggregator.newPromise()); // Write the payload. 
ByteBuf lastFrame = data.readSlice(remainingData); data = null; - ctx.write(lastFrame, promiseAggregator.newPromise()); + ctx.write(lastFrame).cascadeTo(promiseAggregator.newPromise()); } else { if (remainingData != maxFrameSize) { if (frameHeader != null) { @@ -192,17 +192,17 @@ public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf lastFrame = frameHeader.slice(); frameHeader = null; } - ctx.write(lastFrame, promiseAggregator.newPromise()); + ctx.write(lastFrame).cascadeTo(promiseAggregator.newPromise()); // Write the payload. - lastFrame = data.readSlice(maxFrameSize); + lastFrame = data.readableBytes() != maxFrameSize ? data.readSlice(maxFrameSize) : data; data = null; - ctx.write(lastFrame, promiseAggregator.newPromise()); + ctx.write(lastFrame).cascadeTo(promiseAggregator.newPromise()); } do { int frameDataBytes = min(remainingData, maxFrameSize); - int framePaddingBytes = min(padding, max(0, (maxFrameSize - 1) - frameDataBytes)); + int framePaddingBytes = min(padding, max(0, maxFrameSize - 1 - frameDataBytes)); // Decrement the remaining counters. padding -= framePaddingBytes; @@ -214,22 +214,23 @@ public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf flags.paddingPresent(framePaddingBytes > 0); writeFrameHeaderInternal(frameHeader2, framePaddingBytes + frameDataBytes, DATA, flags, streamId); writePaddingLength(frameHeader2, framePaddingBytes); - ctx.write(frameHeader2, promiseAggregator.newPromise()); + ctx.write(frameHeader2).cascadeTo(promiseAggregator.newPromise()); // Write the payload. 
- if (frameDataBytes != 0) { + if (frameDataBytes != 0 && data != null) { // Make sure Data is not null if (remainingData == 0) { ByteBuf lastFrame = data.readSlice(frameDataBytes); data = null; - ctx.write(lastFrame, promiseAggregator.newPromise()); + ctx.write(lastFrame).cascadeTo(promiseAggregator.newPromise()); } else { - ctx.write(data.readRetainedSlice(frameDataBytes), promiseAggregator.newPromise()); + ctx.write(data.readRetainedSlice(frameDataBytes)) + .cascadeTo(promiseAggregator.newPromise()); } } // Write the frame padding. if (paddingBytes(framePaddingBytes) > 0) { - ctx.write(ZERO_BUFFER.slice(0, paddingBytes(framePaddingBytes)), - promiseAggregator.newPromise()); + ctx.write(ZERO_BUFFER.slice(0, paddingBytes(framePaddingBytes))) + .cascadeTo(promiseAggregator.newPromise()); } } while (remainingData != 0 || padding != 0); } @@ -240,7 +241,9 @@ public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf // Use a try/finally here in case the data has been released before calling this method. This is not // necessary above because we internally allocate frameHeader. try { - if (data != null) { + if (data != null && + // Check if the data was released already. 
+ data.refCnt() > 0) { data.release(); } } finally { @@ -253,26 +256,26 @@ public ChannelFuture writeData(ChannelHandlerContext ctx, int streamId, ByteBuf } @Override - public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, - Http2Headers headers, int padding, boolean endStream, ChannelPromise promise) { + public Future writeHeaders(ChannelHandlerContext ctx, int streamId, + Http2Headers headers, int padding, boolean endStream) { return writeHeadersInternal(ctx, streamId, headers, padding, endStream, - false, 0, (short) 0, false, promise); + false, 0, (short) 0, false); } @Override - public ChannelFuture writeHeaders(ChannelHandlerContext ctx, int streamId, - Http2Headers headers, int streamDependency, short weight, boolean exclusive, - int padding, boolean endStream, ChannelPromise promise) { + public Future writeHeaders(ChannelHandlerContext ctx, int streamId, + Http2Headers headers, int streamDependency, short weight, boolean exclusive, + int padding, boolean endStream) { return writeHeadersInternal(ctx, streamId, headers, padding, endStream, - true, streamDependency, weight, exclusive, promise); + true, streamDependency, weight, exclusive); } @Override - public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId, - int streamDependency, short weight, boolean exclusive, ChannelPromise promise) { + public Future writePriority(ChannelHandlerContext ctx, int streamId, + int streamDependency, short weight, boolean exclusive) { try { verifyStreamId(streamId, STREAM_ID); - verifyStreamId(streamDependency, STREAM_DEPENDENCY); + verifyStreamOrConnectionId(streamDependency, STREAM_DEPENDENCY); verifyWeight(weight); ByteBuf buf = ctx.alloc().buffer(PRIORITY_FRAME_LENGTH); @@ -280,15 +283,14 @@ public ChannelFuture writePriority(ChannelHandlerContext ctx, int streamId, buf.writeInt(exclusive ? (int) (0x80000000L | streamDependency) : streamDependency); // Adjust the weight so that it fits into a single byte on the wire. 
buf.writeByte(weight - 1); - return ctx.write(buf, promise); + return ctx.write(buf); } catch (Throwable t) { - return promise.setFailure(t); + return ctx.newFailedFuture(t); } } @Override - public ChannelFuture writeRstStream(ChannelHandlerContext ctx, int streamId, long errorCode, - ChannelPromise promise) { + public Future writeRstStream(ChannelHandlerContext ctx, int streamId, long errorCode) { try { verifyStreamId(streamId, STREAM_ID); verifyErrorCode(errorCode); @@ -296,58 +298,57 @@ public ChannelFuture writeRstStream(ChannelHandlerContext ctx, int streamId, lon ByteBuf buf = ctx.alloc().buffer(RST_STREAM_FRAME_LENGTH); writeFrameHeaderInternal(buf, INT_FIELD_LENGTH, RST_STREAM, new Http2Flags(), streamId); buf.writeInt((int) errorCode); - return ctx.write(buf, promise); + return ctx.write(buf); } catch (Throwable t) { - return promise.setFailure(t); + return ctx.newFailedFuture(t); } } @Override - public ChannelFuture writeSettings(ChannelHandlerContext ctx, Http2Settings settings, - ChannelPromise promise) { + public Future writeSettings(ChannelHandlerContext ctx, Http2Settings settings) { try { - checkNotNull(settings, "settings"); + requireNonNull(settings, "settings"); int payloadLength = SETTING_ENTRY_LENGTH * settings.size(); - ByteBuf buf = ctx.alloc().buffer(FRAME_HEADER_LENGTH + settings.size() * SETTING_ENTRY_LENGTH); + ByteBuf buf = ctx.alloc().buffer(FRAME_HEADER_LENGTH + payloadLength); writeFrameHeaderInternal(buf, payloadLength, SETTINGS, new Http2Flags(), 0); for (Http2Settings.PrimitiveEntry entry : settings.entries()) { buf.writeChar(entry.key()); buf.writeInt(entry.value().intValue()); } - return ctx.write(buf, promise); + return ctx.write(buf); } catch (Throwable t) { - return promise.setFailure(t); + return ctx.newFailedFuture(t); } } @Override - public ChannelFuture writeSettingsAck(ChannelHandlerContext ctx, ChannelPromise promise) { + public Future writeSettingsAck(ChannelHandlerContext ctx) { try { ByteBuf buf = 
ctx.alloc().buffer(FRAME_HEADER_LENGTH); writeFrameHeaderInternal(buf, 0, SETTINGS, new Http2Flags().ack(true), 0); - return ctx.write(buf, promise); + return ctx.write(buf); } catch (Throwable t) { - return promise.setFailure(t); + return ctx.newFailedFuture(t); } } @Override - public ChannelFuture writePing(ChannelHandlerContext ctx, boolean ack, long data, ChannelPromise promise) { + public Future writePing(ChannelHandlerContext ctx, boolean ack, long data) { Http2Flags flags = ack ? new Http2Flags().ack(true) : new Http2Flags(); ByteBuf buf = ctx.alloc().buffer(FRAME_HEADER_LENGTH + PING_FRAME_PAYLOAD_LENGTH); // Assume nothing below will throw until buf is written. That way we don't have to take care of ownership // in the catch block. writeFrameHeaderInternal(buf, PING_FRAME_PAYLOAD_LENGTH, PING, flags, 0); buf.writeLong(data); - return ctx.write(buf, promise); + return ctx.write(buf); } @Override - public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, - int promisedStreamId, Http2Headers headers, int padding, ChannelPromise promise) { + public Future writePushPromise(ChannelHandlerContext ctx, int streamId, int promisedStreamId, + Http2Headers headers, int padding) { ByteBuf headerBlock = null; SimpleChannelPromiseAggregator promiseAggregator = - new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor()); + new SimpleChannelPromiseAggregator(ctx.newPromise(), ctx.executor()); try { verifyStreamId(streamId, STREAM_ID); verifyStreamId(promisedStreamId, "Promised Stream ID"); @@ -373,25 +374,25 @@ public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, // Write out the promised stream ID. buf.writeInt(promisedStreamId); - ctx.write(buf, promiseAggregator.newPromise()); + ctx.write(buf).cascadeTo(promiseAggregator.newPromise()); // Write the first fragment. 
- ctx.write(fragment, promiseAggregator.newPromise()); + ctx.write(fragment).cascadeTo(promiseAggregator.newPromise()); // Write out the padding, if any. if (paddingBytes(padding) > 0) { - ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding)), promiseAggregator.newPromise()); + ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding))).cascadeTo(promiseAggregator.newPromise()); } if (!flags.endOfHeaders()) { - writeContinuationFrames(ctx, streamId, headerBlock, padding, promiseAggregator); + writeContinuationFrames(ctx, streamId, headerBlock, promiseAggregator); } } catch (Http2Exception e) { promiseAggregator.setFailure(e); } catch (Throwable t) { promiseAggregator.setFailure(t); promiseAggregator.doneAllocatingPromises(); - PlatformDependent.throwException(t); + throw t; } finally { if (headerBlock != null) { headerBlock.release(); @@ -401,10 +402,10 @@ public ChannelFuture writePushPromise(ChannelHandlerContext ctx, int streamId, } @Override - public ChannelFuture writeGoAway(ChannelHandlerContext ctx, int lastStreamId, long errorCode, - ByteBuf debugData, ChannelPromise promise) { + public Future writeGoAway(ChannelHandlerContext ctx, int lastStreamId, long errorCode, + ByteBuf debugData) { SimpleChannelPromiseAggregator promiseAggregator = - new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor()); + new SimpleChannelPromiseAggregator(ctx.newPromise(), ctx.executor()); try { verifyStreamOrConnectionId(lastStreamId, "Last Stream ID"); verifyErrorCode(errorCode); @@ -416,7 +417,7 @@ public ChannelFuture writeGoAway(ChannelHandlerContext ctx, int lastStreamId, lo writeFrameHeaderInternal(buf, payloadLength, GO_AWAY, new Http2Flags(), 0); buf.writeInt(lastStreamId); buf.writeInt((int) errorCode); - ctx.write(buf, promiseAggregator.newPromise()); + ctx.write(buf).cascadeTo(promiseAggregator.newPromise()); } catch (Throwable t) { try { debugData.release(); @@ -428,7 +429,7 @@ public ChannelFuture writeGoAway(ChannelHandlerContext ctx, int lastStreamId, 
lo } try { - ctx.write(debugData, promiseAggregator.newPromise()); + ctx.write(debugData).cascadeTo(promiseAggregator.newPromise()); } catch (Throwable t) { promiseAggregator.setFailure(t); } @@ -436,8 +437,8 @@ public ChannelFuture writeGoAway(ChannelHandlerContext ctx, int lastStreamId, lo } @Override - public ChannelFuture writeWindowUpdate(ChannelHandlerContext ctx, int streamId, - int windowSizeIncrement, ChannelPromise promise) { + public Future writeWindowUpdate(ChannelHandlerContext ctx, int streamId, + int windowSizeIncrement) { try { verifyStreamOrConnectionId(streamId, STREAM_ID); verifyWindowSizeIncrement(windowSizeIncrement); @@ -445,24 +446,24 @@ public ChannelFuture writeWindowUpdate(ChannelHandlerContext ctx, int streamId, ByteBuf buf = ctx.alloc().buffer(WINDOW_UPDATE_FRAME_LENGTH); writeFrameHeaderInternal(buf, INT_FIELD_LENGTH, WINDOW_UPDATE, new Http2Flags(), streamId); buf.writeInt(windowSizeIncrement); - return ctx.write(buf, promise); + return ctx.write(buf); } catch (Throwable t) { - return promise.setFailure(t); + return ctx.newFailedFuture(t); } } @Override - public ChannelFuture writeFrame(ChannelHandlerContext ctx, byte frameType, int streamId, - Http2Flags flags, ByteBuf payload, ChannelPromise promise) { + public Future writeFrame(ChannelHandlerContext ctx, byte frameType, int streamId, + Http2Flags flags, ByteBuf payload) { SimpleChannelPromiseAggregator promiseAggregator = - new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor()); + new SimpleChannelPromiseAggregator(ctx.newPromise(), ctx.executor()); try { verifyStreamOrConnectionId(streamId, STREAM_ID); ByteBuf buf = ctx.alloc().buffer(FRAME_HEADER_LENGTH); // Assume nothing below will throw until buf is written. That way we don't have to take care of ownership // in the catch block. 
writeFrameHeaderInternal(buf, payload.readableBytes(), frameType, flags, streamId); - ctx.write(buf, promiseAggregator.newPromise()); + ctx.write(buf).cascadeTo(promiseAggregator.newPromise()); } catch (Throwable t) { try { payload.release(); @@ -473,19 +474,19 @@ public ChannelFuture writeFrame(ChannelHandlerContext ctx, byte frameType, int s return promiseAggregator; } try { - ctx.write(payload, promiseAggregator.newPromise()); + ctx.write(payload).cascadeTo(promiseAggregator.newPromise()); } catch (Throwable t) { promiseAggregator.setFailure(t); } return promiseAggregator.doneAllocatingPromises(); } - private ChannelFuture writeHeadersInternal(ChannelHandlerContext ctx, + private Future writeHeadersInternal(ChannelHandlerContext ctx, int streamId, Http2Headers headers, int padding, boolean endStream, - boolean hasPriority, int streamDependency, short weight, boolean exclusive, ChannelPromise promise) { + boolean hasPriority, int streamDependency, short weight, boolean exclusive) { ByteBuf headerBlock = null; SimpleChannelPromiseAggregator promiseAggregator = - new SimpleChannelPromiseAggregator(promise, ctx.channel(), ctx.executor()); + new SimpleChannelPromiseAggregator(ctx.newPromise(), ctx.executor()); try { verifyStreamId(streamId, STREAM_ID); if (hasPriority) { @@ -520,25 +521,26 @@ private ChannelFuture writeHeadersInternal(ChannelHandlerContext ctx, // Adjust the weight so that it fits into a single byte on the wire. buf.writeByte(weight - 1); } - ctx.write(buf, promiseAggregator.newPromise()); + ctx.write(buf).cascadeTo(promiseAggregator.newPromise()); // Write the first fragment. - ctx.write(fragment, promiseAggregator.newPromise()); + ctx.write(fragment).cascadeTo(promiseAggregator.newPromise()); // Write out the padding, if any. 
if (paddingBytes(padding) > 0) { - ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding)), promiseAggregator.newPromise()); + ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding))) + .cascadeTo(promiseAggregator.newPromise()); } if (!flags.endOfHeaders()) { - writeContinuationFrames(ctx, streamId, headerBlock, padding, promiseAggregator); + writeContinuationFrames(ctx, streamId, headerBlock, promiseAggregator); } } catch (Http2Exception e) { promiseAggregator.setFailure(e); } catch (Throwable t) { promiseAggregator.setFailure(t); promiseAggregator.doneAllocatingPromises(); - PlatformDependent.throwException(t); + throw t; } finally { if (headerBlock != null) { headerBlock.release(); @@ -550,48 +552,34 @@ private ChannelFuture writeHeadersInternal(ChannelHandlerContext ctx, /** * Writes as many continuation frames as needed until {@code padding} and {@code headerBlock} are consumed. */ - private ChannelFuture writeContinuationFrames(ChannelHandlerContext ctx, int streamId, - ByteBuf headerBlock, int padding, SimpleChannelPromiseAggregator promiseAggregator) { - Http2Flags flags = new Http2Flags().paddingPresent(padding > 0); - int maxFragmentLength = maxFrameSize - padding; - // TODO: same padding is applied to all frames, is this desired? 
- if (maxFragmentLength <= 0) { - return promiseAggregator.setFailure(new IllegalArgumentException( - "Padding [" + padding + "] is too large for max frame size [" + maxFrameSize + "]")); - } + private Future writeContinuationFrames(ChannelHandlerContext ctx, int streamId, + ByteBuf headerBlock, SimpleChannelPromiseAggregator promiseAggregator) { + Http2Flags flags = new Http2Flags(); if (headerBlock.isReadable()) { // The frame header (and padding) only changes on the last frame, so allocate it once and re-use - int fragmentReadableBytes = min(headerBlock.readableBytes(), maxFragmentLength); - int payloadLength = fragmentReadableBytes + padding; + int fragmentReadableBytes = min(headerBlock.readableBytes(), maxFrameSize); ByteBuf buf = ctx.alloc().buffer(CONTINUATION_FRAME_HEADER_LENGTH); - writeFrameHeaderInternal(buf, payloadLength, CONTINUATION, flags, streamId); - writePaddingLength(buf, padding); + writeFrameHeaderInternal(buf, fragmentReadableBytes, CONTINUATION, flags, streamId); do { - fragmentReadableBytes = min(headerBlock.readableBytes(), maxFragmentLength); + fragmentReadableBytes = min(headerBlock.readableBytes(), maxFrameSize); ByteBuf fragment = headerBlock.readRetainedSlice(fragmentReadableBytes); - payloadLength = fragmentReadableBytes + padding; if (headerBlock.isReadable()) { - ctx.write(buf.retain(), promiseAggregator.newPromise()); + ctx.write(buf.retain()).cascadeTo(promiseAggregator.newPromise()); } else { // The frame header is different for the last frame, so re-allocate and release the old buffer flags = flags.endOfHeaders(true); buf.release(); buf = ctx.alloc().buffer(CONTINUATION_FRAME_HEADER_LENGTH); - writeFrameHeaderInternal(buf, payloadLength, CONTINUATION, flags, streamId); - writePaddingLength(buf, padding); - ctx.write(buf, promiseAggregator.newPromise()); + writeFrameHeaderInternal(buf, fragmentReadableBytes, CONTINUATION, flags, streamId); + ctx.write(buf).cascadeTo(promiseAggregator.newPromise()); } - ctx.write(fragment, 
promiseAggregator.newPromise()); + ctx.write(fragment).cascadeTo(promiseAggregator.newPromise()); - // Write out the padding, if any. - if (paddingBytes(padding) > 0) { - ctx.write(ZERO_BUFFER.slice(0, paddingBytes(padding)), promiseAggregator.newPromise()); - } - } while(headerBlock.isReadable()); + } while (headerBlock.isReadable()); } return promiseAggregator; } @@ -614,15 +602,11 @@ private static void writePaddingLength(ByteBuf buf, int padding) { } private static void verifyStreamId(int streamId, String argumentName) { - if (streamId <= 0) { - throw new IllegalArgumentException(argumentName + " must be > 0"); - } + checkPositive(streamId, argumentName); } private static void verifyStreamOrConnectionId(int streamId, String argumentName) { - if (streamId < 0) { - throw new IllegalArgumentException(argumentName + " must be >= 0"); - } + checkPositiveOrZero(streamId, argumentName); } private static void verifyWeight(short weight) { @@ -638,14 +622,6 @@ private static void verifyErrorCode(long errorCode) { } private static void verifyWindowSizeIncrement(int windowSizeIncrement) { - if (windowSizeIncrement < 0) { - throw new IllegalArgumentException("WindowSizeIncrement must be >= 0"); - } - } - - private static void verifyPingPayload(ByteBuf data) { - if (data == null || data.readableBytes() != PING_FRAME_PAYLOAD_LENGTH) { - throw new IllegalArgumentException("Opaque data must be " + PING_FRAME_PAYLOAD_LENGTH + " bytes"); - } + checkPositiveOrZero(windowSizeIncrement, "windowSizeIncrement"); } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2GoAwayFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2GoAwayFrame.java index 77207673303..8940d06fb60 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2GoAwayFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2GoAwayFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file 
except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,8 @@ */ package io.netty.handler.codec.http2; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; + import io.netty.buffer.ByteBuf; import io.netty.buffer.DefaultByteBufHolder; import io.netty.buffer.Unpooled; @@ -98,9 +100,7 @@ public int extraStreamIds() { @Override public Http2GoAwayFrame setExtraStreamIds(int extraStreamIds) { - if (extraStreamIds < 0) { - throw new IllegalArgumentException("extraStreamIds must be non-negative"); - } + checkPositiveOrZero(extraStreamIds, "extraStreamIds"); this.extraStreamIds = extraStreamIds; return this; } @@ -166,7 +166,7 @@ public boolean equals(Object o) { @Override public int hashCode() { int hash = super.hashCode(); - hash = hash * 31 + (int) (errorCode ^ (errorCode >>> 32)); + hash = hash * 31 + (int) (errorCode ^ errorCode >>> 32); hash = hash * 31 + extraStreamIds; return hash; } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Headers.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Headers.java index 0ab5705224f..4ac21bbf41f 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Headers.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2Headers.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -31,43 +31,24 @@ @UnstableApi public class DefaultHttp2Headers extends DefaultHeaders implements Http2Headers { - private static final ByteProcessor HTTP2_NAME_VALIDATOR_PROCESSOR = new ByteProcessor() { - @Override - public boolean process(byte value) { - return !isUpperCase(value); + private static final ByteProcessor HTTP2_NAME_VALIDATOR_PROCESSOR = value -> !isUpperCase(value); + static final NameValidator HTTP2_NAME_VALIDATOR = name -> { + if (name == null || name.length() == 0) { + PlatformDependent.throwException(connectionError(PROTOCOL_ERROR, + "empty headers are not allowed [%s]", name)); } - }; - static final NameValidator HTTP2_NAME_VALIDATOR = new NameValidator() { - @Override - public void validateName(CharSequence name) { - if (name == null || name.length() == 0) { + if (name instanceof AsciiString) { + int index = ((AsciiString) name).forEachByte(HTTP2_NAME_VALIDATOR_PROCESSOR); + if (index != -1) { PlatformDependent.throwException(connectionError(PROTOCOL_ERROR, - "empty headers are not allowed [%s]", name)); + "invalid header name [%s]", name)); } - if (name instanceof AsciiString) { - final int index; - try { - index = ((AsciiString) name).forEachByte(HTTP2_NAME_VALIDATOR_PROCESSOR); - } catch (Http2Exception e) { - PlatformDependent.throwException(e); - return; - } catch (Throwable t) { - PlatformDependent.throwException(connectionError(PROTOCOL_ERROR, t, - "unexpected error. 
invalid header name [%s]", name)); - return; - } - - if (index != -1) { + } else { + for (int i = 0; i < name.length(); ++i) { + if (isUpperCase(name.charAt(i))) { PlatformDependent.throwException(connectionError(PROTOCOL_ERROR, "invalid header name [%s]", name)); } - } else { - for (int i = 0; i < name.length(); ++i) { - if (isUpperCase(name.charAt(i))) { - PlatformDependent.throwException(connectionError(PROTOCOL_ERROR, - "invalid header name [%s]", name)); - } - } } } }; @@ -117,7 +98,7 @@ public DefaultHttp2Headers(boolean validate, int arraySizeHint) { @Override public Http2Headers clear() { - this.firstNonPseudo = head; + firstNonPseudo = head; return super.clear(); } @@ -203,7 +184,7 @@ protected final HeaderEntry newHeaderEntry(int h, Ch } private final class Http2HeaderEntry extends HeaderEntry { - protected Http2HeaderEntry(int hash, CharSequence key, CharSequence value, + Http2HeaderEntry(int hash, CharSequence key, CharSequence value, HeaderEntry next) { super(hash, key); this.value = value; diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersDecoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersDecoder.java index 684fef03528..98cc6154af2 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersDecoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersDecoder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -16,13 +16,13 @@ package io.netty.handler.codec.http2; import io.netty.buffer.ByteBuf; -import io.netty.util.internal.ObjectUtil; import io.netty.util.internal.UnstableApi; import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_HEADER_LIST_SIZE; -import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_INITIAL_HUFFMAN_DECODE_CAPACITY; import static io.netty.handler.codec.http2.Http2Error.COMPRESSION_ERROR; +import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR; import static io.netty.handler.codec.http2.Http2Exception.connectionError; +import static java.util.Objects.requireNonNull; @UnstableApi public class DefaultHttp2HeadersDecoder implements Http2HeadersDecoder, Http2HeadersDecoder.Configuration { @@ -31,6 +31,7 @@ public class DefaultHttp2HeadersDecoder implements Http2HeadersDecoder, Http2Hea private final HpackDecoder hpackDecoder; private final boolean validateHeaders; + private long maxHeaderListSizeGoAway; /** * Used to calculate an exponential moving average of header sizes to get an estimate of how large the data @@ -55,7 +56,7 @@ public DefaultHttp2HeadersDecoder(boolean validateHeaders) { * (which is dangerous). 
*/ public DefaultHttp2HeadersDecoder(boolean validateHeaders, long maxHeaderListSize) { - this(validateHeaders, maxHeaderListSize, DEFAULT_INITIAL_HUFFMAN_DECODE_CAPACITY); + this(validateHeaders, maxHeaderListSize, /* initialHuffmanDecodeCapacity= */ -1); } /** @@ -65,11 +66,11 @@ public DefaultHttp2HeadersDecoder(boolean validateHeaders, long maxHeaderListSiz * This is because SETTINGS_MAX_HEADER_LIST_SIZE * allows a lower than advertised limit from being enforced, and the default limit is unlimited * (which is dangerous). - * @param initialHuffmanDecodeCapacity Size of an intermediate buffer used during huffman decode. + * @param initialHuffmanDecodeCapacity Does nothing, do not use. */ public DefaultHttp2HeadersDecoder(boolean validateHeaders, long maxHeaderListSize, - int initialHuffmanDecodeCapacity) { - this(validateHeaders, new HpackDecoder(maxHeaderListSize, initialHuffmanDecodeCapacity)); + @Deprecated int initialHuffmanDecodeCapacity) { + this(validateHeaders, new HpackDecoder(maxHeaderListSize)); } /** @@ -77,8 +78,10 @@ public DefaultHttp2HeadersDecoder(boolean validateHeaders, long maxHeaderListSiz * for testing but violate the RFC if used outside the scope of testing. 
*/ DefaultHttp2HeadersDecoder(boolean validateHeaders, HpackDecoder hpackDecoder) { - this.hpackDecoder = ObjectUtil.checkNotNull(hpackDecoder, "hpackDecoder"); + this.hpackDecoder = requireNonNull(hpackDecoder, "hpackDecoder"); this.validateHeaders = validateHeaders; + maxHeaderListSizeGoAway = + Http2CodecUtil.calculateMaxHeaderListSizeGoAway(hpackDecoder.getMaxHeaderListSize()); } @Override @@ -93,7 +96,12 @@ public long maxHeaderTableSize() { @Override public void maxHeaderListSize(long max, long goAwayMax) throws Http2Exception { - hpackDecoder.setMaxHeaderListSize(max, goAwayMax); + if (goAwayMax < max || goAwayMax < 0) { + throw connectionError(INTERNAL_ERROR, "Header List Size GO_AWAY %d must be non-negative and >= %d", + goAwayMax, max); + } + hpackDecoder.setMaxHeaderListSize(max); + maxHeaderListSizeGoAway = goAwayMax; } @Override @@ -103,7 +111,7 @@ public long maxHeaderListSize() { @Override public long maxHeaderListSizeGoAway() { - return hpackDecoder.getMaxHeaderListSizeGoAway(); + return maxHeaderListSizeGoAway; } @Override diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersEncoder.java index 5e0a9f6cf13..7e932b56308 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersEncoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersEncoder.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -21,7 +21,7 @@ import static io.netty.handler.codec.http2.Http2Error.COMPRESSION_ERROR; import static io.netty.handler.codec.http2.Http2Exception.connectionError; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; @UnstableApi public class DefaultHttp2HeadersEncoder implements Http2HeadersEncoder, Http2HeadersEncoder.Configuration { @@ -43,7 +43,13 @@ public DefaultHttp2HeadersEncoder(SensitivityDetector sensitivityDetector, boole public DefaultHttp2HeadersEncoder(SensitivityDetector sensitivityDetector, boolean ignoreMaxHeaderListSize, int dynamicTableArraySizeHint) { - this(sensitivityDetector, new HpackEncoder(ignoreMaxHeaderListSize, dynamicTableArraySizeHint)); + this(sensitivityDetector, ignoreMaxHeaderListSize, dynamicTableArraySizeHint, HpackEncoder.HUFF_CODE_THRESHOLD); + } + + public DefaultHttp2HeadersEncoder(SensitivityDetector sensitivityDetector, boolean ignoreMaxHeaderListSize, + int dynamicTableArraySizeHint, int huffCodeThreshold) { + this(sensitivityDetector, + new HpackEncoder(ignoreMaxHeaderListSize, dynamicTableArraySizeHint, huffCodeThreshold)); } /** @@ -51,8 +57,8 @@ public DefaultHttp2HeadersEncoder(SensitivityDetector sensitivityDetector, boole * for testing but violate the RFC if used outside the scope of testing. 
*/ DefaultHttp2HeadersEncoder(SensitivityDetector sensitivityDetector, HpackEncoder hpackEncoder) { - this.sensitivityDetector = checkNotNull(sensitivityDetector, "sensitiveDetector"); - this.hpackEncoder = checkNotNull(hpackEncoder, "hpackEncoder"); + this.sensitivityDetector = requireNonNull(sensitivityDetector, "sensitiveDetector"); + this.hpackEncoder = requireNonNull(hpackEncoder, "hpackEncoder"); } @Override diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersFrame.java index 9d239e230b0..df691dcd3a3 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2HeadersFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -19,7 +19,7 @@ import io.netty.util.internal.UnstableApi; import static io.netty.handler.codec.http2.Http2CodecUtil.verifyPadding; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@link Http2HeadersFrame} implementation. @@ -57,7 +57,7 @@ public DefaultHttp2HeadersFrame(Http2Headers headers, boolean endStream) { * 256 (inclusive). 
*/ public DefaultHttp2HeadersFrame(Http2Headers headers, boolean endStream, int padding) { - this.headers = checkNotNull(headers, "headers"); + this.headers = requireNonNull(headers, "headers"); this.endStream = endStream; verifyPadding(padding); this.padding = padding; diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2LocalFlowController.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2LocalFlowController.java index 74dc3ae31c4..5714de66e44 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2LocalFlowController.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2LocalFlowController.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -15,6 +15,13 @@ package io.netty.handler.codec.http2; +import io.netty.buffer.ByteBuf; +import io.netty.channel.ChannelHandlerContext; +import io.netty.handler.codec.http2.Http2Exception.CompositeStreamException; +import io.netty.handler.codec.http2.Http2Exception.StreamException; +import io.netty.util.internal.PlatformDependent; +import io.netty.util.internal.UnstableApi; + import static io.netty.handler.codec.http2.Http2CodecUtil.CONNECTION_STREAM_ID; import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_WINDOW_SIZE; import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_INITIAL_WINDOW_SIZE; @@ -23,15 +30,10 @@ import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR; import static io.netty.handler.codec.http2.Http2Exception.connectionError; import static io.netty.handler.codec.http2.Http2Exception.streamError; 
-import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; import static java.lang.Math.max; import static java.lang.Math.min; -import io.netty.buffer.ByteBuf; -import io.netty.channel.ChannelHandlerContext; -import io.netty.handler.codec.http2.Http2Exception.CompositeStreamException; -import io.netty.handler.codec.http2.Http2Exception.StreamException; -import io.netty.util.internal.PlatformDependent; -import io.netty.util.internal.UnstableApi; +import static java.util.Objects.requireNonNull; /** * Basic implementation of {@link Http2LocalFlowController}. @@ -74,7 +76,7 @@ public DefaultHttp2LocalFlowController(Http2Connection connection) { public DefaultHttp2LocalFlowController(Http2Connection connection, float windowUpdateRatio, boolean autoRefillConnectionWindow) { - this.connection = checkNotNull(connection, "connection"); + this.connection = requireNonNull(connection, "connection"); windowUpdateRatio(windowUpdateRatio); // Add a flow state for the connection. @@ -108,8 +110,11 @@ public void onStreamClosed(Http2Stream stream) { FlowState state = state(stream); int unconsumedBytes = state.unconsumedBytes(); if (ctx != null && unconsumedBytes > 0) { - connectionState().consumeBytes(unconsumedBytes); - state.consumeBytes(unconsumedBytes); + if (consumeAllBytes(state, unconsumedBytes)) { + // As the user has no real control on when this callback is used we should better + // call flush() if we produced any window update to ensure we not stale. 
+ ctx.flush(); + } } } catch (Http2Exception e) { PlatformDependent.throwException(e); @@ -125,13 +130,13 @@ public void onStreamClosed(Http2Stream stream) { @Override public DefaultHttp2LocalFlowController frameWriter(Http2FrameWriter frameWriter) { - this.frameWriter = checkNotNull(frameWriter, "frameWriter"); + this.frameWriter = requireNonNull(frameWriter, "frameWriter"); return this; } @Override public void channelHandlerContext(ChannelHandlerContext ctx) { - this.ctx = checkNotNull(ctx, "ctx"); + this.ctx = requireNonNull(ctx, "ctx"); } @Override @@ -173,9 +178,7 @@ public void incrementWindowSize(Http2Stream stream, int delta) throws Http2Excep @Override public boolean consumeBytes(Http2Stream stream, int numBytes) throws Http2Exception { assert ctx != null && ctx.executor().inEventLoop(); - if (numBytes < 0) { - throw new IllegalArgumentException("numBytes must not be negative"); - } + checkPositiveOrZero(numBytes, "numBytes"); if (numBytes == 0) { return false; } @@ -187,13 +190,15 @@ public boolean consumeBytes(Http2Stream stream, int numBytes) throws Http2Except throw new UnsupportedOperationException("Returning bytes for the connection window is not supported"); } - boolean windowUpdateSent = connectionState().consumeBytes(numBytes); - windowUpdateSent |= state(stream).consumeBytes(numBytes); - return windowUpdateSent; + return consumeAllBytes(state(stream), numBytes); } return false; } + private boolean consumeAllBytes(FlowState state, int numBytes) throws Http2Exception { + return connectionState().consumeBytes(numBytes) | state.consumeBytes(numBytes); + } + @Override public int unconsumedBytes(Http2Stream stream) { return state(stream).unconsumedBytes(); @@ -296,7 +301,7 @@ private static boolean isClosed(Http2Stream stream) { * received. 
*/ private final class AutoRefillState extends DefaultState { - public AutoRefillState(Http2Stream stream, int initialWindowSize) { + AutoRefillState(Http2Stream stream, int initialWindowSize) { super(stream, initialWindowSize); } @@ -349,7 +354,7 @@ private class DefaultState implements FlowState { private int lowerBound; private boolean endOfStream; - public DefaultState(Http2Stream stream, int initialWindowSize) { + DefaultState(Http2Stream stream, int initialWindowSize) { this.stream = stream; window(initialWindowSize); streamWindowUpdateRatio = windowUpdateRatio; @@ -406,7 +411,7 @@ public void incrementFlowControlWindows(int delta) throws Http2Exception { window += delta; processedWindow += delta; - lowerBound = delta < 0 ? delta : 0; + lowerBound = min(delta, 0); } @Override @@ -449,7 +454,9 @@ public int unconsumedBytes() { @Override public boolean writeWindowUpdateIfNeeded() throws Http2Exception { - if (endOfStream || initialStreamWindowSize <= 0) { + if (endOfStream || initialStreamWindowSize <= 0 || + // If the stream is already closed there is no need to try to write a window update for it. + isClosed(stream)) { return false; } @@ -476,7 +483,7 @@ private void writeWindowUpdate() throws Http2Exception { } // Send a window update for the stream/connection. - frameWriter.writeWindowUpdate(ctx, stream.id(), deltaWindowSize, ctx.newPromise()); + frameWriter.writeWindowUpdate(ctx, stream.id(), deltaWindowSize); } } @@ -579,7 +586,8 @@ private interface FlowState { * * @param numBytes the number of bytes to be returned to the flow control window. * @return true if {@code WINDOW_UPDATE} was written, false otherwise. - * @throws Http2Exception + * @throws Http2Exception If the number of bytes is too great for the current window, + * or an internal error occurs. 
*/ boolean consumeBytes(int numBytes) throws Http2Exception; @@ -613,7 +621,7 @@ private final class WindowUpdateVisitor implements Http2StreamVisitor { private CompositeStreamException compositeException; private final int delta; - public WindowUpdateVisitor(int delta) { + WindowUpdateVisitor(int delta) { this.delta = delta; } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PingFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PingFrame.java index be5f2da02c6..f102bd1da30 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PingFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PingFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -32,10 +32,7 @@ public DefaultHttp2PingFrame(long content) { this(content, false); } - /** - * A user cannot send a ping ack, as this is done automatically when a ping is received. 
- */ - DefaultHttp2PingFrame(long content, boolean ack) { + public DefaultHttp2PingFrame(long content, boolean ack) { this.content = content; this.ack = ack; } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PriorityFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PriorityFrame.java new file mode 100644 index 00000000000..5e073d56af6 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PriorityFrame.java @@ -0,0 +1,91 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http2; + +import io.netty.util.internal.UnstableApi; + +/** + * Default implementation of {@linkplain Http2PriorityFrame} + */ +@UnstableApi +public final class DefaultHttp2PriorityFrame extends AbstractHttp2StreamFrame implements Http2PriorityFrame { + + private final int streamDependency; + private final short weight; + private final boolean exclusive; + + public DefaultHttp2PriorityFrame(int streamDependency, short weight, boolean exclusive) { + this.streamDependency = streamDependency; + this.weight = weight; + this.exclusive = exclusive; + } + + @Override + public int streamDependency() { + return streamDependency; + } + + @Override + public short weight() { + return weight; + } + + @Override + public boolean exclusive() { + return exclusive; + } + + @Override + public DefaultHttp2PriorityFrame stream(Http2FrameStream stream) { + super.stream(stream); + return this; + } + + @Override + public String name() { + return "PRIORITY_FRAME"; + } + + @Override + public boolean equals(Object o) { + if (!(o instanceof DefaultHttp2PriorityFrame)) { + return false; + } + DefaultHttp2PriorityFrame other = (DefaultHttp2PriorityFrame) o; + boolean same = super.equals(other); + return same && streamDependency == other.streamDependency + && weight == other.weight && exclusive == other.exclusive; + } + + @Override + public int hashCode() { + int hash = super.hashCode(); + hash = hash * 31 + streamDependency; + hash = hash * 31 + weight; + hash = hash * 31 + (exclusive ? 
1 : 0); + return hash; + } + + @Override + public String toString() { + return "DefaultHttp2PriorityFrame(" + + "stream=" + stream() + + ", streamDependency=" + streamDependency + + ", weight=" + weight + + ", exclusive=" + exclusive + + ')'; + } +} diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrame.java new file mode 100644 index 00000000000..f9fd9871093 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2PushPromiseFrame.java @@ -0,0 +1,101 @@ +/* + * Copyright 2020 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. 
+ */ +package io.netty.handler.codec.http2; + +import io.netty.util.internal.UnstableApi; + +/** + * Default implementation of {@link Http2PushPromiseFrame} + */ +@UnstableApi +public final class DefaultHttp2PushPromiseFrame implements Http2PushPromiseFrame { + + private Http2FrameStream pushStreamFrame; + private final Http2Headers http2Headers; + private Http2FrameStream streamFrame; + private final int padding; + private final int promisedStreamId; + + public DefaultHttp2PushPromiseFrame(Http2Headers http2Headers) { + this(http2Headers, 0); + } + + public DefaultHttp2PushPromiseFrame(Http2Headers http2Headers, int padding) { + this(http2Headers, padding, -1); + } + + DefaultHttp2PushPromiseFrame(Http2Headers http2Headers, int padding, int promisedStreamId) { + this.http2Headers = http2Headers; + this.padding = padding; + this.promisedStreamId = promisedStreamId; + } + + @Override + public Http2StreamFrame pushStream(Http2FrameStream stream) { + pushStreamFrame = stream; + return this; + } + + @Override + public Http2FrameStream pushStream() { + return pushStreamFrame; + } + + @Override + public Http2Headers http2Headers() { + return http2Headers; + } + + @Override + public int padding() { + return padding; + } + + @Override + public int promisedStreamId() { + if (pushStreamFrame != null) { + return pushStreamFrame.id(); + } else { + return promisedStreamId; + } + } + + @Override + public Http2PushPromiseFrame stream(Http2FrameStream stream) { + streamFrame = stream; + return this; + } + + @Override + public Http2FrameStream stream() { + return streamFrame; + } + + @Override + public String name() { + return "PUSH_PROMISE_FRAME"; + } + + @Override + public String toString() { + return "DefaultHttp2PushPromiseFrame{" + + "pushStreamFrame=" + pushStreamFrame + + ", http2Headers=" + http2Headers + + ", streamFrame=" + streamFrame + + ", padding=" + padding + + '}'; + } +} diff --git 
a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2RemoteFlowController.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2RemoteFlowController.java index 217cf8dc251..8d140978ea8 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2RemoteFlowController.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2RemoteFlowController.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -30,9 +30,10 @@ import static io.netty.handler.codec.http2.Http2Error.STREAM_CLOSED; import static io.netty.handler.codec.http2.Http2Exception.streamError; import static io.netty.handler.codec.http2.Http2Stream.State.HALF_CLOSED_LOCAL; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; import static java.lang.Math.max; import static java.lang.Math.min; +import static java.util.Objects.requireNonNull; /** * Basic implementation of {@link Http2RemoteFlowController}. 
@@ -69,8 +70,8 @@ public DefaultHttp2RemoteFlowController(Http2Connection connection, final Listen public DefaultHttp2RemoteFlowController(Http2Connection connection, StreamByteDistributor streamByteDistributor, final Listener listener) { - this.connection = checkNotNull(connection, "connection"); - this.streamByteDistributor = checkNotNull(streamByteDistributor, "streamWriteDistributor"); + this.connection = requireNonNull(connection, "connection"); + this.streamByteDistributor = requireNonNull(streamByteDistributor, "streamWriteDistributor"); // Add a flow state for the connection. stateKey = connection.newKey(); @@ -107,7 +108,7 @@ public void onStreamClosed(Http2Stream stream) { @Override public void onStreamHalfClosed(Http2Stream stream) { if (HALF_CLOSED_LOCAL == stream.state()) { - /** + /* * When this method is called there should not be any * pending frames left if the API is used correctly. However, * it is possible that a erroneous application can sneak @@ -131,7 +132,7 @@ public void onStreamHalfClosed(Http2Stream stream) { */ @Override public void channelHandlerContext(ChannelHandlerContext ctx) throws Http2Exception { - this.ctx = checkNotNull(ctx, "ctx"); + this.ctx = requireNonNull(ctx, "ctx"); // Writing the pending bytes will not check writability change and instead a writability change notification // to be provided by an explicit call. @@ -210,7 +211,7 @@ public void incrementWindowSize(Http2Stream stream, int delta) throws Http2Excep public void addFlowControlled(Http2Stream stream, FlowControlled frame) { // The context can be null assuming the frame will be queued and send later when the context is set. 
assert ctx == null || ctx.executor().inEventLoop(); - checkNotNull(frame, "frame"); + requireNonNull(frame, "frame"); try { monitor.enqueueFrame(state(stream), frame); } catch (Throwable t) { @@ -287,7 +288,7 @@ private final class FlowState implements StreamByteDistributor.StreamState { FlowState(Http2Stream stream) { this.stream = stream; - pendingWriteQueue = new ArrayDeque(2); + pendingWriteQueue = new ArrayDeque<>(2); } /** @@ -635,18 +636,13 @@ final void writePendingBytes() throws Http2Exception { } void initialWindowSize(int newWindowSize) throws Http2Exception { - if (newWindowSize < 0) { - throw new IllegalArgumentException("Invalid initial window size: " + newWindowSize); - } + checkPositiveOrZero(newWindowSize, "newWindowSize"); final int delta = newWindowSize - initialWindowSize; initialWindowSize = newWindowSize; - connection.forEachActiveStream(new Http2StreamVisitor() { - @Override - public boolean visit(Http2Stream stream) throws Http2Exception { - state(stream).incrementStreamWindow(delta); - return true; - } + connection.forEachActiveStream(stream -> { + state(stream).incrementStreamWindow(delta); + return true; }); if (delta > 0 && isChannelWritable()) { diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ResetFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ResetFrame.java index d3cdf67eb88..9ebd5e057b0 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ResetFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2ResetFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -18,7 +18,7 @@ import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static java.util.Objects.requireNonNull; /** * The default {@link Http2ResetFrame} implementation. @@ -34,7 +34,7 @@ public final class DefaultHttp2ResetFrame extends AbstractHttp2StreamFrame imple * @param error the non-{@code null} reason for reset */ public DefaultHttp2ResetFrame(Http2Error error) { - errorCode = checkNotNull(error, "error").code(); + errorCode = requireNonNull(error, "error").code(); } /** @@ -79,7 +79,7 @@ public boolean equals(Object o) { @Override public int hashCode() { int hash = super.hashCode(); - hash = hash * 31 + (int) (errorCode ^ (errorCode >>> 32)); + hash = hash * 31 + (int) (errorCode ^ errorCode >>> 32); return hash; } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsAckFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsAckFrame.java new file mode 100644 index 00000000000..ea1c9ba9002 --- /dev/null +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsAckFrame.java @@ -0,0 +1,33 @@ +/* + * Copyright 2019 The Netty Project + * + * The Netty Project licenses this file to you under the Apache License, + * version 2.0 (the "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at: + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations + * under the License. + */ +package io.netty.handler.codec.http2; + +import io.netty.util.internal.StringUtil; + +/** + * The default {@link Http2SettingsAckFrame} implementation. + */ +final class DefaultHttp2SettingsAckFrame implements Http2SettingsAckFrame { + @Override + public String name() { + return "SETTINGS(ACK)"; + } + + @Override + public String toString() { + return StringUtil.simpleClassName(this); + } +} diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsFrame.java index c60f59feec5..0064c8d4bba 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2SettingsFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,7 +16,8 @@ package io.netty.handler.codec.http2; -import io.netty.util.internal.ObjectUtil; +import static java.util.Objects.requireNonNull; + import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; @@ -29,7 +30,7 @@ public class DefaultHttp2SettingsFrame implements Http2SettingsFrame { private final Http2Settings settings; public DefaultHttp2SettingsFrame(Http2Settings settings) { - this.settings = ObjectUtil.checkNotNull(settings, "settings"); + this.settings = requireNonNull(settings, "settings"); } @Override @@ -42,6 +43,20 @@ public String name() { return "SETTINGS"; } + @Override + public boolean equals(Object o) { + if (!(o instanceof Http2SettingsFrame)) { + return false; + } + Http2SettingsFrame other = (Http2SettingsFrame) o; + return settings.equals(other.settings()); + } + + @Override + public int hashCode() { + return settings.hashCode(); + } + @Override public String toString() { return StringUtil.simpleClassName(this) + "(settings=" + settings + ')'; diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2UnknownFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2UnknownFrame.java index 65289d42804..66a3c6dc0c8 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2UnknownFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2UnknownFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -80,7 +80,7 @@ public DefaultHttp2UnknownFrame retainedDuplicate() { @Override public DefaultHttp2UnknownFrame replace(ByteBuf content) { - return new DefaultHttp2UnknownFrame(frameType, flags, content).stream(stream()); + return new DefaultHttp2UnknownFrame(frameType, flags, content).stream(stream); } @Override @@ -97,8 +97,8 @@ public DefaultHttp2UnknownFrame retain(int increment) { @Override public String toString() { - return StringUtil.simpleClassName(this) + "(frameType=" + frameType() + ", stream=" + stream() + - ", flags=" + flags() + ", content=" + contentToString() + ')'; + return StringUtil.simpleClassName(this) + "(frameType=" + frameType + ", stream=" + stream + + ", flags=" + flags + ", content=" + contentToString() + ')'; } @Override @@ -119,18 +119,20 @@ public boolean equals(Object o) { return false; } DefaultHttp2UnknownFrame other = (DefaultHttp2UnknownFrame) o; - return super.equals(other) && flags().equals(other.flags()) - && frameType() == other.frameType() && (stream() == null && other.stream() == null) || - stream().equals(other.stream()); + Http2FrameStream otherStream = other.stream(); + return (stream == otherStream || otherStream != null && otherStream.equals(stream)) + && flags.equals(other.flags()) + && frameType == other.frameType() + && super.equals(other); } @Override public int hashCode() { int hash = super.hashCode(); - hash = hash * 31 + frameType(); - hash = hash * 31 + flags().hashCode(); - if (stream() != null) { - hash = hash * 31 + stream().hashCode(); + hash = hash * 31 + frameType; + hash = hash * 31 + flags.hashCode(); + if (stream != null) { + hash = hash * 31 + stream.hashCode(); } return hash; diff --git 
a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2WindowUpdateFrame.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2WindowUpdateFrame.java index aba6c880534..1c98dd471c1 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2WindowUpdateFrame.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DefaultHttp2WindowUpdateFrame.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -15,6 +15,7 @@ */ package io.netty.handler.codec.http2; +import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; /** @@ -44,4 +45,10 @@ public String name() { public int windowSizeIncrement() { return windowUpdateIncrement; } + + @Override + public String toString() { + return StringUtil.simpleClassName(this) + + "(stream=" + stream() + ", windowUpdateIncrement=" + windowUpdateIncrement + ')'; + } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java index 78ef230c62f..e38e30195a1 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. 
You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express @@ -19,12 +19,15 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.channel.embedded.EmbeddedChannel; import io.netty.handler.codec.ByteToMessageDecoder; +import io.netty.handler.codec.compression.Brotli; +import io.netty.handler.codec.compression.BrotliDecoder; import io.netty.handler.codec.compression.ZlibCodecFactory; import io.netty.handler.codec.compression.ZlibWrapper; import io.netty.util.internal.UnstableApi; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_ENCODING; import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; +import static io.netty.handler.codec.http.HttpHeaderValues.BR; import static io.netty.handler.codec.http.HttpHeaderValues.DEFLATE; import static io.netty.handler.codec.http.HttpHeaderValues.GZIP; import static io.netty.handler.codec.http.HttpHeaderValues.IDENTITY; @@ -32,10 +35,11 @@ import static io.netty.handler.codec.http.HttpHeaderValues.X_GZIP; import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR; import static io.netty.handler.codec.http2.Http2Exception.streamError; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.util.internal.ObjectUtil.checkPositiveOrZero; +import static java.util.Objects.requireNonNull; /** - * A HTTP2 frame listener that will decompress data frames according to the {@code content-encoding} header for each + * An HTTP2 frame listener that will decompress data frames according to the {@code content-encoding} header for each * stream. The decompression provided by this class will be applied to the data for the entire stream. 
*/ @UnstableApi @@ -174,6 +178,10 @@ protected EmbeddedChannel newContentDecompressor(final ChannelHandlerContext ctx return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), ctx.channel().config(), ZlibCodecFactory.newZlibDecoder(wrapper)); } + if (Brotli.isAvailable() && BR.contentEqualsIgnoreCase(contentEncoding)) { + return new EmbeddedChannel(ctx.channel().id(), ctx.channel().metadata().hasDisconnect(), + ctx.channel().config(), new BrotliDecoder()); + } // 'identity' or unsupported return null; } @@ -233,7 +241,7 @@ private void initDecompressor(ChannelHandlerContext ctx, int streamId, Http2Head if (decompressor != null) { // The content length will be for the compressed data. Since we will decompress the data // this content-length will not be correct. Instead of queuing messages or delaying sending - // header frames...just remove the content-length header + // header frames just remove the content-length header. headers.remove(CONTENT_LENGTH); // The first time that we initialize a decompressor, decorate the local flow controller to @@ -286,7 +294,7 @@ private final class ConsumedBytesConverter implements Http2LocalFlowController { private final Http2LocalFlowController flowController; ConsumedBytesConverter(Http2LocalFlowController flowController) { - this.flowController = checkNotNull(flowController, "flowController"); + this.flowController = requireNonNull(flowController, "flowController"); } @Override @@ -398,9 +406,7 @@ void incrementDecompressedBytes(int delta) { * @return The number of pre-decompressed bytes that have been consumed. 
*/ int consumeBytes(int streamId, int decompressedBytes) throws Http2Exception { - if (decompressedBytes < 0) { - throw new IllegalArgumentException("decompressedBytes must not be negative: " + decompressedBytes); - } + checkPositiveOrZero(decompressedBytes, "decompressedBytes"); if (decompressed - decompressedBytes < 0) { throw streamError(streamId, INTERNAL_ERROR, "Attempting to return too many bytes for stream %d. decompressed: %d " + diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/EmptyHttp2Headers.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/EmptyHttp2Headers.java index 2dad14e1e54..f096337de28 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/EmptyHttp2Headers.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/EmptyHttp2Headers.java @@ -5,7 +5,7 @@ * "License"); you may not use this file except in compliance with the License. You may obtain a * copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDecoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDecoder.java index 67d6aa9944b..497ee0d6fed 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDecoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -42,35 +42,41 @@ import static io.netty.handler.codec.http2.Http2CodecUtil.MIN_HEADER_TABLE_SIZE; import static io.netty.handler.codec.http2.Http2CodecUtil.headerListSizeExceeded; import static io.netty.handler.codec.http2.Http2Error.COMPRESSION_ERROR; -import static io.netty.handler.codec.http2.Http2Error.INTERNAL_ERROR; import static io.netty.handler.codec.http2.Http2Error.PROTOCOL_ERROR; import static io.netty.handler.codec.http2.Http2Exception.connectionError; +import static io.netty.handler.codec.http2.Http2Exception.streamError; import static io.netty.handler.codec.http2.Http2Headers.PseudoHeaderName.getPseudoHeader; import static io.netty.handler.codec.http2.Http2Headers.PseudoHeaderName.hasPseudoHeaderFormat; import static io.netty.util.AsciiString.EMPTY_STRING; import static io.netty.util.internal.ObjectUtil.checkPositive; -import static io.netty.util.internal.ThrowableUtil.unknownStackTrace; final class HpackDecoder { - private static final Http2Exception DECODE_ULE_128_DECOMPRESSION_EXCEPTION = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - decompression failure"), HpackDecoder.class, + private static final Http2Exception DECODE_ULE_128_DECOMPRESSION_EXCEPTION = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - decompression failure", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, 
HpackDecoder.class, "decodeULE128(..)"); - private static final Http2Exception DECODE_ULE_128_TO_LONG_DECOMPRESSION_EXCEPTION = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - long overflow"), HpackDecoder.class, "decodeULE128(..)"); - private static final Http2Exception DECODE_ULE_128_TO_INT_DECOMPRESSION_EXCEPTION = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - int overflow"), HpackDecoder.class, "decodeULE128ToInt(..)"); - private static final Http2Exception DECODE_ILLEGAL_INDEX_VALUE = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - illegal index value"), HpackDecoder.class, "decode(..)"); - private static final Http2Exception INDEX_HEADER_ILLEGAL_INDEX_VALUE = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - illegal index value"), HpackDecoder.class, "indexHeader(..)"); - private static final Http2Exception READ_NAME_ILLEGAL_INDEX_VALUE = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - illegal index value"), HpackDecoder.class, "readName(..)"); - private static final Http2Exception INVALID_MAX_DYNAMIC_TABLE_SIZE = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - invalid max dynamic table size"), HpackDecoder.class, + private static final Http2Exception DECODE_ULE_128_TO_LONG_DECOMPRESSION_EXCEPTION = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - long overflow", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackDecoder.class, "decodeULE128(..)"); + private static final Http2Exception DECODE_ULE_128_TO_INT_DECOMPRESSION_EXCEPTION = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - int overflow", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackDecoder.class, "decodeULE128ToInt(..)"); + private static final Http2Exception DECODE_ILLEGAL_INDEX_VALUE = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - illegal index value", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackDecoder.class, "decode(..)"); + private static final Http2Exception 
INDEX_HEADER_ILLEGAL_INDEX_VALUE = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - illegal index value", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackDecoder.class, "indexHeader(..)"); + private static final Http2Exception READ_NAME_ILLEGAL_INDEX_VALUE = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - illegal index value", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackDecoder.class, "readName(..)"); + private static final Http2Exception INVALID_MAX_DYNAMIC_TABLE_SIZE = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - invalid max dynamic table size", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackDecoder.class, "setDynamicTableSize(..)"); - private static final Http2Exception MAX_DYNAMIC_TABLE_SIZE_CHANGE_REQUIRED = unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - max dynamic table size change required"), HpackDecoder.class, - "decode(..)"); + private static final Http2Exception MAX_DYNAMIC_TABLE_SIZE_CHANGE_REQUIRED = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - max dynamic table size change required", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackDecoder.class, "decode(..)"); private static final byte READ_HEADER_REPRESENTATION = 0; private static final byte READ_MAX_DYNAMIC_TABLE_SIZE = 1; private static final byte READ_INDEXED_HEADER = 2; @@ -82,9 +88,8 @@ final class HpackDecoder { private static final byte READ_LITERAL_HEADER_VALUE_LENGTH = 8; private static final byte READ_LITERAL_HEADER_VALUE = 9; + private final HpackHuffmanDecoder huffmanDecoder = new HpackHuffmanDecoder(); private final HpackDynamicTable hpackDynamicTable; - private final HpackHuffmanDecoder hpackHuffmanDecoder; - private long maxHeaderListSizeGoAway; private long maxHeaderListSize; private long maxDynamicTableSize; private long encoderMaxDynamicTableSize; @@ -96,24 +101,21 @@ final class HpackDecoder { * This is because SETTINGS_MAX_HEADER_LIST_SIZE * allows a lower than advertised limit from being enforced, and the default 
limit is unlimited * (which is dangerous). - * @param initialHuffmanDecodeCapacity Size of an intermediate buffer used during huffman decode. */ - HpackDecoder(long maxHeaderListSize, int initialHuffmanDecodeCapacity) { - this(maxHeaderListSize, initialHuffmanDecodeCapacity, DEFAULT_HEADER_TABLE_SIZE); + HpackDecoder(long maxHeaderListSize) { + this(maxHeaderListSize, DEFAULT_HEADER_TABLE_SIZE); } /** * Exposed Used for testing only! Default values used in the initial settings frame are overridden intentionally * for testing but violate the RFC if used outside the scope of testing. */ - HpackDecoder(long maxHeaderListSize, int initialHuffmanDecodeCapacity, int maxHeaderTableSize) { + HpackDecoder(long maxHeaderListSize, int maxHeaderTableSize) { this.maxHeaderListSize = checkPositive(maxHeaderListSize, "maxHeaderListSize"); - this.maxHeaderListSizeGoAway = Http2CodecUtil.calculateMaxHeaderListSizeGoAway(maxHeaderListSize); maxDynamicTableSize = encoderMaxDynamicTableSize = maxHeaderTableSize; maxDynamicTableSizeChangeRequired = false; hpackDynamicTable = new HpackDynamicTable(maxHeaderTableSize); - hpackHuffmanDecoder = new HpackHuffmanDecoder(initialHuffmanDecodeCapacity); } /** @@ -122,14 +124,21 @@ final class HpackDecoder { * This method assumes the entire header block is contained in {@code in}. */ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean validateHeaders) throws Http2Exception { + Http2HeadersSink sink = new Http2HeadersSink(streamId, headers, maxHeaderListSize, validateHeaders); + decode(in, sink); + + // Now that we've read all of our headers we can perform the validation steps. We must + // delay throwing until this point to prevent dynamic table corruption. 
+ sink.finish(); + } + + private void decode(ByteBuf in, Sink sink) throws Http2Exception { int index = 0; - long headersLength = 0; int nameLength = 0; int valueLength = 0; byte state = READ_HEADER_REPRESENTATION; boolean huffmanEncoded = false; CharSequence name = null; - HeaderType headerType = null; IndexType indexType = IndexType.NONE; while (in.isReadable()) { switch (state) { @@ -150,9 +159,7 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid break; default: HpackHeaderField indexedHeader = getIndexedHeader(index); - headerType = validate(indexedHeader.name, headerType, validateHeaders); - headersLength = addHeader(headers, indexedHeader.name, indexedHeader.value, - headersLength); + sink.appendToHeaderList(indexedHeader.name, indexedHeader.value); } } else if ((b & 0x40) == 0x40) { // Literal Header Field with Incremental Indexing @@ -168,7 +175,6 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid default: // Index was stored as the prefix name = readName(index); - headerType = validate(name, headerType, validateHeaders); nameLength = name.length(); state = READ_LITERAL_HEADER_VALUE_LENGTH_PREFIX; } @@ -183,7 +189,7 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid } } else { // Literal Header Field without Indexing / never Indexed - indexType = ((b & 0x10) == 0x10) ? IndexType.NEVER : IndexType.NONE; + indexType = (b & 0x10) == 0x10 ? 
IndexType.NEVER : IndexType.NONE; index = b & 0x0F; switch (index) { case 0: @@ -193,11 +199,10 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid state = READ_INDEXED_HEADER_NAME; break; default: - // Index was stored as the prefix - name = readName(index); - headerType = validate(name, headerType, validateHeaders); - nameLength = name.length(); - state = READ_LITERAL_HEADER_VALUE_LENGTH_PREFIX; + // Index was stored as the prefix + name = readName(index); + nameLength = name.length(); + state = READ_LITERAL_HEADER_VALUE_LENGTH_PREFIX; } } break; @@ -209,15 +214,13 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid case READ_INDEXED_HEADER: HpackHeaderField indexedHeader = getIndexedHeader(decodeULE128(in, index)); - headerType = validate(indexedHeader.name, headerType, validateHeaders); - headersLength = addHeader(headers, indexedHeader.name, indexedHeader.value, headersLength); + sink.appendToHeaderList(indexedHeader.name, indexedHeader.value); state = READ_HEADER_REPRESENTATION; break; case READ_INDEXED_HEADER_NAME: // Header Name matches an entry in the Header Table name = readName(decodeULE128(in, index)); - headerType = validate(name, headerType, validateHeaders); nameLength = name.length(); state = READ_LITERAL_HEADER_VALUE_LENGTH_PREFIX; break; @@ -229,9 +232,6 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid if (index == 0x7f) { state = READ_LITERAL_HEADER_NAME_LENGTH; } else { - if (index > maxHeaderListSizeGoAway - headersLength) { - headerListSizeExceeded(maxHeaderListSizeGoAway); - } nameLength = index; state = READ_LITERAL_HEADER_NAME; } @@ -241,9 +241,6 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid // Header Name is a Literal String nameLength = decodeULE128(in, index); - if (nameLength > maxHeaderListSizeGoAway - headersLength) { - headerListSizeExceeded(maxHeaderListSizeGoAway); - } state = 
READ_LITERAL_HEADER_NAME; break; @@ -254,7 +251,6 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid } name = readStringLiteral(in, nameLength, huffmanEncoded); - headerType = validate(name, headerType, validateHeaders); state = READ_LITERAL_HEADER_VALUE_LENGTH_PREFIX; break; @@ -268,15 +264,10 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid state = READ_LITERAL_HEADER_VALUE_LENGTH; break; case 0: - headerType = validate(name, headerType, validateHeaders); - headersLength = insertHeader(headers, name, EMPTY_STRING, indexType, headersLength); + insertHeader(sink, name, EMPTY_STRING, indexType); state = READ_HEADER_REPRESENTATION; break; default: - // Check new header size against max header size - if ((long) index + nameLength > maxHeaderListSizeGoAway - headersLength) { - headerListSizeExceeded(maxHeaderListSizeGoAway); - } valueLength = index; state = READ_LITERAL_HEADER_VALUE; } @@ -287,10 +278,6 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid // Header Value is a Literal String valueLength = decodeULE128(in, index); - // Check new header size against max header size - if ((long) valueLength + nameLength > maxHeaderListSizeGoAway - headersLength) { - headerListSizeExceeded(maxHeaderListSizeGoAway); - } state = READ_LITERAL_HEADER_VALUE; break; @@ -301,8 +288,7 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid } CharSequence value = readStringLiteral(in, valueLength, huffmanEncoded); - headerType = validate(name, headerType, validateHeaders); - headersLength = insertHeader(headers, name, value, indexType, headersLength); + insertHeader(sink, name, value, indexType); state = READ_HEADER_REPRESENTATION; break; @@ -311,13 +297,6 @@ public void decode(int streamId, ByteBuf in, Http2Headers headers, boolean valid } } - // we have read all of our headers, and not exceeded maxHeaderListSizeGoAway see if we have - // exceeded our 
actual maxHeaderListSize. This must be done here to prevent dynamic table - // corruption - if (headersLength > maxHeaderListSize) { - headerListSizeExceeded(streamId, maxHeaderListSize, true); - } - if (state != READ_HEADER_REPRESENTATION) { throw connectionError(COMPRESSION_ERROR, "Incomplete header block fragment."); } @@ -341,27 +320,27 @@ public void setMaxHeaderTableSize(long maxHeaderTableSize) throws Http2Exception } } + /** + * @deprecated use {@link #setMaxHeaderListSize(long)}; {@code maxHeaderListSizeGoAway} is + * ignored + */ + @Deprecated public void setMaxHeaderListSize(long maxHeaderListSize, long maxHeaderListSizeGoAway) throws Http2Exception { - if (maxHeaderListSizeGoAway < maxHeaderListSize || maxHeaderListSizeGoAway < 0) { - throw connectionError(INTERNAL_ERROR, "Header List Size GO_AWAY %d must be positive and >= %d", - maxHeaderListSizeGoAway, maxHeaderListSize); - } + setMaxHeaderListSize(maxHeaderListSize); + } + + public void setMaxHeaderListSize(long maxHeaderListSize) throws Http2Exception { if (maxHeaderListSize < MIN_HEADER_LIST_SIZE || maxHeaderListSize > MAX_HEADER_LIST_SIZE) { throw connectionError(PROTOCOL_ERROR, "Header List Size must be >= %d and <= %d but was %d", MIN_HEADER_TABLE_SIZE, MAX_HEADER_TABLE_SIZE, maxHeaderListSize); } this.maxHeaderListSize = maxHeaderListSize; - this.maxHeaderListSizeGoAway = maxHeaderListSizeGoAway; } public long getMaxHeaderListSize() { return maxHeaderListSize; } - public long getMaxHeaderListSizeGoAway() { - return maxHeaderListSizeGoAway; - } - /** * Return the maximum table size. This is the maximum size allowed by both the encoder and the * decoder. 
@@ -400,26 +379,23 @@ private void setDynamicTableSize(long dynamicTableSize) throws Http2Exception { hpackDynamicTable.setCapacity(dynamicTableSize); } - private HeaderType validate(CharSequence name, HeaderType previousHeaderType, - final boolean validateHeaders) throws Http2Exception { - if (!validateHeaders) { - return null; - } - + private static HeaderType validate(int streamId, CharSequence name, + HeaderType previousHeaderType) throws Http2Exception { if (hasPseudoHeaderFormat(name)) { if (previousHeaderType == HeaderType.REGULAR_HEADER) { - throw connectionError(PROTOCOL_ERROR, "Pseudo-header field '%s' found after regular header.", name); + throw streamError(streamId, PROTOCOL_ERROR, + "Pseudo-header field '%s' found after regular header.", name); } final Http2Headers.PseudoHeaderName pseudoHeader = getPseudoHeader(name); if (pseudoHeader == null) { - throw connectionError(PROTOCOL_ERROR, "Invalid HTTP/2 pseudo-header '%s' encountered.", name); + throw streamError(streamId, PROTOCOL_ERROR, "Invalid HTTP/2 pseudo-header '%s' encountered.", name); } final HeaderType currentHeaderType = pseudoHeader.isRequestOnly() ? 
HeaderType.REQUEST_PSEUDO_HEADER : HeaderType.RESPONSE_PSEUDO_HEADER; if (previousHeaderType != null && currentHeaderType != previousHeaderType) { - throw connectionError(PROTOCOL_ERROR, "Mix of request and response pseudo-headers."); + throw streamError(streamId, PROTOCOL_ERROR, "Mix of request and response pseudo-headers."); } return currentHeaderType; @@ -450,9 +426,8 @@ private HpackHeaderField getIndexedHeader(int index) throws Http2Exception { throw INDEX_HEADER_ILLEGAL_INDEX_VALUE; } - private long insertHeader(Http2Headers headers, CharSequence name, CharSequence value, - IndexType indexType, long headerSize) throws Http2Exception { - headerSize = addHeader(headers, name, value, headerSize); + private void insertHeader(Sink sink, CharSequence name, CharSequence value, IndexType indexType) { + sink.appendToHeaderList(name, value); switch (indexType) { case NONE: @@ -466,23 +441,11 @@ private long insertHeader(Http2Headers headers, CharSequence name, CharSequence default: throw new Error("should not reach here"); } - - return headerSize; - } - - private long addHeader(Http2Headers headers, CharSequence name, CharSequence value, long headersLength) - throws Http2Exception { - headersLength += HpackHeaderField.sizeOf(name, value); - if (headersLength > maxHeaderListSizeGoAway) { - headerListSizeExceeded(maxHeaderListSizeGoAway); - } - headers.add(name, value); - return headersLength; } private CharSequence readStringLiteral(ByteBuf in, int length, boolean huffmanEncoded) throws Http2Exception { if (huffmanEncoded) { - return hpackHuffmanDecoder.decode(in, length); + return huffmanDecoder.decode(in, length); } byte[] buf = new byte[length]; in.readBytes(buf); @@ -553,4 +516,58 @@ private enum HeaderType { REQUEST_PSEUDO_HEADER, RESPONSE_PSEUDO_HEADER } + + private interface Sink { + void appendToHeaderList(CharSequence name, CharSequence value); + void finish() throws Http2Exception; + } + + private static final class Http2HeadersSink implements Sink { + private 
final Http2Headers headers; + private final long maxHeaderListSize; + private final int streamId; + private final boolean validate; + private long headersLength; + private boolean exceededMaxLength; + private HeaderType previousType; + private Http2Exception validationException; + + Http2HeadersSink(int streamId, Http2Headers headers, long maxHeaderListSize, boolean validate) { + this.headers = headers; + this.maxHeaderListSize = maxHeaderListSize; + this.streamId = streamId; + this.validate = validate; + } + + @Override + public void finish() throws Http2Exception { + if (exceededMaxLength) { + headerListSizeExceeded(streamId, maxHeaderListSize, true); + } else if (validationException != null) { + throw validationException; + } + } + + @Override + public void appendToHeaderList(CharSequence name, CharSequence value) { + headersLength += HpackHeaderField.sizeOf(name, value); + exceededMaxLength |= headersLength > maxHeaderListSize; + + if (exceededMaxLength || validationException != null) { + // We don't store the header since we've already failed validation requirements. + return; + } + + if (validate) { + try { + previousType = validate(streamId, name, previousType); + } catch (Http2Exception ex) { + validationException = ex; + return; + } + } + + headers.add(name, value); + } + } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDynamicTable.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDynamicTable.java index 6fda10d47ee..4a712eed257 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDynamicTable.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackDynamicTable.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -83,7 +83,7 @@ public long capacity() { */ public HpackHeaderField getEntry(int index) { if (index <= 0 || index > length()) { - throw new IndexOutOfBoundsException(); + throw new IndexOutOfBoundsException("Index " + index + " out of bounds for length " + length()); } int i = head - index; if (i < 0) { @@ -109,7 +109,7 @@ public void add(HpackHeaderField header) { remove(); } hpackHeaderFields[head++] = header; - size += header.size(); + size += headerSize; if (head == hpackHeaderFields.length) { head = 0; } @@ -183,12 +183,14 @@ public void setCapacity(long capacity) { // initially length will be 0 so there will be no copy int len = length(); - int cursor = tail; - for (int i = 0; i < len; i++) { - HpackHeaderField entry = hpackHeaderFields[cursor++]; - tmp[i] = entry; - if (cursor == hpackHeaderFields.length) { - cursor = 0; + if (hpackHeaderFields != null) { + int cursor = tail; + for (int i = 0; i < len; i++) { + HpackHeaderField entry = hpackHeaderFields[cursor++]; + tmp[i] = entry; + if (cursor == hpackHeaderFields.length) { + cursor = 0; + } } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackEncoder.java index b9a7703042f..6df151b1e29 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackEncoder.java 
+++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -41,7 +41,7 @@ import java.util.Map; import static io.netty.handler.codec.http2.HpackUtil.equalsConstantTime; -import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_HEADER_LIST_SIZE; +import static io.netty.handler.codec.http2.HpackUtil.equalsVariableTime; import static io.netty.handler.codec.http2.Http2CodecUtil.DEFAULT_HEADER_TABLE_SIZE; import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_HEADER_LIST_SIZE; import static io.netty.handler.codec.http2.Http2CodecUtil.MAX_HEADER_TABLE_SIZE; @@ -54,7 +54,16 @@ import static java.lang.Math.max; import static java.lang.Math.min; +/** + * An HPACK encoder. + * + *

    Implementation note: This class is security sensitive, and depends on users correctly identifying their headers + * as security sensitive or not. If a header is considered not sensitive, methods names "insensitive" are used which + * are fast, but don't provide any security guarantees. + */ final class HpackEncoder { + static final int NOT_FOUND = -1; + static final int HUFF_CODE_THRESHOLD = 512; // a linked hash map of header fields private final HeaderEntry[] headerFields; private final HeaderEntry head = new HeaderEntry(-1, AsciiString.EMPTY_STRING, @@ -62,6 +71,7 @@ final class HpackEncoder { private final HpackHuffmanEncoder hpackHuffmanEncoder = new HpackHuffmanEncoder(); private final byte hashMask; private final boolean ignoreMaxHeaderListSize; + private final int huffCodeThreshold; private long size; private long maxHeaderTableSize; private long maxHeaderListSize; @@ -76,22 +86,23 @@ final class HpackEncoder { /** * Creates a new encoder. */ - public HpackEncoder(boolean ignoreMaxHeaderListSize) { - this(ignoreMaxHeaderListSize, 16); + HpackEncoder(boolean ignoreMaxHeaderListSize) { + this(ignoreMaxHeaderListSize, 16, HUFF_CODE_THRESHOLD); } /** * Creates a new encoder. */ - public HpackEncoder(boolean ignoreMaxHeaderListSize, int arraySizeHint) { + HpackEncoder(boolean ignoreMaxHeaderListSize, int arraySizeHint, int huffCodeThreshold) { this.ignoreMaxHeaderListSize = ignoreMaxHeaderListSize; maxHeaderTableSize = DEFAULT_HEADER_TABLE_SIZE; - maxHeaderListSize = DEFAULT_HEADER_LIST_SIZE; + maxHeaderListSize = MAX_HEADER_LIST_SIZE; // Enforce a bound of [2, 128] because hashMask is a byte. The max possible value of hashMask is one less // than the length of this array, and we want the mask to be > 0. 
headerFields = new HeaderEntry[findNextPositivePowerOfTwo(max(2, min(arraySizeHint, 128)))]; hashMask = (byte) (headerFields.length - 1); head.before = head.after = head; + this.huffCodeThreshold = huffCodeThreshold; } /** @@ -151,8 +162,8 @@ private void encodeHeader(ByteBuf out, CharSequence name, CharSequence value, bo // If the peer will only use the static table if (maxHeaderTableSize == 0) { - int staticTableIndex = HpackStaticTable.getIndex(name, value); - if (staticTableIndex == -1) { + int staticTableIndex = HpackStaticTable.getIndexInsensitive(name, value); + if (staticTableIndex == HpackStaticTable.NOT_FOUND) { int nameIndex = HpackStaticTable.getIndex(name); encodeLiteral(out, name, value, IndexType.NONE, nameIndex); } else { @@ -168,14 +179,14 @@ private void encodeHeader(ByteBuf out, CharSequence name, CharSequence value, bo return; } - HeaderEntry headerField = getEntry(name, value); + HeaderEntry headerField = getEntryInsensitive(name, value); if (headerField != null) { int index = getIndex(headerField.index) + HpackStaticTable.length; // Section 6.1. Indexed Header Field Representation encodeInteger(out, 0x80, 7, index); } else { - int staticTableIndex = HpackStaticTable.getIndex(name, value); - if (staticTableIndex != -1) { + int staticTableIndex = HpackStaticTable.getIndexInsensitive(name, value); + if (staticTableIndex != HpackStaticTable.NOT_FOUND) { // Section 6.1. 
Indexed Header Field Representation encodeInteger(out, 0x80, 7, staticTableIndex); } else { @@ -234,14 +245,14 @@ private static void encodeInteger(ByteBuf out, int mask, int n, int i) { */ private static void encodeInteger(ByteBuf out, int mask, int n, long i) { assert n >= 0 && n <= 8 : "N: " + n; - int nbits = 0xFF >>> (8 - n); + int nbits = 0xFF >>> 8 - n; if (i < nbits) { out.writeByte((int) (mask | i)); } else { out.writeByte(mask | nbits); long length = i - nbits; for (; (length & ~0x7F) != 0; length >>>= 7) { - out.writeByte((int) ((length & 0x7F) | 0x80)); + out.writeByte((int) (length & 0x7F | 0x80)); } out.writeByte((int) length); } @@ -251,8 +262,9 @@ private static void encodeInteger(ByteBuf out, int mask, int n, long i) { * Encode string literal according to Section 5.2. */ private void encodeStringLiteral(ByteBuf out, CharSequence string) { - int huffmanLength = hpackHuffmanEncoder.getEncodedLength(string); - if (huffmanLength < string.length()) { + int huffmanLength; + if (string.length() >= huffCodeThreshold + && (huffmanLength = hpackHuffmanEncoder.getEncodedLength(string)) < string.length()) { encodeInteger(out, 0x80, 7, huffmanLength); hpackHuffmanEncoder.encode(out, string); } else { @@ -274,7 +286,7 @@ private void encodeStringLiteral(ByteBuf out, CharSequence string) { */ private void encodeLiteral(ByteBuf out, CharSequence name, CharSequence value, IndexType indexType, int nameIndex) { - boolean nameIndexValid = nameIndex != -1; + boolean nameIndexValid = nameIndex != NOT_FOUND; switch (indexType) { case INCREMENTAL: encodeInteger(out, 0x40, 6, nameIndexValid ? 
nameIndex : 0); @@ -296,7 +308,7 @@ private void encodeLiteral(ByteBuf out, CharSequence name, CharSequence value, I private int getNameIndex(CharSequence name) { int index = HpackStaticTable.getIndex(name); - if (index == -1) { + if (index == HpackStaticTable.NOT_FOUND) { index = getIndex(name); if (index >= 0) { index += HpackStaticTable.length; @@ -348,15 +360,16 @@ HpackHeaderField getHeaderField(int index) { * Returns the header entry with the lowest index value for the header field. Returns null if * header field is not in the dynamic table. */ - private HeaderEntry getEntry(CharSequence name, CharSequence value) { + private HeaderEntry getEntryInsensitive(CharSequence name, CharSequence value) { if (length() == 0 || name == null || value == null) { return null; } int h = AsciiString.hashCode(name); int i = index(h); for (HeaderEntry e = headerFields[i]; e != null; e = e.next) { - // To avoid short circuit behavior a bitwise operator is used instead of a boolean operator. - if (e.hash == h && (equalsConstantTime(name, e.name) & equalsConstantTime(value, e.value)) != 0) { + // Check the value before then name, as it is more likely the value will be different incase there is no + // match. + if (e.hash == h && equalsVariableTime(value, e.value) && equalsVariableTime(name, e.name)) { return e; } } @@ -369,7 +382,7 @@ private HeaderEntry getEntry(CharSequence name, CharSequence value) { */ private int getIndex(CharSequence name) { if (length() == 0 || name == null) { - return -1; + return NOT_FOUND; } int h = AsciiString.hashCode(name); int i = index(h); @@ -378,14 +391,14 @@ private int getIndex(CharSequence name) { return getIndex(e.index); } } - return -1; + return NOT_FOUND; } /** * Compute the index into the dynamic table given the index in the header entry. */ private int getIndex(int index) { - return index == -1 ? -1 : index - head.before.index + 1; + return index == NOT_FOUND ? 
NOT_FOUND : index - head.before.index + 1; } /** diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHeaderField.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHeaderField.java index 0b0d6468956..2e030078a24 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHeaderField.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHeaderField.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -31,7 +31,8 @@ */ package io.netty.handler.codec.http2; -import static io.netty.util.internal.ObjectUtil.checkNotNull; +import static io.netty.handler.codec.http2.HpackUtil.equalsVariableTime; +import static java.util.Objects.requireNonNull; class HpackHeaderField { @@ -49,31 +50,16 @@ static long sizeOf(CharSequence name, CharSequence value) { // This constructor can only be used if name and value are ISO-8859-1 encoded. 
HpackHeaderField(CharSequence name, CharSequence value) { - this.name = checkNotNull(name, "name"); - this.value = checkNotNull(value, "value"); + this.name = requireNonNull(name, "name"); + this.value = requireNonNull(value, "value"); } final int size() { return name.length() + value.length() + HEADER_ENTRY_OVERHEAD; } - @Override - public final int hashCode() { - // TODO(nmittler): Netty's build rules require this. Probably need a better implementation. - return super.hashCode(); - } - - @Override - public final boolean equals(Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof HpackHeaderField)) { - return false; - } - HpackHeaderField other = (HpackHeaderField) obj; - // To avoid short circuit behavior a bitwise operator is used instead of a boolean operator. - return (HpackUtil.equalsConstantTime(name, other.name) & HpackUtil.equalsConstantTime(value, other.value)) != 0; + public final boolean equalsForTest(HpackHeaderField other) { + return equalsVariableTime(name, other.name) && equalsVariableTime(value, other.value); } @Override diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanDecoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanDecoder.java index 9549c66ba00..eac86e22824 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanDecoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanDecoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,212 +34,4703 @@ import io.netty.buffer.ByteBuf; import io.netty.util.AsciiString; import io.netty.util.ByteProcessor; -import io.netty.util.internal.ObjectUtil; -import io.netty.util.internal.ThrowableUtil; import static io.netty.handler.codec.http2.Http2Error.COMPRESSION_ERROR; -import static io.netty.handler.codec.http2.Http2Exception.connectionError; -final class HpackHuffmanDecoder { +final class HpackHuffmanDecoder implements ByteProcessor { - private static final Http2Exception EOS_DECODED = ThrowableUtil.unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - EOS Decoded"), HpackHuffmanDecoder.class, "decode(..)"); - private static final Http2Exception INVALID_PADDING = ThrowableUtil.unknownStackTrace( - connectionError(COMPRESSION_ERROR, "HPACK - Invalid Padding"), HpackHuffmanDecoder.class, "decode(..)"); + /* Scroll to the bottom! */ - private static final Node ROOT = buildTree(HpackUtil.HUFFMAN_CODES, HpackUtil.HUFFMAN_CODE_LENGTHS); + private static final byte HUFFMAN_COMPLETE = 1; + private static final byte HUFFMAN_EMIT_SYMBOL = 1 << 1; + private static final byte HUFFMAN_FAIL = 1 << 2; - private final DecoderProcessor processor; - - HpackHuffmanDecoder(int initialCapacity) { - processor = new DecoderProcessor(initialCapacity); - } + private static final int HUFFMAN_COMPLETE_SHIFT = HUFFMAN_COMPLETE << 8; + private static final int HUFFMAN_EMIT_SYMBOL_SHIFT = HUFFMAN_EMIT_SYMBOL << 8; + private static final int HUFFMAN_FAIL_SHIFT = HUFFMAN_FAIL << 8; /** - * Decompresses the given Huffman coded string literal. 
- * - * @param buf the string literal to be decoded - * @return the output stream for the compressed data - * @throws Http2Exception EOS Decoded + * A table of byte tuples (state, flags, output). They are packed together as: + *

    + * state<<16 + flags<<8 + output */ - public AsciiString decode(ByteBuf buf, int length) throws Http2Exception { - processor.reset(); - buf.forEachByte(buf.readerIndex(), length, processor); - buf.skipBytes(length); - return processor.end(); - } + private static final int[] HUFFS = new int[] { + // Node 0 (Root Node, never emits symbols.) + 4 << 16, + 5 << 16, + 7 << 16, + 8 << 16, + 11 << 16, + 12 << 16, + 16 << 16, + 19 << 16, + 25 << 16, + 28 << 16, + 32 << 16, + 35 << 16, + 42 << 16, + 49 << 16, + 57 << 16, + (64 << 16) + (HUFFMAN_COMPLETE << 8), - private static final class Node { + // Node 1 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 48, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 49, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 50, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 97, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 99, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 101, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 105, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 111, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 115, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 116, + 13 << 16, + 14 << 16, + 17 << 16, + 18 << 16, + 20 << 16, + 21 << 16, - private final int symbol; // terminal nodes have a symbol - private final int bits; // number of bits matched by the node - private final Node[] children; // internal nodes have children + // Node 2 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 48, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 49, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 50, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 97, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (22 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 99, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 101, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 105, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 111, - /** - * Construct an internal node - */ - Node() { - symbol = 0; - bits = 8; - children = new Node[256]; - } + // Node 3 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 48, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 49, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 50, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 97, - /** - * Construct a terminal node - * - * @param symbol the symbol the node represents - * @param bits the number of bits matched by this node - */ - Node(int symbol, int bits) { - assert bits > 0 && bits <= 8; - this.symbol = symbol; - this.bits = bits; - children = null; - } + // Node 4 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 48, + (56 << 16) + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 48, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 49, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 49, - private boolean isTerminal() { - return children == null; - } - } + // Node 5 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 50, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 50, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 97, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 97, - private static Node buildTree(int[] codes, byte[] lengths) { - Node root = new Node(); - for (int i = 0; i < codes.length; i++) { - insert(root, i, codes[i], lengths[i]); - } - return root; - } + // Node 6 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 99, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 101, + (2 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 105, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 105, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 111, - private static void insert(Node root, int symbol, int code, byte length) { - // traverse tree using the most significant bytes of code - Node current = root; - while (length > 8) { - if (current.isTerminal()) { - throw new IllegalStateException("invalid Huffman code: prefix not unique"); - } - length -= 8; - int i = (code >>> length) & 0xFF; - if (current.children[i] == null) { - current.children[i] = new Node(); - } - current = current.children[i]; - } + // Node 7 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 99, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 99, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 101, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 101, - Node terminal = new Node(symbol, length); - int shift = 8 - length; - int start = (code << shift) & 0xFF; - int end = 1 << shift; - for (int i = start; i < start + end; i++) { - current.children[i] = terminal; - } - } + // Node 8 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (6 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 105, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 105, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 105, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 111, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 111, - private static final class DecoderProcessor implements ByteProcessor { - private final int initialCapacity; - private byte[] bytes; - private int index; - private Node node; - private int current; - private int currentBits; - private int symbolBits; - - DecoderProcessor(int initialCapacity) { - this.initialCapacity = ObjectUtil.checkPositive(initialCapacity, "initialCapacity"); - } + // Node 9 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 115, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 116, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 32, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 37, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 45, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 46, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 47, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 51, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 52, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 53, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 54, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 55, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) 
<< 8) + 56, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 57, - void reset() { - node = ROOT; - current = 0; - currentBits = 0; - symbolBits = 0; - bytes = new byte[initialCapacity]; - index = 0; - } + // Node 10 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 115, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 116, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 32, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 37, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 45, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 46, - /* - * The idea here is to consume whole bytes at a time rather than individual bits. node - * represents the Huffman tree, with all bit patterns denormalized as 256 children. Each - * child represents the last 8 bits of the huffman code. The parents of each child each - * represent the successive 8 bit chunks that lead up to the last most part. 8 bit bytes - * from buf are used to traverse these tree until a terminal node is found. - * - * current is a bit buffer. The low order bits represent how much of the huffman code has - * not been used to traverse the tree. Thus, the high order bits are just garbage. - * currentBits represents how many of the low order bits of current are actually valid. - * currentBits will vary between 0 and 15. - * - * symbolBits is the number of bits of the symbol being decoded, *including* all those of - * the parent nodes. 
symbolBits tells how far down the tree we are. For example, when - * decoding the invalid sequence {0xff, 0xff}, currentBits will be 0, but symbolBits will be - * 16. This is used to know if buf ended early (before consuming a whole symbol) or if - * there is too much padding. - */ - @Override - public boolean process(byte value) throws Http2Exception { - current = (current << 8) | (value & 0xFF); - currentBits += 8; - symbolBits += 8; - // While there are unconsumed bits in current, keep consuming symbols. - do { - node = node.children[(current >>> (currentBits - 8)) & 0xFF]; - currentBits -= node.bits; - if (node.isTerminal()) { - if (node.symbol == HpackUtil.HUFFMAN_EOS) { - throw EOS_DECODED; - } - append(node.symbol); - node = ROOT; - // Upon consuming a whole symbol, reset the symbol bits to the number of bits - // left over in the byte. - symbolBits = currentBits; - } - } while (currentBits >= 8); - return true; - } + // Node 11 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 115, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 115, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 116, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 116, - AsciiString end() throws Http2Exception { - /* - * We have consumed all the bytes in buf, but haven't consumed all the symbols. We may be on - * a partial symbol, so consume until there is nothing left. 
This will loop at most 2 times. - */ - while (currentBits > 0) { - node = node.children[(current << (8 - currentBits)) & 0xFF]; - if (node.isTerminal() && node.bits <= currentBits) { - if (node.symbol == HpackUtil.HUFFMAN_EOS) { - throw EOS_DECODED; - } - currentBits -= node.bits; - append(node.symbol); - node = ROOT; - symbolBits = currentBits; - } else { - break; - } - } + // Node 12 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 32, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 37, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 45, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 46, + + // Node 13 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 32, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 32, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 37, + (56 << 16) + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 37, + + // Node 14 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 45, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 45, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 46, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 46, + + // Node 15 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 47, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 51, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 52, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 53, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 54, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 55, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 56, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 57, + + // Node 16 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (40 << 16) + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 47, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 51, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 52, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 53, + + // Node 17 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 47, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 47, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 51, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 51, + + // Node 18 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 52, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 52, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (6 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 53, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 53, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 53, + + // Node 19 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 54, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 55, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 56, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 57, + + // Node 20 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 54, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 54, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 55, + (56 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 55, + + // Node 21 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 56, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 56, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 57, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 57, + + // Node 22 + 26 << 16, + 27 << 16, + 29 << 16, + 30 << 16, + 33 << 16, + 34 << 16, + 36 << 16, + 37 << 16, + 43 << 16, + 46 << 16, + 50 << 16, + 53 << 16, + 58 << 16, + 61 << 16, + 65 << 16, + (68 << 16) + (HUFFMAN_COMPLETE << 8), + + // Node 23 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 61, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 65, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 95, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 98, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 100, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 102, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 103, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 104, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 108, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 109, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 110, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 112, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 114, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 117, + 38 << 16, + 39 << 16, + + // Node 24 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (22 << 16) + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 61, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 65, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 95, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 98, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 100, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 102, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 103, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 104, + + // Node 25 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 61, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 65, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 95, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 98, + + // Node 26 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (31 << 16) 
+ (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 61, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 61, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 65, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 65, + + // Node 27 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 95, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 95, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 98, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 98, + + // Node 28 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 100, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 102, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (40 << 16) + ((HUFFMAN_COMPLETE 
| HUFFMAN_EMIT_SYMBOL) << 8) + 103, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 104, + + // Node 29 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 100, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 100, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 102, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 102, + + // Node 30 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 103, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 103, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 104, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 104, + + // Node 31 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (22 << 
16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 108, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 109, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 110, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 112, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 114, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 117, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 58, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 66, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 67, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 68, + + // Node 32 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 108, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 109, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 110, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 112, + + // Node 33 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 
8) + 108, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 108, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 108, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 109, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 109, + + // Node 34 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 110, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 110, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 112, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 112, + + // Node 35 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 114, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 117, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 58, + (1 << 16) 
+ (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 66, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 67, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 68, + + // Node 36 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 114, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 114, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 117, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 117, + + // Node 37 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 58, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 66, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 67, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (40 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 68, + + // Node 38 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 58, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 58, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 66, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 66, + + // Node 39 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 67, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 67, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 68, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 68, + + // Node 40 + 44 << 16, + 45 << 16, + 47 << 16, + 48 << 16, + 51 << 16, + 52 << 16, + 54 << 16, + 55 << 16, + 59 << 16, + 60 << 16, + 62 << 16, + 63 << 16, + 66 << 16, + 67 << 16, + 69 << 16, + (72 << 16) + (HUFFMAN_COMPLETE << 8), + + // Node 41 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 69, + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 70, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 71, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 72, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 73, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 74, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 75, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 76, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 77, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 78, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 79, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 80, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 81, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 82, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 83, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 84, + + // Node 42 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 69, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 70, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 71, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 72, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 73, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 74, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 75, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 76, + + // Node 43 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 
8) + 69, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 70, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 71, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 72, + + // Node 44 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 69, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 69, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 70, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 70, + + // Node 45 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 71, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 71, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (10 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 72, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 72, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 72, + + // Node 46 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 73, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 74, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 75, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 76, + + // Node 47 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 73, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 73, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 74, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 74, + + // Node 48 + (3 << 
16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 75, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 75, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 76, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 76, + + // Node 49 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 77, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 78, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 79, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 80, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 81, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 82, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 83, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 84, + + // Node 50 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 77, + (2 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 78, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 78, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 79, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 80, + + // Node 51 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 77, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 77, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 78, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 78, + + // Node 52 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 79, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 79, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, 
+ (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 80, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 80, + + // Node 53 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 81, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 82, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 83, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 84, + + // Node 54 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 81, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 81, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 82, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 82, + + // Node 55 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 
83, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 83, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 83, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 84, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 84, + + // Node 56 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 85, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 86, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 87, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 89, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 106, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 107, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 113, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 118, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 119, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 120, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 121, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 122, + 70 << 16, + 71 << 16, + 73 << 16, + (74 << 16) + (HUFFMAN_COMPLETE << 8), + + // Node 57 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 85, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 86, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 87, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (22 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 89, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 106, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 107, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 113, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 118, + + // Node 58 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 85, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 86, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 87, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 89, + + // Node 59 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 85, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 85, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (24 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 86, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 86, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 86, + + // Node 60 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 87, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 87, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 89, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 89, + + // Node 61 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 106, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 107, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 113, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 118, + + // Node 62 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 
106, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 106, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 106, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 107, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 107, + + // Node 63 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 113, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 113, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 118, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 118, + + // Node 64 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 119, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 120, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 121, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (22 << 16) + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 122, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 38, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 42, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 44, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 59, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 88, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 90, + 75 << 16, + 78 << 16, + + // Node 65 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 119, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 120, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 121, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 122, + + // Node 66 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 119, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 119, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 
8) + 120, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 120, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 120, + + // Node 67 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 121, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 121, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 122, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 122, + + // Node 68 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 38, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 42, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 44, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 59, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 88, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 90, + 76 << 16, + 77 << 16, + 79 << 16, + 81 << 16, + + // Node 69 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 38, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + 
(9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 42, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 44, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 59, + + // Node 70 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 38, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 38, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 42, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 42, + + // Node 71 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 44, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 44, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (15 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 59, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 59, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 59, + + // Node 72 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 88, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 90, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 33, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 34, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 40, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 41, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 63, + 80 << 16, + 82 << 16, + 84 << 16, + + // Node 73 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 88, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 88, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 90, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 90, + + // Node 74 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 33, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + 
(22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 34, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 40, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 41, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 63, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 39, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 43, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 124, + 83 << 16, + 85 << 16, + 88 << 16, + + // Node 75 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 33, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 34, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 40, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 41, + + // Node 76 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 33, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 33, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (10 << 
16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 34, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 34, + + // Node 77 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 40, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 40, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 41, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 41, + + // Node 78 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 63, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 39, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 43, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 124, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 35, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 62, + 86 << 16, + 87 << 16, + 89 << 16, + 90 << 16, + + // Node 79 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (10 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 63, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 63, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 63, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 39, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 43, + + // Node 80 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 39, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 39, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 43, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 43, + + // Node 81 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 124, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 35, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 62, 
+ (HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 36, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 64, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 91, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 93, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 126, + 91 << 16, + 92 << 16, + + // Node 82 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 124, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 124, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 35, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 62, + + // Node 83 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 35, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 35, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 62, + (56 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 62, + + // Node 84 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8), + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 36, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 64, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 91, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 93, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 126, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 94, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 125, + 93 << 16, + 94 << 16, + + // Node 85 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8), + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 36, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 64, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 91, + + // Node 86 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8), + (41 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8), + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8), + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 36, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 36, + + // Node 87 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 64, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 64, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 91, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 91, + + // Node 88 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 93, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 126, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 94, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 125, + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 60, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 96, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 123, + 95 << 16, + + // Node 89 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 93, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 93, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 126, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 126, + + // Node 90 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 94, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 125, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 60, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 96, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 123, + 96 << 16, + 110 << 16, + + // Node 91 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (15 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 94, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 94, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 94, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 125, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 125, + + // Node 92 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 60, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 96, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 123, + 97 << 16, + 101 << 16, + 111 << 16, + 133 << 16, + + // Node 93 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 60, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 60, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (24 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 96, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 96, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 96, + + // Node 94 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 123, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 123, + 98 << 16, + 99 << 16, + 102 << 16, + 105 << 16, + 112 << 16, + 119 << 16, + 134 << 16, + 153 << 16, + + // Node 95 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 92, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 195, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 208, + 100 << 16, + 103 << 16, + 104 << 16, + 106 << 16, + 107 << 16, + 113 << 16, + 116 << 16, + 120 << 16, + 126 << 16, + 135 << 16, + 142 << 16, + 154 << 16, + 169 << 16, + + // Node 96 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 92, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 195, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 208, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 128, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 130, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 131, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 162, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 184, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 194, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 224, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 226, + 108 << 16, + 109 << 16, + + // Node 97 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (9 << 
16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 92, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 195, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 208, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 128, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 130, + + // Node 98 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 92, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 92, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 195, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 195, + + // Node 99 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 208, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) 
+ 208, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 208, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 128, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 130, + + // Node 100 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 128, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 128, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 130, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 130, + + // Node 101 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 131, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 162, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 184, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 194, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 224, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, 
+ (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 226, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 153, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 161, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 167, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 172, + + // Node 102 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 131, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 162, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 184, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 194, + + // Node 103 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 131, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 131, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 162, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) 
<< 8) + 162, + + // Node 104 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 184, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 184, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 194, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 194, + + // Node 105 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 224, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 226, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 153, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 161, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 167, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 172, + + // Node 106 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (24 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 224, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 224, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 224, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 226, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 226, + + // Node 107 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 153, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 161, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 167, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 172, + + // Node 108 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 153, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 153, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL 
<< 8) + 161, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 161, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 161, + + // Node 109 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 167, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 167, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 172, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 172, + + // Node 110 + 114 << 16, + 115 << 16, + 117 << 16, + 118 << 16, + 121 << 16, + 123 << 16, + 127 << 16, + 130 << 16, + 136 << 16, + 139 << 16, + 143 << 16, + 146 << 16, + 155 << 16, + 162 << 16, + 170 << 16, + 180 << 16, + + // Node 111 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 176, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 177, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 179, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 209, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 216, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 217, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 227, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 229, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 230, + 122 << 16, + 124 << 16, + 125 << 16, + 128 << 16, + 129 << 16, + 131 << 16, + 
132 << 16, + + // Node 112 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 176, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 177, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 179, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 209, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 216, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 217, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 227, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 229, + + // Node 113 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 176, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 177, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 179, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 209, + + // Node 114 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 
8) + 176, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 176, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 176, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 177, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 177, + + // Node 115 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 179, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 179, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 209, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 209, + + // Node 116 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 216, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 217, + (2 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 227, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 227, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 229, + + // Node 117 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 216, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 216, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 217, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 217, + + // Node 118 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 227, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 227, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 229, + (41 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 229, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 229, + + // Node 119 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 230, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 129, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 132, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 133, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 134, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 136, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 146, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 154, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 156, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 160, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 163, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 164, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 169, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 170, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 173, + + // Node 120 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 230, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 129, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 132, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 133, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 134, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 136, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 146, + + // Node 121 + (3 << 
16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 230, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 230, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 129, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 132, + + // Node 122 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 129, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 129, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 132, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 132, + + // Node 123 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 133, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + 
(23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 134, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 136, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 146, + + // Node 124 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 133, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 133, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 134, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 134, + + // Node 125 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 136, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 136, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) 
+ 146, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 146, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 146, + + // Node 126 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 154, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 156, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 160, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 163, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 164, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 169, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 170, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 173, + + // Node 127 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 154, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 156, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 160, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (40 << 
16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 163, + + // Node 128 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 154, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 154, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 156, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 156, + + // Node 129 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 160, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 160, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 163, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 163, + + // Node 130 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 164, + (2 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 169, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 169, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 170, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 173, + + // Node 131 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 164, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 164, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 169, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 169, - // Section 5.2. String Literal Representation - // A padding strictly longer than 7 bits MUST be treated as a decoding error. - // Padding not corresponding to the most significant bits of the code - // for the EOS symbol (0xFF) MUST be treated as a decoding error. 
- int mask = (1 << symbolBits) - 1; - if (symbolBits > 7 || (current & mask) != mask) { - throw INVALID_PADDING; + // Node 132 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 170, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 170, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 173, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 173, + + // Node 133 + 137 << 16, + 138 << 16, + 140 << 16, + 141 << 16, + 144 << 16, + 145 << 16, + 147 << 16, + 150 << 16, + 156 << 16, + 159 << 16, + 163 << 16, + 166 << 16, + 171 << 16, + 174 << 16, + 181 << 16, + 190 << 16, + + // Node 134 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 178, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 181, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 185, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 186, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 187, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 189, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 190, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 196, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 198, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 228, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 232, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 233, + 148 << 16, + 149 << 16, + 151 << 16, + 152 << 16, + + // Node 135 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (22 << 
16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 178, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 181, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 185, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 186, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 187, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 189, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 190, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 196, + + // Node 136 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 178, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 181, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 185, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 186, + + // Node 137 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (24 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 178, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 178, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 178, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 181, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 181, + + // Node 138 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 185, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 185, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 186, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 186, + + // Node 139 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 187, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 189, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (23 
<< 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 190, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 196, + + // Node 140 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 187, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 187, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 189, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 189, + + // Node 141 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 190, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 190, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 196, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 
196, + + // Node 142 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 198, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 228, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 232, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 233, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 1, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 135, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 137, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 138, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 139, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 140, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 141, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 143, + + // Node 143 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 198, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 228, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 232, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 233, + + // Node 144 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (15 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 198, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 198, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 198, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 228, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 228, + + // Node 145 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 232, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 232, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 233, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 233, + + // Node 146 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 1, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 135, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 137, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 138, + (1 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 139, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 139, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 140, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 141, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 143, + + // Node 147 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 1, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 135, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 137, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 138, + + // Node 148 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 1, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 1, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (31 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 135, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 135, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 135, + + // Node 149 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 137, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 137, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 138, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 138, + + // Node 150 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 139, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 140, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 141, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 143, + + // Node 151 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (10 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 139, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 139, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 139, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 140, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 140, + + // Node 152 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 141, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 141, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 143, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 143, + + // Node 153 + 157 << 16, + 158 << 16, + 160 << 16, + 161 << 16, + 164 << 16, + 165 << 16, + 167 << 16, + 168 << 16, + 172 << 16, + 173 << 16, + 175 << 16, + 177 << 16, + 182 << 16, + 185 << 16, + 191 << 16, + 207 << 16, + + // Node 154 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 147, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 149, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 150, + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 151, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 152, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 155, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 157, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 158, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 165, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 166, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 168, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 174, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 175, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 180, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 182, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 183, + + // Node 155 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 147, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 149, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 150, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 151, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 152, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 155, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 157, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 158, + + // Node 156 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 147, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 
149, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 149, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 150, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 151, + + // Node 157 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 147, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 147, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 149, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 149, + + // Node 158 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 150, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 150, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL 
<< 8) + 151, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 151, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 151, + + // Node 159 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 152, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 155, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 157, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 158, + + // Node 160 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 152, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 152, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 155, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 155, + + // Node 161 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 
8) + 157, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 157, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 157, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 158, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 158, + + // Node 162 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 165, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 166, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 168, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 174, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 175, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 180, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 182, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 183, + + // Node 163 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 165, + (2 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 166, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 166, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 168, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 174, + + // Node 164 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 165, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 165, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 166, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 166, + + // Node 165 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 168, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 168, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (10 
<< 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 174, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 174, + + // Node 166 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 175, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 180, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 182, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 183, + + // Node 167 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 175, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 175, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 180, + (56 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 180, + + // Node 168 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 182, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 182, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 183, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 183, + + // Node 169 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 188, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 191, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 197, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 231, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 239, + 176 << 16, + 178 << 16, + 179 << 16, + 183 << 16, + 184 << 16, + 186 << 16, + 187 << 16, + 192 << 16, + 199 << 16, + 208 << 16, + 223 << 16, + + // Node 170 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 188, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 191, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 197, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 231, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 239, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 9, + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 142, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 144, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 145, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 148, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 159, + + // Node 171 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 188, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 191, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 197, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 231, + + // Node 172 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 188, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 188, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 191, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 191, + + // Node 173 + (3 << 
16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 197, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 197, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 231, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 231, + + // Node 174 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 239, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 9, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 142, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 144, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 145, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 148, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 159, + + // Node 175 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 
8) + 239, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 239, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 239, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 9, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 142, + + // Node 176 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 9, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 9, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 142, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 142, + + // Node 177 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 144, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 145, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL 
<< 8) + 148, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 148, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 159, + + // Node 178 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 144, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 144, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 145, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 145, + + // Node 179 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 148, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 148, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 159, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 159, + + // Node 180 + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 171, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 206, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 215, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 225, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 236, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 237, + 188 << 16, + 189 << 16, + 193 << 16, + 196 << 16, + 200 << 16, + 203 << 16, + 209 << 16, + 216 << 16, + 224 << 16, + 238 << 16, + + // Node 181 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 171, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 206, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 215, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 225, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 236, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 237, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 199, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 207, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 234, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 235, + + // Node 182 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 171, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 206, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL 
<< 8) + 215, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 215, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 225, + + // Node 183 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 171, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 171, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 206, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 206, + + // Node 184 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 215, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 215, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 225, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 225, + + // Node 185 + (2 << 
16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 236, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 237, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 199, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 207, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 234, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 235, + + // Node 186 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 236, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 236, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 237, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 237, + + // Node 187 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 199, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL 
<< 8) + 207, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 207, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 234, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 235, + + // Node 188 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 199, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 199, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 207, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 207, + + // Node 189 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 234, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 234, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (10 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 235, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 235, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 235, + + // Node 190 + 194 << 16, + 195 << 16, + 197 << 16, + 198 << 16, + 201 << 16, + 202 << 16, + 204 << 16, + 205 << 16, + 210 << 16, + 213 << 16, + 217 << 16, + 220 << 16, + 225 << 16, + 231 << 16, + 239 << 16, + 246 << 16, + + // Node 191 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 192, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 193, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 200, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 201, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 202, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 205, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 210, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 213, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 218, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 219, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 238, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 240, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 242, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 243, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 255, + 206 << 16, + + // Node 192 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 192, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 193, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 200, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 201, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (22 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 202, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 205, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 210, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 213, + + // Node 193 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 192, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 193, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 200, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 201, + + // Node 194 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 192, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 192, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 193, + (41 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 193, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 193, + + // Node 195 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 200, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 200, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 201, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 201, + + // Node 196 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 202, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 205, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 210, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 213, + + // Node 197 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (15 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 202, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 202, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 202, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 205, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 205, + + // Node 198 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 210, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 210, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 213, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 213, + + // Node 199 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 218, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 219, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 238, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 240, + (1 << 16) 
+ (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 242, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 243, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 255, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 203, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 204, + + // Node 200 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 218, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 219, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 238, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 240, + + // Node 201 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 218, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 218, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 
219, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 219, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 219, + + // Node 202 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 238, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 238, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 240, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 240, + + // Node 203 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 242, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 243, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 255, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 203, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 204, + + // Node 204 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (6 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 242, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 242, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 242, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 243, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 243, + + // Node 205 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 255, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 255, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 203, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 204, + + // Node 206 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 203, + (56 << 16) + 
((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 203, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 204, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 204, + + // Node 207 + 211 << 16, + 212 << 16, + 214 << 16, + 215 << 16, + 218 << 16, + 219 << 16, + 221 << 16, + 222 << 16, + 226 << 16, + 228 << 16, + 232 << 16, + 235 << 16, + 240 << 16, + 243 << 16, + 247 << 16, + 250 << 16, + + // Node 208 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 211, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 212, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 214, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 221, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 222, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 223, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 241, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 244, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 245, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 246, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 247, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 248, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 250, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 251, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 252, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 253, + + // Node 209 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 211, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 212, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 214, + (1 << 16) 
+ (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 221, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 222, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 223, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 241, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 244, + + // Node 210 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 211, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 212, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 214, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 221, + + // Node 211 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 211, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 211, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (10 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 212, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 212, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 212, + + // Node 212 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 214, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 214, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 221, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 221, + + // Node 213 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 222, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 223, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 241, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (40 << 16) + ((HUFFMAN_COMPLETE | 
HUFFMAN_EMIT_SYMBOL) << 8) + 244, + + // Node 214 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 222, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 222, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 223, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 223, + + // Node 215 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 241, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 241, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 244, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 244, + + // Node 216 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 245, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 246, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 
8) + 247, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 247, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 248, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 250, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 251, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 252, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 253, + + // Node 217 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 245, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 246, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 247, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 248, + + // Node 218 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 245, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 245, + (3 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 246, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 246, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 246, + + // Node 219 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 247, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 247, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 248, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 248, + + // Node 220 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 250, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 251, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 252, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (23 
<< 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 253, + + // Node 221 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 250, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 250, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 251, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 251, + + // Node 222 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 252, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 252, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 253, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 253, + + // Node 223 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 254, + 227 << 16, + 229 << 16, + 230 << 16, + 233 << 16, + 234 << 16, + 236 << 16, + 237 << 16, + 241 << 16, + 242 << 16, + 244 << 16, + 245 
<< 16, + 248 << 16, + 249 << 16, + 251 << 16, + 252 << 16, + + // Node 224 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 254, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 2, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 3, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 4, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 5, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 6, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 7, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 8, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 11, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 12, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 14, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 15, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 16, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 17, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 18, + + // Node 225 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 254, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 2, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 3, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 4, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 5, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 6, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 7, + + // Node 226 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + 
(10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 254, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 254, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 2, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 3, + + // Node 227 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 2, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 2, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 3, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 3, + + // Node 228 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 4, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 5, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL 
<< 8) + 6, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 6, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 7, + + // Node 229 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 4, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 4, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 5, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 5, + + // Node 230 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 6, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 6, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 7, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 7, + + // 
Node 231 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 8, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 11, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 12, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 14, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 15, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 16, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 17, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 18, + + // Node 232 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 8, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 11, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 12, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 14, + + // Node 233 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (24 
<< 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 8, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 8, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 11, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 11, + + // Node 234 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 12, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 12, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 14, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 14, + + // Node 235 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 15, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 16, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 
8) + 17, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 17, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 18, + + // Node 236 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 15, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 15, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 16, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 16, + + // Node 237 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 17, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 17, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 18, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 18, + + // Node 238 + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) 
+ 19, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 20, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 21, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 23, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 24, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 25, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 26, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 27, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 28, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 29, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 30, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 31, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 127, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 220, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 249, + 253 << 16, + + // Node 239 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 19, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 20, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 21, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 23, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 24, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 25, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 26, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 27, + + // Node 240 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 19, + (2 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 20, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 20, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 21, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 23, + + // Node 241 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 19, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 19, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 20, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 20, + + // Node 242 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 21, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 21, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 
23, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 23, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 23, + + // Node 243 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 24, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 25, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 26, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 27, + + // Node 244 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 24, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 24, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 25, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 25, + + // Node 245 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL 
<< 8) + 26, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 26, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 26, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 27, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 27, + + // Node 246 + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 28, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 29, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 30, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 31, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 127, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 220, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 249, + 254 << 16, + 255 << 16, + + // Node 247 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 28, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, 
+ (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 29, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 30, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 31, + + // Node 248 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 28, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 28, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 29, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 29, + + // Node 249 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 30, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 30, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (31 << 16) + 
(HUFFMAN_EMIT_SYMBOL << 8) + 31, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 31, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 31, + + // Node 250 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 127, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 220, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 249, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 10, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 13, + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 22, + HUFFMAN_FAIL << 8, + + // Node 251 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 127, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 127, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 220, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 220, + + // Node 252 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, 
+ (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 249, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 249, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 10, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 13, + (1 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (22 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 22, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + + // Node 253 + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 10, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 13, + (2 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (9 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (23 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (40 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 22, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + + // Node 254 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 10, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 10, + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + 
(24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 13, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 13, + + // Node 255 + (3 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (6 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (10 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (15 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (24 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (31 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (41 << 16) + (HUFFMAN_EMIT_SYMBOL << 8) + 22, + (56 << 16) + ((HUFFMAN_COMPLETE | HUFFMAN_EMIT_SYMBOL) << 8) + 22, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + HUFFMAN_FAIL << 8, + }; + + private static final Http2Exception BAD_ENCODING = + Http2Exception.newStatic(COMPRESSION_ERROR, "HPACK - Bad Encoding", + Http2Exception.ShutdownHint.HARD_SHUTDOWN, HpackHuffmanDecoder.class, "decode(..)"); + + private byte[] dest; + private int k; + private int state; + + HpackHuffmanDecoder() { } + + /** + * Decompresses the given Huffman coded string literal. + * + * @param buf the string literal to be decoded + * @return the output stream for the compressed data + * @throws Http2Exception EOS Decoded + */ + public AsciiString decode(ByteBuf buf, int length) throws Http2Exception { + if (length == 0) { + return AsciiString.EMPTY_STRING; + } + dest = new byte[length * 8 / 5]; + try { + int readerIndex = buf.readerIndex(); + // Using ByteProcessor to reduce bounds-checking and reference-count checking during byte-by-byte + // processing of the ByteBuf. 
+ int endIndex = buf.forEachByte(readerIndex, length, this); + if (endIndex == -1) { + // We did consume the requested length + buf.readerIndex(readerIndex + length); + if ((state & HUFFMAN_COMPLETE_SHIFT) != HUFFMAN_COMPLETE_SHIFT) { + throw BAD_ENCODING; + } + return new AsciiString(dest, 0, k, false); } - return new AsciiString(bytes, 0, index, false); + // The process(...) method returned before the requested length was requested. This means there + // was a bad encoding detected. + buf.readerIndex(endIndex); + throw BAD_ENCODING; + } finally { + dest = null; + k = 0; + state = 0; } + } - private void append(int i) { - if (bytes.length == index) { - // Choose an expanding strategy depending on how big the buffer already is. - // 1024 was choosen as a good guess and we may be able to investigate more if there are better choices. - // See also https://github.com/netty/netty/issues/6846 - final int newLength = bytes.length >= 1024 ? bytes.length + initialCapacity : bytes.length << 1; - byte[] newBytes = new byte[newLength]; - System.arraycopy(bytes, 0, newBytes, 0, bytes.length); - bytes = newBytes; - } - bytes[index++] = (byte) i; + /** + * This should never be called from anything but this class itself! 
+ */ + @Override + public boolean process(byte input) { + return processNibble(input >> 4) && processNibble(input); + } + + private boolean processNibble(int input) { + // The high nibble of the flags byte of each row is always zero + // (low nibble after shifting row by 12), since there are only 3 flag bits + int index = state >> 12 | (input & 0x0F); + state = HUFFS[index]; + if ((state & HUFFMAN_FAIL_SHIFT) != 0) { + return false; + } + if ((state & HUFFMAN_EMIT_SYMBOL_SHIFT) != 0) { + // state is always positive so can cast without mask here + dest[k++] = (byte) state; } + return true; } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanEncoder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanEncoder.java index d271c6533f8..9a6a9a75127 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanEncoder.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackHuffmanEncoder.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. 
* You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -34,8 +34,8 @@ import io.netty.buffer.ByteBuf; import io.netty.util.AsciiString; import io.netty.util.ByteProcessor; -import io.netty.util.internal.ObjectUtil; -import io.netty.util.internal.PlatformDependent; + +import static java.util.Objects.requireNonNull; final class HpackHuffmanEncoder { @@ -66,14 +66,12 @@ private HpackHuffmanEncoder(int[] codes, byte[] lengths) { * @param data the string literal to be Huffman encoded */ public void encode(ByteBuf out, CharSequence data) { - ObjectUtil.checkNotNull(out, "out"); + requireNonNull(out, "out"); if (data instanceof AsciiString) { AsciiString string = (AsciiString) data; + encodeProcessor.out = out; try { - encodeProcessor.out = out; string.forEachByte(encodeProcessor); - } catch (Exception e) { - PlatformDependent.throwException(e); } finally { encodeProcessor.end(); } @@ -117,14 +115,9 @@ private void encodeSlowPath(ByteBuf out, CharSequence data) { int getEncodedLength(CharSequence data) { if (data instanceof AsciiString) { AsciiString string = (AsciiString) data; - try { - encodedLengthProcessor.reset(); - string.forEachByte(encodedLengthProcessor); - return encodedLengthProcessor.length(); - } catch (Exception e) { - PlatformDependent.throwException(e); - return -1; - } + encodedLengthProcessor.reset(); + string.forEachByte(encodedLengthProcessor); + return encodedLengthProcessor.length(); } else { return getEncodedLengthSlowPath(data); } @@ -135,7 +128,7 @@ private int getEncodedLengthSlowPath(CharSequence data) { for (int i = 0; i < data.length(); i++) { len += lengths[data.charAt(i) & 0xFF]; } - return (int) ((len + 7) >> 3); + return (int) (len + 7 >> 3); } private final class EncodeProcessor implements ByteProcessor { @@ -188,7 
+181,7 @@ void reset() { } int length() { - return (int) ((len + 7) >> 3); + return (int) (len + 7 >> 3); } } } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackStaticTable.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackStaticTable.java index 9621daf2e08..8ab65ce4bf5 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackStaticTable.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackStaticTable.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -37,12 +37,14 @@ import java.util.Arrays; import java.util.List; -import static io.netty.handler.codec.http2.HpackUtil.equalsConstantTime; +import static io.netty.handler.codec.http2.HpackUtil.equalsVariableTime; final class HpackStaticTable { + static final int NOT_FOUND = -1; + // Appendix A: Static Table - // http://tools.ietf.org/html/rfc7541#appendix-A + // https://tools.ietf.org/html/rfc7541#appendix-A private static final List STATIC_TABLE = Arrays.asList( /* 1 */ newEmptyHeaderField(":authority"), /* 2 */ newHeaderField(":method", "GET"), @@ -117,6 +119,8 @@ private static HpackHeaderField newHeaderField(String name, String value) { private static final CharSequenceMap STATIC_INDEX_BY_NAME = createMap(); + private static final 
int MAX_SAME_NAME_FIELD_INDEX = maxSameNameFieldIndex(); + /** * The number of header fields in the static table. */ @@ -136,7 +140,7 @@ static HpackHeaderField getEntry(int index) { static int getIndex(CharSequence name) { Integer index = STATIC_INDEX_BY_NAME.get(name); if (index == null) { - return -1; + return NOT_FOUND; } return index; } @@ -145,33 +149,43 @@ static int getIndex(CharSequence name) { * Returns the index value for the given header field in the static table. Returns -1 if the * header field is not in the static table. */ - static int getIndex(CharSequence name, CharSequence value) { + static int getIndexInsensitive(CharSequence name, CharSequence value) { int index = getIndex(name); - if (index == -1) { - return -1; + if (index == NOT_FOUND) { + return NOT_FOUND; + } + + // Compare values for the first name match + HpackHeaderField entry = getEntry(index); + if (equalsVariableTime(value, entry.value)) { + return index; } // Note this assumes all entries for a given header field are sequential. - while (index <= length) { - HpackHeaderField entry = getEntry(index); - if (equalsConstantTime(name, entry.name) == 0) { - break; + index++; + while (index <= MAX_SAME_NAME_FIELD_INDEX) { + entry = getEntry(index); + if (!equalsVariableTime(name, entry.name)) { + // As far as fields with the same name are placed in the table sequentialy + // and INDEX_BY_NAME returns index of the fist position, - it's safe to + // exit immediatly. 
+ return NOT_FOUND; } - if (equalsConstantTime(value, entry.value) != 0) { + if (equalsVariableTime(value, entry.value)) { return index; } index++; } - return -1; + return NOT_FOUND; } // create a map CharSequenceMap header name to index value to allow quick lookup private static CharSequenceMap createMap() { int length = STATIC_TABLE.size(); @SuppressWarnings("unchecked") - CharSequenceMap ret = new CharSequenceMap(true, - UnsupportedValueConverter.instance(), length); + CharSequenceMap ret = new CharSequenceMap<>(true, + UnsupportedValueConverter.instance(), length); // Iterate through the static table in reverse order to // save the smallest index for a given name in the map. for (int index = length; index > 0; index--) { @@ -182,6 +196,26 @@ private static CharSequenceMap createMap() { return ret; } + /** + * Returns the last position in the array that contains multiple + * fields with the same name. Starting from this position, all + * names are unique. Similary to {@link #getIndexInsensitive(CharSequence, CharSequence)} method + * assumes all entries for a given header field are sequential + */ + private static int maxSameNameFieldIndex() { + final int length = STATIC_TABLE.size(); + HpackHeaderField cursor = getEntry(length); + for (int index = length - 1; index > 0; index--) { + HpackHeaderField entry = getEntry(index); + if (equalsVariableTime(entry.name, cursor.name)) { + return index + 1; + } else { + cursor = entry; + } + } + return length; + } + // singleton private HpackStaticTable() { } diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackUtil.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackUtil.java index 62c24aa280b..5c51c52e8d0 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackUtil.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/HpackUtil.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -21,7 +21,7 @@ * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, @@ -65,6 +65,16 @@ static int equalsConstantTime(CharSequence s1, CharSequence s2) { return ConstantTimeUtils.equalsConstantTime(s1, s2); } + /** + * Compare two {@link CharSequence}s. + * @param s1 the first value. + * @param s2 the second value. + * @return {@code false} if not equal. {@code true} if equal. + */ + static boolean equalsVariableTime(CharSequence s1, CharSequence s2) { + return AsciiString.contentEquals(s1, s2); + } + // Section 6.2. Literal Header Field Representation enum IndexType { INCREMENTAL, // Section 6.2.1. Literal Header Field with Incremental Indexing @@ -73,7 +83,7 @@ enum IndexType { } // Appendix B: Huffman Codes - // http://tools.ietf.org/html/rfc7541#appendix-B + // https://tools.ietf.org/html/rfc7541#appendix-B static final int[] HUFFMAN_CODES = { 0x1ff8, 0x7fffd8, diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ChannelDuplexHandler.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ChannelDuplexHandler.java index b595696f4e9..abd902c9519 100644 --- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ChannelDuplexHandler.java +++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ChannelDuplexHandler.java @@ -5,7 +5,7 @@ * version 2.0 (the "License"); you may not use this file except in compliance * with the License. 
You may obtain a copy of the License at: * - * http://www.apache.org/licenses/LICENSE-2.0 + * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT @@ -16,14 +16,14 @@ package io.netty.handler.codec.http2; -import io.netty.channel.ChannelDuplexHandler; +import io.netty.channel.ChannelHandler; import io.netty.channel.ChannelHandlerContext; import io.netty.channel.ChannelPipeline; import io.netty.util.internal.StringUtil; import io.netty.util.internal.UnstableApi; /** - * A {@link ChannelDuplexHandler} providing additional functionality for HTTP/2. Specifically it allows to: + * A {@link ChannelHandler} providing additional functionality for HTTP/2. Specifically it allows to: *