diff --git a/.asf.yaml b/.asf.yaml
index bec521f1a8c71..5c3b8cb98d964 100644
--- a/.asf.yaml
+++ b/.asf.yaml
@@ -35,12 +35,9 @@ github:
# Enable projects for project management boards
projects: true
enabled_merge_buttons:
- # enable squash button:
squash: true
- # disable merge button:
merge: false
- # disable rebase button:
- rebase: false
+ rebase: true
protected_branches:
master:
required_status_checks:
@@ -82,6 +79,7 @@ github:
branch-2.9: {}
branch-2.10: {}
branch-2.11: {}
+ branch-3.0: {}
notifications:
commits: commits@pulsar.apache.org
diff --git a/.github/ISSUE_TEMPLATE/pip.md b/.github/ISSUE_TEMPLATE/pip.md
new file mode 100644
index 0000000000000..e0bc586669493
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/pip.md
@@ -0,0 +1,9 @@
+---
+name: PIP
+about: '[DEPRECATED. see pip folder] Submit a Pulsar Improvement Proposal (PIP)'
+title: 'DEPRECATED - Read https://github.com/apache/pulsar/blob/master/pip/README.md'
+labels: PIP
+---
+
+We have stopped using GitHub issues to hold the PIP content.
+Please read [here](https://github.com/apache/pulsar/blob/master/pip/README.md) to learn how to submit a PIP.
diff --git a/.github/ISSUE_TEMPLATE/pip.yml b/.github/ISSUE_TEMPLATE/pip.yml
deleted file mode 100644
index d494d69947252..0000000000000
--- a/.github/ISSUE_TEMPLATE/pip.yml
+++ /dev/null
@@ -1,82 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-
-name: PIP
-title: "PIP-XYZ: "
-description: Submit a Pulsar Improvement Proposal (PIP)
-labels: [ "PIP" ]
-body:
- - type: markdown
- attributes:
- value: |
- Thank you very much for submitting a Pulsar Improvement Proposal (PIP)! Here are instructions for creating a PIP using this issue template.
-
- Please send a note to the dev@pulsar.apache.org mailing list to start the discussion, using subject prefix `[DISCUSS] PIP-XYZ`. To determine the appropriate PIP number XYZ, inspect the [mailing list](https://lists.apache.org/list.html?dev@pulsar.apache.org) for the most recent PIP. Add 1 to that PIP's number to get your PIP's number.
-
- Based on the discussion and feedback, some changes might be applied by the author(s) to the text of the proposal.
-
- Once some consensus is reached, there will be a vote to formally approve the proposal. The vote will be held on the dev@pulsar.apache.org mailing list. Everyone is welcome to vote on the proposal, though it will considered to be binding only the vote of PMC members. It will be required to have a lazy majority of at least 3 binding +1s votes. The vote should stay open for at least 48 hours.
-
- When the vote is closed, if the outcome is positive, the state of the proposal is updated and the Pull Requests associated with this proposal can start to get merged into the master branch.
- - type: textarea
- attributes:
- label: Motivation
- description: |
- Explain why this change is needed, what benefits it would bring to Apache Pulsar and what problem it's trying to solve.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Goal
- description: |
- Define the scope of this proposal. Given the motivation stated above, what are the problems that this proposal is addressing and what other items will be considering out of scope, perhaps to be left to a different PIP.
- validations:
- required: true
- - type: textarea
- attributes:
- label: API Changes
- description: |
- Illustrate all the proposed changes to the API or wire protocol, with examples of all the newly added classes/methods, including Javadoc.
- - type: textarea
- attributes:
- label: Implementation
- description: |
- This should be a detailed description of all the changes that are expected to be made. It should be detailed enough that any developer that is familiar with Pulsar internals would be able to understand all the parts of the code changes for this proposal.
-
- This should also serve as documentation for any person that is trying to understand or debug the behavior of a certain feature.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Security Considerations
- description: |
- A detailed description of the security details that ought to be considered for the PIP. This is most relevant for any new HTTP endpoints, new Pulsar Protocol Commands, and new security features. The goal is to describe details like which role will have permission to perform an action.
-
- If there is uncertainty for this section, please submit the PIP and request for feedback on the mailing list.
- validations:
- required: true
- - type: textarea
- attributes:
- label: Alternatives
- description: |
- If there are alternatives that were already considered by the authors or, after the discussion, by the community, and were rejected, please list them here along with the reason why they were rejected.
- - type: textarea
- attributes:
- label: Anything else?
- - type: markdown
- attributes:
- value: "Thanks for completing our form!"
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 01ac26570b2d1..fdb8459024b1f 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -24,7 +24,7 @@ Master Issue: #xyz
PIP: #xyz
-
+
### Motivation
diff --git a/.github/actions/gradle-enterprise/action.yml b/.github/actions/gradle-enterprise/action.yml
deleted file mode 100644
index 935e76d3cd645..0000000000000
--- a/.github/actions/gradle-enterprise/action.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied. See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-name: Configure Gradle Enterprise integration
-description: Configure Gradle Enterprise when secret GE_ACCESS_TOKEN is available
-inputs:
- token:
- description: 'The token for accessing Gradle Enterprise'
- required: true
-runs:
- using: composite
- steps:
- - run: |
- if [[ -n "${{ inputs.token }}" ]]; then
- echo "::group::Configuring Gradle Enterprise for build"
- cp .mvn/ge-extensions.xml .mvn/extensions.xml
- echo "GRADLE_ENTERPRISE_ACCESS_KEY=${{ inputs.token }}" >> $GITHUB_ENV
- echo "::endgroup::"
- fi
- shell: bash
\ No newline at end of file
diff --git a/.github/changes-filter.yaml b/.github/changes-filter.yaml
index 3ec2fc22946da..be6faa957887d 100644
--- a/.github/changes-filter.yaml
+++ b/.github/changes-filter.yaml
@@ -3,13 +3,13 @@
all:
- '**'
docs:
- - 'site2/**'
- - 'deployment/**'
- - '.asf.yaml'
- '*.md'
- '**/*.md'
+ - '.asf.yaml'
- '.github/changes-filter.yaml'
- '.github/ISSUE_TEMPLATE/**'
+ - '.idea/**'
+ - 'deployment/**'
- 'wiki/**'
tests:
- added|modified: '**/src/test/java/**/*.java'
diff --git a/.github/workflows/ci-go-functions.yaml b/.github/workflows/ci-go-functions.yaml
index f4d1ae0b99887..f96a6d6586e6e 100644
--- a/.github/workflows/ci-go-functions.yaml
+++ b/.github/workflows/ci-go-functions.yaml
@@ -32,7 +32,7 @@ concurrency:
cancel-in-progress: true
env:
- MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
jobs:
preconditions:
diff --git a/.github/workflows/ci-maven-cache-update.yaml b/.github/workflows/ci-maven-cache-update.yaml
index 87570586fde6e..15fefaf3f1645 100644
--- a/.github/workflows/ci-maven-cache-update.yaml
+++ b/.github/workflows/ci-maven-cache-update.yaml
@@ -42,13 +42,14 @@ on:
- cron: '30 */12 * * *'
env:
- MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
jobs:
update-maven-dependencies-cache:
name: Update Maven dependency cache for ${{ matrix.name }}
env:
JOB_NAME: Update Maven dependency cache for ${{ matrix.name }}
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ${{ matrix.runs-on }}
timeout-minutes: 45
@@ -77,11 +78,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Detect changed files
if: ${{ github.event_name != 'schedule' }}
id: changes
diff --git a/.github/workflows/ci-owasp-dependency-check.yaml b/.github/workflows/ci-owasp-dependency-check.yaml
index 194d88c582d42..06edbae51adde 100644
--- a/.github/workflows/ci-owasp-dependency-check.yaml
+++ b/.github/workflows/ci-owasp-dependency-check.yaml
@@ -24,7 +24,7 @@ on:
workflow_dispatch:
env:
- MAVEN_OPTS: -Xss1500k -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
jobs:
run-owasp-dependency-check:
@@ -32,6 +32,7 @@ jobs:
name: Check ${{ matrix.branch }}
env:
JOB_NAME: Check ${{ matrix.branch }}
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: 45
strategy:
@@ -39,6 +40,7 @@ jobs:
matrix:
include:
- branch: master
+ - branch: branch-3.0
- branch: branch-2.11
- branch: branch-2.10
jdk: 11
@@ -56,12 +58,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- if: ${{ matrix.branch == 'master' }}
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Cache local Maven repository
uses: actions/cache@v3
timeout-minutes: 5
diff --git a/.github/workflows/pulsar-ci-flaky.yaml b/.github/workflows/pulsar-ci-flaky.yaml
index acfa66ff43c74..555ebdb17292f 100644
--- a/.github/workflows/pulsar-ci-flaky.yaml
+++ b/.github/workflows/pulsar-ci-flaky.yaml
@@ -36,7 +36,7 @@ concurrency:
cancel-in-progress: true
env:
- MAVEN_OPTS: -Xss1500k -Xmx1024m -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
# defines the retention period for the intermediate build artifacts needed for rerunning a failed build job
# it's possible to rerun individual failed jobs when the build artifacts are available
# if the artifacts have already been expired, the complete workflow can be rerun by closing and reopening the PR or by rebasing the PR
@@ -94,6 +94,7 @@ jobs:
env:
JOB_NAME: Flaky tests suite
COLLECT_COVERAGE: "${{ needs.preconditions.outputs.collect_coverage }}"
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: 100
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
@@ -104,11 +105,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
diff --git a/.github/workflows/pulsar-ci.yaml b/.github/workflows/pulsar-ci.yaml
index 721a1d2eafc72..57b9b082da266 100644
--- a/.github/workflows/pulsar-ci.yaml
+++ b/.github/workflows/pulsar-ci.yaml
@@ -36,7 +36,7 @@ concurrency:
cancel-in-progress: true
env:
- MAVEN_OPTS: -Xss1500k -Xmx1024m -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3
+ MAVEN_OPTS: -Xss1500k -Xmx1024m -Daether.connector.http.reuseConnections=false -Daether.connector.requestTimeout=60000 -Dhttp.keepAlive=false -Dmaven.wagon.http.pool=false -Dmaven.wagon.http.retryHandler.class=standard -Dmaven.wagon.http.retryHandler.count=3 -Dmaven.wagon.http.retryHandler.requestSentEnabled=true -Dmaven.wagon.http.serviceUnavailableRetryStrategy.class=standard -Dmaven.wagon.rto=60000
# defines the retention period for the intermediate build artifacts needed for rerunning a failed build job
# it's possible to rerun individual failed jobs when the build artifacts are available
# if the artifacts have already been expired, the complete workflow can be rerun by closing and reopening the PR or by rebasing the PR
@@ -95,6 +95,7 @@ jobs:
name: Build and License check
env:
JOB_NAME: Build and License check
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: 60
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
@@ -105,11 +106,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -175,6 +171,7 @@ jobs:
env:
JOB_NAME: CI - Unit - ${{ matrix.name }}
COLLECT_COVERAGE: "${{ needs.preconditions.outputs.collect_coverage }}"
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
runs-on: ubuntu-20.04
timeout-minutes: ${{ matrix.timeout || 60 }}
needs: ['preconditions', 'build-and-license-check']
@@ -201,6 +198,10 @@ jobs:
- name: Pulsar IO
group: PULSAR_IO
timeout: 75
+ - name: Pulsar IO - Elastic Search
+ group: PULSAR_IO_ELASTIC
+ - name: Pulsar IO - Kafka Connect Adaptor
+ group: PULSAR_IO_KAFKA_CONNECT
- name: Pulsar Client
group: CLIENT
@@ -211,11 +212,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -391,6 +387,8 @@ jobs:
timeout-minutes: 60
needs: ['preconditions', 'build-and-license-check']
if: ${{ needs.preconditions.outputs.docs_only != 'true'}}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -398,11 +396,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -469,6 +462,7 @@ jobs:
env:
JOB_NAME: CI - Integration - ${{ matrix.name }}
PULSAR_TEST_IMAGE_NAME: apachepulsar/java-test-image:latest
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
strategy:
fail-fast: false
matrix:
@@ -513,11 +507,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -721,11 +710,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Install gh-actions-artifact-client.js
uses: apache/pulsar-test-infra/gh-actions-artifact-client/dist@master
@@ -739,6 +723,8 @@ jobs:
timeout-minutes: 60
needs: ['preconditions', 'build-and-license-check']
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -746,11 +732,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -798,6 +779,7 @@ jobs:
# build docker image
# include building of Pulsar SQL, Connectors, Offloaders and server distros
mvn -B -am -pl pulsar-sql/presto-distribution,distribution/io,distribution/offloaders,distribution/server,distribution/shell,tests/docker-images/latest-version-image install \
+ -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \
-Pmain,docker -Dmaven.test.skip=true -Ddocker.squash=true \
-Dspotbugs.skip=true -Dlicense.skip=true -Dcheckstyle.skip=true -Drat.skip=true
@@ -850,6 +832,7 @@ jobs:
env:
JOB_NAME: CI - System - ${{ matrix.name }}
PULSAR_TEST_IMAGE_NAME: apachepulsar/pulsar-test-latest-version:latest
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
strategy:
fail-fast: false
matrix:
@@ -885,11 +868,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -1080,6 +1058,7 @@ jobs:
env:
JOB_NAME: CI Flaky - System - ${{ matrix.name }}
PULSAR_TEST_IMAGE_NAME: apachepulsar/pulsar-test-latest-version:latest
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
strategy:
fail-fast: false
matrix:
@@ -1097,11 +1076,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -1207,11 +1181,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Install gh-actions-artifact-client.js
uses: apache/pulsar-test-infra/gh-actions-artifact-client/dist@master
@@ -1225,6 +1194,8 @@ jobs:
timeout-minutes: 120
needs: ['preconditions', 'integration-tests']
if: ${{ needs.preconditions.outputs.docs_only != 'true' }}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -1232,11 +1203,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Cache Maven dependencies
uses: actions/cache@v3
timeout-minutes: 5
@@ -1263,6 +1229,8 @@ jobs:
timeout-minutes: 120
needs: [ 'preconditions', 'integration-tests' ]
if: ${{ needs.preconditions.outputs.need_owasp == 'true' }}
+ env:
+ GRADLE_ENTERPRISE_ACCESS_KEY: ${{ secrets.GE_ACCESS_TOKEN }}
steps:
- name: checkout
uses: actions/checkout@v3
@@ -1270,11 +1238,6 @@ jobs:
- name: Tune Runner VM
uses: ./.github/actions/tune-runner-vm
- - name: Configure Gradle Enterprise
- uses: ./.github/actions/gradle-enterprise
- with:
- token: ${{ secrets.GE_ACCESS_TOKEN }}
-
- name: Setup ssh access to build runner VM
# ssh access is enabled for builds in own forks
if: ${{ github.repository != 'apache/pulsar' && github.event_name == 'pull_request' }}
@@ -1310,8 +1273,10 @@ jobs:
cd $HOME
$GITHUB_WORKSPACE/build/pulsar_ci_tool.sh restore_tar_from_github_actions_artifacts pulsar-maven-repository-binaries
# Projects dependent on flume, hdfs, hbase, and presto currently excluded from the scan.
- - name: run "clean verify" to trigger dependency check
- run: mvn -q -B -ntp verify -PskipDocker,owasp-dependency-check -DskipTests -pl '!pulsar-sql,!distribution/io,!distribution/offloaders,!tiered-storage/file-system,!pulsar-io/flume,!pulsar-io/hbase,!pulsar-io/hdfs2,!pulsar-io/hdfs3,!pulsar-io/docs,!pulsar-io/jdbc/openmldb'
+ - name: trigger dependency check
+ run: |
+ mvn -B -ntp verify -PskipDocker,skip-all,owasp-dependency-check -Dcheckstyle.skip=true -DskipTests \
+ -pl '!pulsar-sql,!distribution/server,!distribution/io,!distribution/offloaders,!pulsar-sql/presto-distribution,!tiered-storage/file-system,!pulsar-io/flume,!pulsar-io/hbase,!pulsar-io/hdfs2,!pulsar-io/hdfs3,!pulsar-io/docs,!pulsar-io/jdbc/openmldb'
- name: Upload report
uses: actions/upload-artifact@v3
diff --git a/.gitignore b/.gitignore
index c584baaa0a0b8..cd00c44200059 100644
--- a/.gitignore
+++ b/.gitignore
@@ -97,4 +97,3 @@ test-reports/
# Gradle Enterprise
.mvn/.gradle-enterprise/
-.mvn/extensions.xml
diff --git a/.idea/icon.svg b/.idea/icon.svg
new file mode 100644
index 0000000000000..bf9b232def4a4
--- /dev/null
+++ b/.idea/icon.svg
@@ -0,0 +1,33 @@
+
+
+
+
\ No newline at end of file
diff --git a/.mvn/ge-extensions.xml b/.mvn/extensions.xml
similarity index 97%
rename from .mvn/ge-extensions.xml
rename to .mvn/extensions.xml
index 1c7a1611c1bcc..872764f899827 100644
--- a/.mvn/ge-extensions.xml
+++ b/.mvn/extensions.xml
@@ -24,7 +24,7 @@
com.gradlegradle-enterprise-maven-extension
- 1.16.3
+ 1.17.1com.gradle
diff --git a/bin/bookkeeper b/bin/bookkeeper
index fb516a98acdc2..0cc07dd49aba5 100755
--- a/bin/bookkeeper
+++ b/bin/bookkeeper
@@ -168,7 +168,7 @@ OPTS="$OPTS -Dlog4j.configurationFile=`basename $BOOKIE_LOG_CONF`"
# Allow Netty to use reflection access
OPTS="$OPTS -Dio.netty.tryReflectionSetAccessible=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
diff --git a/bin/function-localrunner b/bin/function-localrunner
index 45a37cb306794..2e0aa0f6dffe2 100755
--- a/bin/function-localrunner
+++ b/bin/function-localrunner
@@ -40,13 +40,15 @@ PULSAR_MEM=${PULSAR_MEM:-"-Xmx128m -XX:MaxDirectMemorySize=128m"}
PULSAR_GC=${PULSAR_GC:-"-XX:+UseZGC -XX:+PerfDisableSharedMem -XX:+AlwaysPreTouch"}
# Garbage collection log.
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
-# java version has space, use [[ -n $PARAM ]] to judge if variable exists
-if [[ -n "$IS_JAVA_8" ]]; then
- PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xloggc:logs/pulsar_gc_%p.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=20M"}
-else
-# After jdk 9, gc log param should config like this. Ignoring version less than jdk 8
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
+if [[ -z "$IS_JAVA_8" ]]; then
+ # >= JDK 9
PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xlog:gc:logs/pulsar_gc_%p.log:time,uptime:filecount=10,filesize=20M"}
+ # '--add-opens' option is not supported in JDK 1.8
+ OPTS="$OPTS --add-opens java.base/sun.net=ALL-UNNAMED --add-opens java.base/java.lang=ALL-UNNAMED"
+else
+ # == JDK 1.8
+ PULSAR_GC_LOG=${PULSAR_GC_LOG:-"-Xloggc:logs/pulsar_gc_%p.log -XX:+PrintGCDetails -XX:+PrintGCDateStamps -XX:+UseGCLogFileRotation -XX:NumberOfGCLogFiles=10 -XX:GCLogFileSize=20M"}
fi
# Extra options to be passed to the jvm
diff --git a/bin/pulsar b/bin/pulsar
index a033de947d4b3..20ed1f7f22b0f 100755
--- a/bin/pulsar
+++ b/bin/pulsar
@@ -291,7 +291,7 @@ OPTS="$OPTS -Dzookeeper.clientTcpKeepAlive=true"
# Allow Netty to use reflection access
OPTS="$OPTS -Dio.netty.tryReflectionSetAccessible=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
@@ -307,6 +307,8 @@ if [[ -z "$IS_JAVA_8" ]]; then
OPTS="$OPTS --add-opens java.management/sun.management=ALL-UNNAMED"
# MBeanStatsGenerator
OPTS="$OPTS --add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED"
+ # LinuxInfoUtils
+ OPTS="$OPTS --add-opens java.base/jdk.internal.platform=ALL-UNNAMED"
fi
OPTS="-cp $PULSAR_CLASSPATH $OPTS"
diff --git a/bin/pulsar-admin-common.sh b/bin/pulsar-admin-common.sh
index 8223ac5b3bf24..8aa21c00f634d 100755
--- a/bin/pulsar-admin-common.sh
+++ b/bin/pulsar-admin-common.sh
@@ -91,7 +91,7 @@ PULSAR_CLASSPATH="`dirname $PULSAR_LOG_CONF`:$PULSAR_CLASSPATH"
OPTS="$OPTS -Dlog4j.configurationFile=`basename $PULSAR_LOG_CONF`"
OPTS="$OPTS -Djava.net.preferIPv4Stack=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
diff --git a/bin/pulsar-perf b/bin/pulsar-perf
index 47c02bc3d67d5..bdc1dc1ed8b8c 100755
--- a/bin/pulsar-perf
+++ b/bin/pulsar-perf
@@ -134,7 +134,7 @@ PULSAR_CLASSPATH="$PULSAR_JAR:$PULSAR_CLASSPATH:$PULSAR_EXTRA_CLASSPATH"
PULSAR_CLASSPATH="`dirname $PULSAR_LOG_CONF`:$PULSAR_CLASSPATH"
OPTS="$OPTS -Dlog4j.configurationFile=`basename $PULSAR_LOG_CONF` -Djava.net.preferIPv4Stack=true"
-IS_JAVA_8=`$JAVA -version 2>&1 |grep version|grep '"1\.8'`
+IS_JAVA_8=$( $JAVA -version 2>&1 | grep version | grep '"1\.8' )
# Start --add-opens options
# '--add-opens' option is not supported in jdk8
if [[ -z "$IS_JAVA_8" ]]; then
diff --git a/bouncy-castle/bc/pom.xml b/bouncy-castle/bc/pom.xml
index cc7e7952b69f0..d5882b4659528 100644
--- a/bouncy-castle/bc/pom.xml
+++ b/bouncy-castle/bc/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarbouncy-castle-parent
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/bouncy-castle/bcfips-include-test/pom.xml b/bouncy-castle/bcfips-include-test/pom.xml
index 44f0ada4630d7..e8348be9292cd 100644
--- a/bouncy-castle/bcfips-include-test/pom.xml
+++ b/bouncy-castle/bcfips-include-test/pom.xml
@@ -24,7 +24,7 @@
org.apache.pulsarbouncy-castle-parent
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
@@ -85,6 +85,28 @@
true
+
+ maven-resources-plugin
+
+
+ copy-resources
+ test-compile
+
+ copy-resources
+
+
+ ${project.build.testOutputDirectory}/certificate-authority
+ true
+
+
+ ${project.parent.parent.basedir}/tests/certificate-authority
+ false
+
+
+
+
+
+
diff --git a/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java b/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java
index 330d4fbc06897..e8e12838defef 100644
--- a/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java
+++ b/bouncy-castle/bcfips-include-test/src/test/java/org/apache/pulsar/client/TlsProducerConsumerBase.java
@@ -37,11 +37,6 @@
import org.testng.annotations.BeforeMethod;
public class TlsProducerConsumerBase extends ProducerConsumerBase {
- protected final String TLS_TRUST_CERT_FILE_PATH = "./src/test/resources/authentication/tls/cacert.pem";
- protected final String TLS_CLIENT_CERT_FILE_PATH = "./src/test/resources/authentication/tls/client-cert.pem";
- protected final String TLS_CLIENT_KEY_FILE_PATH = "./src/test/resources/authentication/tls/client-key.pem";
- protected final String TLS_SERVER_CERT_FILE_PATH = "./src/test/resources/authentication/tls/broker-cert.pem";
- protected final String TLS_SERVER_KEY_FILE_PATH = "./src/test/resources/authentication/tls/broker-key.pem";
private final String clusterName = "use";
@BeforeMethod(alwaysRun = true)
@@ -63,9 +58,9 @@ protected void cleanup() throws Exception {
protected void internalSetUpForBroker() throws Exception {
conf.setBrokerServicePortTls(Optional.of(0));
conf.setWebServicePortTls(Optional.of(0));
- conf.setTlsCertificateFilePath(TLS_SERVER_CERT_FILE_PATH);
- conf.setTlsKeyFilePath(TLS_SERVER_KEY_FILE_PATH);
- conf.setTlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH);
+ conf.setTlsCertificateFilePath(BROKER_CERT_FILE_PATH);
+ conf.setTlsKeyFilePath(BROKER_KEY_FILE_PATH);
+ conf.setTlsTrustCertsFilePath(CA_CERT_FILE_PATH);
conf.setClusterName(clusterName);
conf.setTlsRequireTrustedClientCertOnConnect(true);
Set tlsProtocols = Sets.newConcurrentHashSet();
@@ -81,12 +76,12 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl)
}
ClientBuilder clientBuilder = PulsarClient.builder().serviceUrl(lookupUrl)
- .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false)
+ .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).enableTls(true).allowTlsInsecureConnection(false)
.operationTimeout(1000, TimeUnit.MILLISECONDS);
if (addCertificates) {
Map authParams = new HashMap<>();
- authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
- authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
+ authParams.put("tlsCertFile", getTlsFileForClient("admin.cert"));
+ authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8"));
clientBuilder.authentication(AuthenticationTls.class.getName(), authParams);
}
pulsarClient = clientBuilder.build();
@@ -94,15 +89,15 @@ protected void internalSetUpForClient(boolean addCertificates, String lookupUrl)
protected void internalSetUpForNamespace() throws Exception {
Map authParams = new HashMap<>();
- authParams.put("tlsCertFile", TLS_CLIENT_CERT_FILE_PATH);
- authParams.put("tlsKeyFile", TLS_CLIENT_KEY_FILE_PATH);
+ authParams.put("tlsCertFile", getTlsFileForClient("admin.cert"));
+ authParams.put("tlsKeyFile", getTlsFileForClient("admin.key-pk8"));
if (admin != null) {
admin.close();
}
admin = spy(PulsarAdmin.builder().serviceHttpUrl(brokerUrlTls.toString())
- .tlsTrustCertsFilePath(TLS_TRUST_CERT_FILE_PATH).allowTlsInsecureConnection(false)
+ .tlsTrustCertsFilePath(CA_CERT_FILE_PATH).allowTlsInsecureConnection(false)
.authentication(AuthenticationTls.class.getName(), authParams).build());
admin.clusters().createCluster(clusterName, ClusterData.builder()
.serviceUrl(brokerUrl.toString())
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem
deleted file mode 100644
index e2b44e0bf0c42..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem
+++ /dev/null
@@ -1,71 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number: 15537474201172114493 (0xd7a0327703a8fc3d)
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN=CARoot
- Validity
- Not Before: Feb 22 06:26:33 2023 GMT
- Not After : Feb 19 06:26:33 2033 GMT
- Subject: C=US, ST=CA, O=Apache, OU=Apache Pulsar, CN=localhost
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- Public-Key: (2048 bit)
- Modulus:
- 00:af:bf:b7:2d:98:ad:9d:f6:da:a3:13:d4:62:0f:
- 98:be:1c:a2:89:22:ba:6f:d5:fd:1f:67:e3:91:03:
- 98:80:81:0e:ed:d8:f6:70:7f:2c:36:68:3d:53:ea:
- 58:3a:a6:d5:89:66:4b:bd:1e:57:71:13:6d:4b:11:
- e5:40:a5:76:84:24:92:40:58:80:96:c9:1f:2c:c4:
- 55:eb:a3:79:73:70:5c:37:9a:89:ed:2f:ba:6b:e3:
- 82:7c:69:4a:02:54:8b:81:5e:3c:bf:4c:8a:cb:ea:
- 2c:5e:83:e7:b7:10:08:5f:82:58:a3:89:d1:da:92:
- ba:2a:28:ee:30:28:3f:5b:ae:10:71:96:c7:e1:12:
- c5:b0:1a:ad:44:6f:44:3a:11:4a:9a:3c:0f:8d:06:
- 80:7b:34:ef:3f:6c:f4:5e:c5:44:54:1e:c8:dd:c7:
- 80:85:80:d9:68:e6:c6:53:03:77:e1:fe:18:61:07:
- 77:05:4c:ed:59:bc:5d:41:38:6a:ef:5d:a1:b2:60:
- 98:d4:48:28:95:02:8a:0e:fd:cf:7b:1b:d2:11:cc:
- 10:0c:50:73:d7:cc:38:6c:83:dd:79:26:aa:90:c8:
- 9b:84:86:bc:59:e9:62:69:f4:98:1b:c4:80:78:7e:
- a0:1a:81:9d:d2:e1:66:dd:c4:cc:fc:63:04:ac:ec:
- a7:35
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Subject Alternative Name:
- DNS:localhost, IP Address:127.0.0.1
- Signature Algorithm: sha256WithRSAEncryption
- 5f:e0:73:7b:5e:db:c0:8b:5e:4c:43:5f:80:94:ca:0b:f8:e9:
- 9b:93:91:3d:b1:3a:99:ce:1c:fb:15:32:68:3e:b9:9c:52:d0:
- 4b:7f:17:09:ec:af:6b:05:3e:e2:a3:e6:cc:bb:53:d7:ea:4a:
- 82:3c:4e:a5:37:ca:f4:1e:38:e2:d6:a5:98:4d:ee:b9:e2:9a:
- 48:d2:9f:0a:bc:61:42:70:22:b9:fb:cd:73:72:fb:94:13:ac:
- 6e:c5:b6:4b:24:ef:0f:df:2d:e6:56:da:b2:76:e8:16:be:7f:
- 3f:1b:99:6e:32:3e:b9:f4:2b:35:72:c7:e4:c6:a5:92:68:c0:
- 1f:a0:f7:17:fd:a3:b6:73:98:d3:ea:1c:af:ea:7d:f8:a0:27:
- 40:dc:4e:8b:13:28:ba:65:60:c5:90:57:e8:54:c1:83:b4:9d:
- f0:ae:2a:de:27:57:e5:a2:e5:f4:87:1c:df:6b:dc:7b:43:ff:
- b6:be:0b:3b:b2:8b:1a:36:dc:e3:57:aa:52:ef:23:d6:50:d7:
- e4:72:8f:a0:0a:43:de:3d:f2:42:5b:fa:ed:1f:8d:0e:cf:c5:
- 6a:ce:3b:8e:fd:6b:68:01:a9:f9:d2:0e:0d:ac:39:8d:f5:6c:
- 80:f8:49:af:bb:b9:d4:81:b9:f3:b2:b6:ce:75:1c:20:e8:6a:
- 53:dc:26:86
------BEGIN CERTIFICATE-----
-MIIDCTCCAfGgAwIBAgIJANegMncDqPw9MA0GCSqGSIb3DQEBCwUAMBExDzANBgNV
-BAMMBkNBUm9vdDAeFw0yMzAyMjIwNjI2MzNaFw0zMzAyMTkwNjI2MzNaMFcxCzAJ
-BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYwFAYDVQQL
-Ew1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlsb2NhbGhvc3QwggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQCvv7ctmK2d9tqjE9RiD5i+HKKJIrpv1f0fZ+OR
-A5iAgQ7t2PZwfyw2aD1T6lg6ptWJZku9HldxE21LEeVApXaEJJJAWICWyR8sxFXr
-o3lzcFw3montL7pr44J8aUoCVIuBXjy/TIrL6ixeg+e3EAhfglijidHakroqKO4w
-KD9brhBxlsfhEsWwGq1Eb0Q6EUqaPA+NBoB7NO8/bPRexURUHsjdx4CFgNlo5sZT
-A3fh/hhhB3cFTO1ZvF1BOGrvXaGyYJjUSCiVAooO/c97G9IRzBAMUHPXzDhsg915
-JqqQyJuEhrxZ6WJp9JgbxIB4fqAagZ3S4WbdxMz8YwSs7Kc1AgMBAAGjHjAcMBoG
-A1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAX+Bz
-e17bwIteTENfgJTKC/jpm5ORPbE6mc4c+xUyaD65nFLQS38XCeyvawU+4qPmzLtT
-1+pKgjxOpTfK9B444talmE3uueKaSNKfCrxhQnAiufvNc3L7lBOsbsW2SyTvD98t
-5lbasnboFr5/PxuZbjI+ufQrNXLH5MalkmjAH6D3F/2jtnOY0+ocr+p9+KAnQNxO
-ixMoumVgxZBX6FTBg7Sd8K4q3idX5aLl9Icc32vce0P/tr4LO7KLGjbc41eqUu8j
-1lDX5HKPoApD3j3yQlv67R+NDs/Fas47jv1raAGp+dIODaw5jfVsgPhJr7u51IG5
-87K2znUcIOhqU9wmhg==
------END CERTIFICATE-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem
deleted file mode 100644
index 004bf8e21a7a9..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvAIBADANBgkqhkiG9w0BAQEFAASCBKYwggSiAgEAAoIBAQCvv7ctmK2d9tqj
-E9RiD5i+HKKJIrpv1f0fZ+ORA5iAgQ7t2PZwfyw2aD1T6lg6ptWJZku9HldxE21L
-EeVApXaEJJJAWICWyR8sxFXro3lzcFw3montL7pr44J8aUoCVIuBXjy/TIrL6ixe
-g+e3EAhfglijidHakroqKO4wKD9brhBxlsfhEsWwGq1Eb0Q6EUqaPA+NBoB7NO8/
-bPRexURUHsjdx4CFgNlo5sZTA3fh/hhhB3cFTO1ZvF1BOGrvXaGyYJjUSCiVAooO
-/c97G9IRzBAMUHPXzDhsg915JqqQyJuEhrxZ6WJp9JgbxIB4fqAagZ3S4WbdxMz8
-YwSs7Kc1AgMBAAECggEAAaWEK9MwXTiA1+JJrRmETtOp2isPIBkbI/4vLZ6hASM0
-ZpoPxQIMAf58BJs/dF03xu/EaeMs4oxSC9ABG9fxAk/tZtjta3w65Ip6W5jOfHxj
-AMpb3HMEBhq9kDjUTq1IGVAutYQcEMkC3WfS9e4ahfqMpguWgbu6LsbvZFgcL9mv
-pGnKv9YVe6Xk6isvqtq6G1af0rd7c//xF0i0e/qEo83Buok3gLEZOELZbcRxjUYc
-jnyglnXnwkGjuL4E3wgS3l73ZKsb6+AYoqhMPVz8t4/PN3tTrsBJKOSYo8KzIm0U
-ek9T8XmPbP0cuheRxp9Dp8TXJJQZK0N9jz+EL0ogQQKBgQDnavm8GpR4pap9cDOc
-+YI5s823b507pNdSU8elO9gLsP0JlFzv+sqghVko29r85D7Vn3MkgYTy0S4ANLCs
-0NFDY8N2QH6U1dTkk1QXZydVZDuKJ5SSpC4v+Vafl8yDxhB4Nlxhbm9vJEMfLcXh
-2kL6UlAuFDtYD0AdczwnHu5DjQKBgQDCauocm55FpcyDMMBO2CjurxcjBYS3S1xT
-Bz+sPtxJLjlKbAt8kSHUQcCcX9zhrQBfsT38LATCmKaOFqUW5/PPh2LcrxiMqlL1
-OJBUJ3Te2LTjlUn8r+DHv/69UIh5tchwRr3YgB0DuIs7jfmr4VfiOWTBtPVhoGFR
-1Wt60j30SQKBgHzreS26J2VNAFBALgxRf6OIVMbtgDG/FOCDCyU9vazp+F2gcd61
-QYYPFYcBzx9uUiDctroBFHRCyJMh3jEbc6ruAogl3m6XUxmkEeOkMk5dEerM3N2f
-tLL+5Gy385U6aI+LwKhzhcG4EGeXPNdjC362ykNldnddnB2Jo/H2N2XNAoGAdnft
-xpbxP+GDGKIZXTIM5zzcLWQMdiC+1n1BSHVZiGJZWMczzKknYw7aDq+/iekApE79
-xW8RS373ZvfXi3i2Mcx+6pjrrbOQL4tTL2SHq8+DknaDCi4mG7IbyUKMlxW1WO1S
-e929UGogtZ6S+DCte9WbVwosyFuRUetpvgLk67kCgYBWetihZjgBWrqVYT24TTRH
-KxzSzH1JgzzF9qgTdlhXDv9hC+Kc0uTKsgViesDqVuCOjkwzY5OQr9c6duO0fwwP
-qNk/qltdgjMC5iiv7duyukfbEuqKEdGGer9HFb7en96dZdVQJpYHaaslAGurtD80
-ejCQZgzR2XaHSuIQb0IUVQ==
------END PRIVATE KEY-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem
deleted file mode 100644
index 4ed454ec52a52..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem
+++ /dev/null
@@ -1,78 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number: 15358526754272834781 (0xd52472b5c5c3f4dd)
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN=CARoot
- Validity
- Not Before: Feb 22 06:26:32 2023 GMT
- Not After : Feb 19 06:26:32 2033 GMT
- Subject: CN=CARoot
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- Public-Key: (2048 bit)
- Modulus:
- 00:d0:87:45:0b:b4:83:11:ab:5a:b4:b6:1c:15:d4:
- 92:6a:0c:ac:3b:76:da:ff:8d:61:1b:bd:96:bd:d7:
- b0:70:23:87:d4:00:19:b2:e5:63:b7:80:58:4a:a4:
- d8:a8:a6:4f:eb:c8:8c:54:07:f5:56:52:23:64:fc:
- 66:54:39:f1:33:d0:e5:cc:b6:40:c8:d7:9a:9f:0e:
- c4:aa:57:b0:b3:e2:41:61:54:ca:1f:90:3b:18:ef:
- 60:d2:dc:ee:34:29:33:08:1b:37:4b:c4:ca:7e:cb:
- 94:7f:50:c4:8d:16:2f:90:03:94:07:bf:cf:52:ff:
- 24:54:56:ac:74:6c:d3:31:8c:ce:ef:b3:14:5a:5b:
- 8a:0c:83:2d:e1:f7:4d:60:2f:a1:4d:85:38:96:7f:
- 01:2f:9a:99:c7:2e:3d:09:4d:5e:53:df:fd:29:9f:
- ff:6b:e4:c2:a1:e3:67:85:db:e2:02:4d:6f:29:d4:
- e1:b3:a2:34:71:e0:90:dd:3f:b3:3f:86:41:8c:97:
- 09:e6:c3:de:a0:0e:d3:d4:3e:ce:ea:58:70:e6:9f:
- 24:a8:19:ca:df:61:b8:9c:c3:4e:53:d0:69:96:44:
- 84:76:2b:99:65:08:06:42:d4:b2:76:a7:2f:69:12:
- d5:c2:65:a6:ff:2c:77:73:00:e7:97:a5:77:6b:8a:
- 9c:3f
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Basic Constraints: critical
- CA:TRUE
- X509v3 Subject Key Identifier:
- A7:55:6B:51:10:75:CE:4E:5B:0B:64:FF:A9:6D:23:FB:57:88:59:69
- X509v3 Authority Key Identifier:
- keyid:A7:55:6B:51:10:75:CE:4E:5B:0B:64:FF:A9:6D:23:FB:57:88:59:69
- DirName:/CN=CARoot
- serial:D5:24:72:B5:C5:C3:F4:DD
-
- Signature Algorithm: sha256WithRSAEncryption
- 21:b1:4d:2b:14:1e:5a:91:5d:28:9e:ba:cb:ed:f1:96:da:c3:
- fa:8d:b5:74:e4:c5:fb:2f:3e:39:b4:a6:59:69:dd:84:64:a8:
- f0:e0:39:d2:ef:87:cc:8b:09:9f:0a:84:1f:d0:96:9c:4b:64:
- ea:08:09:26:1c:84:f4:06:5f:5e:b9:ba:b3:3c:6c:81:e0:93:
- 46:89:07:51:95:36:77:96:76:5d:a6:68:71:bb:60:88:a7:83:
- 27:7c:66:5d:64:36:cb:8e:bd:02:f7:fb:52:63:83:2f:fe:57:
- 4c:d5:0c:1b:ea:ef:88:ad:8c:a9:d4:b3:2c:b8:c4:e2:90:cb:
- 0f:24:0e:df:fc:2a:c6:83:08:49:45:b0:41:85:0e:b4:6f:f7:
- 18:56:7b:a5:0b:f6:1b:7f:72:88:ee:c8:ef:b3:e3:3e:f0:68:
- 1b:c9:55:bb:4d:21:65:6b:9e:5c:dd:60:4b:7f:f1:84:f8:67:
- 51:c2:60:88:42:6e:6c:9c:14:b8:96:b0:18:10:97:2c:94:e7:
- 79:14:7b:d1:a2:a4:d8:94:84:ac:a9:ca:17:95:c2:27:8b:2b:
- d8:19:6a:14:4b:c3:03:a6:30:55:40:bd:ce:0c:c2:d5:af:7d:
- 6d:65:89:6b:74:ed:21:12:f1:aa:c9:c9:ba:da:9a:ca:14:6c:
- 39:f4:02:32
------BEGIN CERTIFICATE-----
-MIIDGjCCAgKgAwIBAgIJANUkcrXFw/TdMA0GCSqGSIb3DQEBCwUAMBExDzANBgNV
-BAMMBkNBUm9vdDAeFw0yMzAyMjIwNjI2MzJaFw0zMzAyMTkwNjI2MzJaMBExDzAN
-BgNVBAMMBkNBUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANCH
-RQu0gxGrWrS2HBXUkmoMrDt22v+NYRu9lr3XsHAjh9QAGbLlY7eAWEqk2KimT+vI
-jFQH9VZSI2T8ZlQ58TPQ5cy2QMjXmp8OxKpXsLPiQWFUyh+QOxjvYNLc7jQpMwgb
-N0vEyn7LlH9QxI0WL5ADlAe/z1L/JFRWrHRs0zGMzu+zFFpbigyDLeH3TWAvoU2F
-OJZ/AS+amccuPQlNXlPf/Smf/2vkwqHjZ4Xb4gJNbynU4bOiNHHgkN0/sz+GQYyX
-CebD3qAO09Q+zupYcOafJKgZyt9huJzDTlPQaZZEhHYrmWUIBkLUsnanL2kS1cJl
-pv8sd3MA55eld2uKnD8CAwEAAaN1MHMwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4E
-FgQUp1VrURB1zk5bC2T/qW0j+1eIWWkwQQYDVR0jBDowOIAUp1VrURB1zk5bC2T/
-qW0j+1eIWWmhFaQTMBExDzANBgNVBAMMBkNBUm9vdIIJANUkcrXFw/TdMA0GCSqG
-SIb3DQEBCwUAA4IBAQAhsU0rFB5akV0onrrL7fGW2sP6jbV05MX7Lz45tKZZad2E
-ZKjw4DnS74fMiwmfCoQf0JacS2TqCAkmHIT0Bl9eubqzPGyB4JNGiQdRlTZ3lnZd
-pmhxu2CIp4MnfGZdZDbLjr0C9/tSY4Mv/ldM1Qwb6u+IrYyp1LMsuMTikMsPJA7f
-/CrGgwhJRbBBhQ60b/cYVnulC/Ybf3KI7sjvs+M+8GgbyVW7TSFla55c3WBLf/GE
-+GdRwmCIQm5snBS4lrAYEJcslOd5FHvRoqTYlISsqcoXlcIniyvYGWoUS8MDpjBV
-QL3ODMLVr31tZYlrdO0hEvGqycm62prKFGw59AIy
------END CERTIFICATE-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem
deleted file mode 100644
index 3cf236c401255..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem
+++ /dev/null
@@ -1,71 +0,0 @@
-Certificate:
- Data:
- Version: 3 (0x2)
- Serial Number: 15537474201172114494 (0xd7a0327703a8fc3e)
- Signature Algorithm: sha256WithRSAEncryption
- Issuer: CN=CARoot
- Validity
- Not Before: Feb 22 06:26:33 2023 GMT
- Not After : Feb 19 06:26:33 2033 GMT
- Subject: C=US, ST=CA, O=Apache, OU=Apache Pulsar, CN=superUser
- Subject Public Key Info:
- Public Key Algorithm: rsaEncryption
- Public-Key: (2048 bit)
- Modulus:
- 00:cd:43:7d:98:40:f9:b0:5b:bc:ae:db:c0:0b:ad:
- 26:90:96:e0:62:38:ed:68:b1:70:46:3b:de:44:f9:
- 14:51:86:10:eb:ca:90:e7:88:e8:f9:91:85:e0:dd:
- b5:b4:14:b9:78:e3:86:d5:54:6d:68:ec:14:92:b4:
- f8:22:5b:05:3d:ed:31:25:65:08:05:84:ca:e6:0c:
- 21:12:58:32:c7:1a:60:a3:4f:d2:4a:9e:28:19:7c:
- 45:84:00:8c:89:dc:de:8a:e5:4f:88:91:cc:a4:f1:
- 81:45:4c:7d:c2:ff:e2:c1:89:c6:12:73:95:e2:36:
- bd:db:ae:8b:5a:68:6a:90:51:de:2b:88:5f:aa:67:
- f4:a8:e3:63:dc:be:19:82:cc:9d:7f:e6:8d:fb:82:
- be:22:01:3d:56:13:3b:5b:04:b4:e8:c5:18:e6:2e:
- 0d:fa:ba:4a:8d:e8:c6:5a:a1:51:9a:4a:62:d7:af:
- dd:b4:fc:e2:d5:cd:ae:99:6c:5c:61:56:0b:d7:0c:
- 1a:77:5c:f5:3a:6a:54:b5:9e:33:ac:a9:75:28:9a:
- 76:af:d0:7a:57:00:1b:91:13:31:fd:42:88:21:47:
- 05:10:01:2f:59:bb:c7:3a:d9:e1:58:4c:1b:6c:71:
- b6:98:ef:dd:03:82:58:a3:32:dc:90:a1:b6:a6:1e:
- e1:0b
- Exponent: 65537 (0x10001)
- X509v3 extensions:
- X509v3 Subject Alternative Name:
- DNS:localhost, IP Address:127.0.0.1
- Signature Algorithm: sha256WithRSAEncryption
- b8:fc:d3:8f:8a:e0:6b:74:57:e2:a3:79:b2:18:60:0b:2c:05:
- f9:e3:ae:dd:e9:ad:52:88:52:73:b4:12:b0:39:90:65:12:f5:
- 95:0e:5f:4b:f2:06:4a:57:ab:e1:f9:b1:34:68:83:d7:d7:5e:
- 69:0a:16:44:ea:1d:97:53:51:10:51:8b:ec:0a:b3:c8:a3:3d:
- 85:4d:f4:8f:7d:b3:b5:72:e4:9e:d7:f3:01:bf:66:e1:40:92:
- 54:63:16:b6:b5:66:ed:30:38:94:1d:1a:8f:28:34:27:ab:c9:
- 5f:d5:16:7e:e4:f5:93:d2:19:35:44:0a:c4:2e:6a:25:38:1d:
- ee:5a:c8:29:fa:96:dc:95:82:38:9e:36:3a:68:34:7b:4e:d9:
- fa:0d:b2:88:a2:6c:4f:03:18:a7:e3:41:67:38:de:e5:f6:ff:
- 2a:1c:f0:ec:1a:02:a7:e8:4e:3a:c3:04:72:f8:6a:4f:28:a6:
- cf:0b:a2:db:33:74:d1:10:9e:ec:b4:ac:f8:b1:24:f4:ef:0e:
- 05:e4:9d:1b:9a:40:f7:09:66:9c:9d:86:8b:76:96:46:e8:d1:
- dc:10:c7:7d:0b:69:41:dc:a7:8e:e3:a3:36:e3:42:63:93:8c:
- 91:80:0d:27:11:1c:2d:ae:fb:92:88:6c:6b:09:40:1a:30:dd:
- 8f:ac:0f:62
------BEGIN CERTIFICATE-----
-MIIDCTCCAfGgAwIBAgIJANegMncDqPw+MA0GCSqGSIb3DQEBCwUAMBExDzANBgNV
-BAMMBkNBUm9vdDAeFw0yMzAyMjIwNjI2MzNaFw0zMzAyMTkwNjI2MzNaMFcxCzAJ
-BgNVBAYTAlVTMQswCQYDVQQIEwJDQTEPMA0GA1UEChMGQXBhY2hlMRYwFAYDVQQL
-Ew1BcGFjaGUgUHVsc2FyMRIwEAYDVQQDEwlzdXBlclVzZXIwggEiMA0GCSqGSIb3
-DQEBAQUAA4IBDwAwggEKAoIBAQDNQ32YQPmwW7yu28ALrSaQluBiOO1osXBGO95E
-+RRRhhDrypDniOj5kYXg3bW0FLl444bVVG1o7BSStPgiWwU97TElZQgFhMrmDCES
-WDLHGmCjT9JKnigZfEWEAIyJ3N6K5U+Ikcyk8YFFTH3C/+LBicYSc5XiNr3brota
-aGqQUd4riF+qZ/So42PcvhmCzJ1/5o37gr4iAT1WEztbBLToxRjmLg36ukqN6MZa
-oVGaSmLXr920/OLVza6ZbFxhVgvXDBp3XPU6alS1njOsqXUomnav0HpXABuREzH9
-QoghRwUQAS9Zu8c62eFYTBtscbaY790DglijMtyQobamHuELAgMBAAGjHjAcMBoG
-A1UdEQQTMBGCCWxvY2FsaG9zdIcEfwAAATANBgkqhkiG9w0BAQsFAAOCAQEAuPzT
-j4rga3RX4qN5shhgCywF+eOu3emtUohSc7QSsDmQZRL1lQ5fS/IGSler4fmxNGiD
-19deaQoWROodl1NREFGL7AqzyKM9hU30j32ztXLkntfzAb9m4UCSVGMWtrVm7TA4
-lB0ajyg0J6vJX9UWfuT1k9IZNUQKxC5qJTgd7lrIKfqW3JWCOJ42Omg0e07Z+g2y
-iKJsTwMYp+NBZzje5fb/Khzw7BoCp+hOOsMEcvhqTyimzwui2zN00RCe7LSs+LEk
-9O8OBeSdG5pA9wlmnJ2Gi3aWRujR3BDHfQtpQdynjuOjNuNCY5OMkYANJxEcLa77
-kohsawlAGjDdj6wPYg==
------END CERTIFICATE-----
diff --git a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem b/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem
deleted file mode 100644
index 3835b3eacccc0..0000000000000
--- a/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem
+++ /dev/null
@@ -1,28 +0,0 @@
------BEGIN PRIVATE KEY-----
-MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQDNQ32YQPmwW7yu
-28ALrSaQluBiOO1osXBGO95E+RRRhhDrypDniOj5kYXg3bW0FLl444bVVG1o7BSS
-tPgiWwU97TElZQgFhMrmDCESWDLHGmCjT9JKnigZfEWEAIyJ3N6K5U+Ikcyk8YFF
-TH3C/+LBicYSc5XiNr3brotaaGqQUd4riF+qZ/So42PcvhmCzJ1/5o37gr4iAT1W
-EztbBLToxRjmLg36ukqN6MZaoVGaSmLXr920/OLVza6ZbFxhVgvXDBp3XPU6alS1
-njOsqXUomnav0HpXABuREzH9QoghRwUQAS9Zu8c62eFYTBtscbaY790DglijMtyQ
-obamHuELAgMBAAECggEBALGnokJuqiz7mTj2NSdl+6TVEOuyPbiJKpV/J4cm1XEh
-ye9qaTQcCRhH3UmcWrG75jM9KevloLRY8A1x1/lUMhtA+XJWGTU9k6a8BLut3nT4
-3X87jNTMQgSczEXNe9WudmZcxhN7rVVtOOdTpt1pP0cnCWna5HTf0D8cuLvM975j
-r1YGTjKsCF1W+tp6ZAIIMfJkUI2qBRKvSxVCSs1vZBraox3yUVnq9oRLHxZZoqOd
-d51G5phRtn6ReVPBdT8fGUBEGg3jKxTu2/vLQMUyHy0hyCAM20gzOP4FIc2g+QZU
-y42byAuc89m0OrdRWsmzHCOxcq9DwY9npaz1RscR/2ECgYEA9bHJQ0Y1afpS5gn2
-KnXenRIw9oal1utQZnohCEJ4um+K/BCEHtDnI825LPNf34IKM2rSmssvHrYN51o0
-92j9lHHXsf6MVluwsTsIu8MtNaJ1BLt96dub4ScGT6vvzObKTwsajUfIHk+FNsKq
-zps8yh1q0qyyfAcvR82+Xr6JIsMCgYEA1d+RHGewi/Ub/GCG99A1KFKsgbiIJnWB
-IFmrcyPWignhzDUcw2SV9XqAzeK8EOIHNq3e5U/tkA7aCWxtLb5UsQ8xvmwQY2cy
-X2XvSdIhO4K2PgRLgjlzZ8RHSULglqyjB2i6TjwjFl8TsRzYr6JlV6+2cMujw4Bl
-g3a8gz071BkCgYBLP7BMkmw5kRliqxph1sffg3rLhmG0eU2elTkYtoMTVqZSnRxZ
-89FW/eMBCWkLo2BMbyMhlalQ1qFbgh1GyTkhBdzx/uwsZtiu7021dAmcq6z7ThE6
-VrBfPPyJ2jcPon/DxbrUGnAIGILMSsLVlGYB4RCehZYEto6chz8O9Xw60QKBgCnd
-us1BqviqwZC04JbQJie/j09RbS2CIQXRJ9PBNzUMXCwaVYgWP5ivI1mqQcBYTqsw
-fAqNi+aAUcQ4emLS+Ec0vzsUclzTDbRJAv+DZ8f7fWtEcfeLAYFVldLMiaRVJRDF
-OnsoIII3mGY6TFyNQKNanS8VXfheQQDsFFjoera5AoGBALXYEXkESXpw4LT6qJFz
-ktQuTZDfS6LtR14/+NkYL9c5wBC4Otkg4bNbT8xGlUjethRfpkm8xRTB6zfC1/p/
-Cg6YU1cwqlkRurAhE3PEv1dCc1IDbzou8xnwqHrd6sGPDQmQ3aEtU5eJhDZKIZfx
-nQqPGK92+Jtne7+W1mFZooxs
------END PRIVATE KEY-----
diff --git a/bouncy-castle/bcfips/pom.xml b/bouncy-castle/bcfips/pom.xml
index bd5d64bd84191..a07e5e19907f2 100644
--- a/bouncy-castle/bcfips/pom.xml
+++ b/bouncy-castle/bcfips/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarbouncy-castle-parent
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/bouncy-castle/pom.xml b/bouncy-castle/pom.xml
index 9f8ace79b77c3..daefeb83b5371 100644
--- a/bouncy-castle/pom.xml
+++ b/bouncy-castle/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarpulsar
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/build/build_java_test_image.sh b/build/build_java_test_image.sh
index 0747e6dacb82a..459bf26f98eff 100755
--- a/build/build_java_test_image.sh
+++ b/build/build_java_test_image.sh
@@ -27,5 +27,6 @@ if [[ "$(docker version -f '{{.Server.Experimental}}' 2>/dev/null)" == "true" ]]
SQUASH_PARAM="-Ddocker.squash=true"
fi
mvn -am -pl tests/docker-images/java-test-image -Pcore-modules,-main,integrationTests,docker \
+ -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \
-Dmaven.test.skip=true -DskipSourceReleaseAssembly=true -Dspotbugs.skip=true -Dlicense.skip=true $SQUASH_PARAM \
"$@" install
\ No newline at end of file
diff --git a/build/pulsar_ci_tool.sh b/build/pulsar_ci_tool.sh
index 61199eda2c5d8..d946edd395789 100755
--- a/build/pulsar_ci_tool.sh
+++ b/build/pulsar_ci_tool.sh
@@ -46,7 +46,8 @@ function ci_print_thread_dumps() {
# runs maven
function _ci_mvn() {
- mvn -B -ntp "$@"
+ mvn -B -ntp -DUBUNTU_MIRROR="${UBUNTU_MIRROR}" -DUBUNTU_SECURITY_MIRROR="${UBUNTU_SECURITY_MIRROR}" \
+ "$@"
}
# runs OWASP Dependency Check for all projects
diff --git a/build/regenerate_certs_for_tests.sh b/build/regenerate_certs_for_tests.sh
index fff1c057060f3..9582a7496cd1d 100755
--- a/build/regenerate_certs_for_tests.sh
+++ b/build/regenerate_certs_for_tests.sh
@@ -68,13 +68,6 @@ reissue_certificate_no_subject \
$ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/no-subject-alt-key.pem \
$ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/no-subject-alt-cert.pem
-generate_ca
-cp ca-cert.pem $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/cacert.pem
-reissue_certificate $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-key.pem \
- $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/broker-cert.pem
-reissue_certificate $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-key.pem \
- $ROOT_DIR/bouncy-castle/bcfips-include-test/src/test/resources/authentication/tls/client-cert.pem
-
generate_ca
cp ca-cert.pem $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-cacert.pem
reissue_certificate $ROOT_DIR/pulsar-proxy/src/test/resources/authentication/tls/ProxyWithAuthorizationTest/broker-key.pem \
diff --git a/build/run_unit_group.sh b/build/run_unit_group.sh
index ba49820ed1d33..69434b011b37e 100755
--- a/build/run_unit_group.sh
+++ b/build/run_unit_group.sh
@@ -188,6 +188,18 @@ function test_group_pulsar_io() {
echo "::endgroup::"
}
+function test_group_pulsar_io_elastic() {
+ echo "::group::Running elastic-search tests"
+ mvn_test --install -Ppulsar-io-elastic-tests,-main
+ echo "::endgroup::"
+}
+
+function test_group_pulsar_io_kafka_connect() {
+ echo "::group::Running Pulsar IO Kafka connect adaptor tests"
+ mvn_test --install -Ppulsar-io-kafka-connect-tests,-main
+ echo "::endgroup::"
+}
+
function list_test_groups() {
declare -F | awk '{print $NF}' | sort | grep -E '^test_group_' | sed 's/^test_group_//g' | tr '[:lower:]' '[:upper:]'
}
diff --git a/buildtools/pom.xml b/buildtools/pom.xml
index de52ac0930a33..5a391777f2567 100644
--- a/buildtools/pom.xml
+++ b/buildtools/pom.xml
@@ -25,39 +25,39 @@
org.apacheapache
- 23
+ 29org.apache.pulsarbuildtools
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOTjarPulsar Build Tools
- ${maven.build.timestamp}
+ 2023-05-03T02:53:27Z1.81.8
- 3.0.0-M3
+ 3.1.02.18.01.7.32
- 7.7.0
+ 7.7.13.114.1
- 3.4.08.373.1.2
- 4.1.89.Final
+ 4.1.93.Final4.2.331.0.1-jre1.10.12
- 1.32
+ 2.03.12.4
--add-opens java.base/jdk.internal.loader=ALL-UNNAMED
--add-opens java.base/java.lang=ALL-UNNAMED
+ --add-opens java.base/jdk.internal.platform=ALL-UNNAMED
@@ -176,18 +176,24 @@
listener
- org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier
+ org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.JacocoDumpListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier
${test.additional.args}
+
+
+ org.apache.maven.surefire
+ surefire-testng
+ ${surefire.version}
+
+ org.apache.maven.pluginsmaven-shade-plugin
- ${maven-shade-plugin.version}truetrue
@@ -255,7 +261,7 @@
org.apache.maven.wagonwagon-ssh-external
- 2.10
+ 3.5.3
diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java b/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java
index 627a4ec30547b..fe76a79b2c4ce 100644
--- a/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java
+++ b/buildtools/src/main/java/org/apache/pulsar/tests/FailFastNotifier.java
@@ -124,8 +124,7 @@ public void beforeInvocation(IInvokedMethod iInvokedMethod, ITestResult iTestRes
|| iTestNGMethod.isAfterTestConfiguration())) {
throw new FailFastSkipException("Skipped after failure since testFailFast system property is set.");
}
- }
- if (FAIL_FAST_KILLSWITCH_FILE != null && FAIL_FAST_KILLSWITCH_FILE.exists()) {
+ } else if (FAIL_FAST_KILLSWITCH_FILE != null && FAIL_FAST_KILLSWITCH_FILE.exists()) {
throw new FailFastSkipException("Skipped after failure since kill switch file exists.");
}
}
diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/JacocoDumpListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/JacocoDumpListener.java
new file mode 100644
index 0000000000000..2c49d5118ae52
--- /dev/null
+++ b/buildtools/src/main/java/org/apache/pulsar/tests/JacocoDumpListener.java
@@ -0,0 +1,103 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.tests;
+
+import java.lang.management.ManagementFactory;
+import java.util.concurrent.TimeUnit;
+import javax.management.InstanceNotFoundException;
+import javax.management.MBeanServer;
+import javax.management.MBeanServerInvocationHandler;
+import javax.management.MalformedObjectNameException;
+import javax.management.ObjectName;
+import org.testng.IExecutionListener;
+import org.testng.ISuite;
+import org.testng.ISuiteListener;
+
+/**
+ * A TestNG listener that dumps Jacoco coverage data to file using the Jacoco JMX interface.
+ *
+ * This ensures that coverage data is dumped even if the shutdown sequence of the Test JVM gets stuck. Coverage
+ * data will be dumped every 2 minutes by default and once all test suites have been run.
+ * Each test class runs in its own suite when run with maven-surefire-plugin.
+ */
+public class JacocoDumpListener implements ISuiteListener, IExecutionListener {
+ private final MBeanServer platformMBeanServer = ManagementFactory.getPlatformMBeanServer();
+ private final ObjectName jacocoObjectName;
+ private final JacocoProxy jacocoProxy;
+ private final boolean enabled;
+
+ private long lastDumpTime;
+
+ private static final long DUMP_INTERVAL_MILLIS = TimeUnit.SECONDS.toMillis(120);
+
+ public JacocoDumpListener() {
+ try {
+ jacocoObjectName = new ObjectName("org.jacoco:type=Runtime");
+ } catch (MalformedObjectNameException e) {
+ // this won't happen since the ObjectName is static and valid
+ throw new RuntimeException(e);
+ }
+ enabled = checkEnabled();
+ if (enabled) {
+ jacocoProxy = MBeanServerInvocationHandler.newProxyInstance(platformMBeanServer, jacocoObjectName,
+ JacocoProxy.class, false);
+ } else {
+ jacocoProxy = null;
+ }
+ lastDumpTime = System.currentTimeMillis();
+ }
+
+ private boolean checkEnabled() {
+ try {
+ platformMBeanServer.getObjectInstance(jacocoObjectName);
+ } catch (InstanceNotFoundException e) {
+ // jacoco jmx is not enabled
+ return false;
+ }
+ return true;
+ }
+
+ public void onFinish(ISuite suite) {
+ // dump jacoco coverage data to file using the Jacoco JMX interface if more than DUMP_INTERVAL_MILLIS has passed
+ // since the last dump
+ if (enabled && System.currentTimeMillis() - lastDumpTime > DUMP_INTERVAL_MILLIS) {
+ // dump jacoco coverage data to file using the Jacoco JMX interface
+ triggerJacocoDump();
+ }
+ }
+ @Override
+ public void onExecutionFinish() {
+ if (enabled) {
+ // dump jacoco coverage data to file using the Jacoco JMX interface when all tests have finished
+ triggerJacocoDump();
+ }
+ }
+
+ private void triggerJacocoDump() {
+ System.out.println("Dumping Jacoco coverage data to file...");
+ long start = System.currentTimeMillis();
+ jacocoProxy.dump(true);
+ lastDumpTime = System.currentTimeMillis();
+ System.out.println("Completed in " + (lastDumpTime - start) + "ms.");
+ }
+
+ public interface JacocoProxy {
+ void dump(boolean reset);
+ }
+}
diff --git a/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java b/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java
index b3d70621843ca..2d1f1273272c5 100644
--- a/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java
+++ b/buildtools/src/main/java/org/apache/pulsar/tests/PulsarTestListener.java
@@ -44,20 +44,29 @@ public void onTestFailure(ITestResult result) {
if (!(result.getThrowable() instanceof SkipException)) {
System.out.format("!!!!!!!!! FAILURE-- %s.%s(%s)-------\n", result.getTestClass(),
result.getMethod().getMethodName(), Arrays.toString(result.getParameters()));
- }
- if (result.getThrowable() != null) {
- result.getThrowable().printStackTrace();
- if (result.getThrowable() instanceof ThreadTimeoutException) {
- System.out.println("====== THREAD DUMPS ======");
- System.out.println(ThreadDumpUtil.buildThreadDiagnosticString());
+ if (result.getThrowable() != null) {
+ result.getThrowable().printStackTrace();
+ if (result.getThrowable() instanceof ThreadTimeoutException) {
+ System.out.println("====== THREAD DUMPS ======");
+ System.out.println(ThreadDumpUtil.buildThreadDiagnosticString());
+ }
}
}
}
@Override
public void onTestSkipped(ITestResult result) {
- System.out.format("~~~~~~~~~ SKIPPED -- %s.%s(%s)-------\n", result.getTestClass(),
- result.getMethod().getMethodName(), Arrays.toString(result.getParameters()));
+ if (!(result.getThrowable() instanceof SkipException)) {
+ System.out.format("~~~~~~~~~ SKIPPED -- %s.%s(%s)-------\n", result.getTestClass(),
+ result.getMethod().getMethodName(), Arrays.toString(result.getParameters()));
+ if (result.getThrowable() != null) {
+ result.getThrowable().printStackTrace();
+ if (result.getThrowable() instanceof ThreadTimeoutException) {
+ System.out.println("====== THREAD DUMPS ======");
+ System.out.println(ThreadDumpUtil.buildThreadDiagnosticString());
+ }
+ }
+ }
}
@Override
diff --git a/buildtools/src/main/resources/pulsar/suppressions.xml b/buildtools/src/main/resources/pulsar/suppressions.xml
index 7c78988db3e90..57a01c60f6a27 100644
--- a/buildtools/src/main/resources/pulsar/suppressions.xml
+++ b/buildtools/src/main/resources/pulsar/suppressions.xml
@@ -38,7 +38,7 @@
-
+
diff --git a/conf/broker.conf b/conf/broker.conf
index d52adb254563d..22ca71864e9cd 100644
--- a/conf/broker.conf
+++ b/conf/broker.conf
@@ -85,6 +85,7 @@ advertisedAddress=
# internalListenerName=
# Enable or disable the HAProxy protocol.
+# If true, the real IP addresses of consumers and producers can be obtained when getting topic statistics data.
haProxyProtocolEnabled=false
# Number of threads to config Netty Acceptor. Default is 1
@@ -549,13 +550,11 @@ delayedDeliveryTrackerFactoryClassName=org.apache.pulsar.broker.delayed.InMemory
# Control the tick time for when retrying on delayed delivery,
# affecting the accuracy of the delivery time compared to the scheduled time.
-# Note that this time is used to configure the HashedWheelTimer's tick time for the
-# InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory).
+# Note that this time is used to configure the HashedWheelTimer's tick time.
# Default is 1 second.
delayedDeliveryTickTimeMillis=1000
-# When using the InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory), whether
-# the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt
+# Whether the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt
# time by as much as the tickTimeMillis. This can reduce the overhead on the broker of maintaining the delayed index
# for a potentially very short time period. When true, messages will not be sent to consumer until the deliverAt time
# has passed, and they may be as late as the deliverAt time plus the tickTimeMillis for the topic plus the
@@ -577,10 +576,11 @@ delayedDeliveryMaxIndexesPerBucketSnapshotSegment=5000
# The max number of delayed message index bucket,
# after reaching the max buckets limitation, the adjacent buckets will be merged.
-delayedDeliveryMaxNumBuckets=50
+# (disable with value -1)
+delayedDeliveryMaxNumBuckets=-1
# Size of the lookahead window to use when detecting if all the messages in the topic
-# have a fixed delay.
+# have a fixed delay for InMemoryDelayedDeliveryTracker (the default DelayedDeliverTracker).
# Default is 50,000. Setting the lookahead window to 0 will disable the logic to handle
# fixed delays in messages in a different way.
delayedDeliveryFixedDelayDetectionLookahead=50000
@@ -903,6 +903,11 @@ saslJaasServerRoleTokenSignerSecretPath=
# If >0, it will reject all HTTP requests with bodies larged than the configured limit
httpMaxRequestSize=-1
+# The maximum size in bytes of the request header. Larger headers will allow for more and/or larger cookies plus larger
+# form content encoded in a URL.However, larger headers consume more memory and can make a server more vulnerable to
+# denial of service attacks.
+httpMaxRequestHeaderSize = 8192
+
# If true, the broker will reject all HTTP requests using the TRACE and TRACK verbs.
# This setting may be necessary if the broker is deployed into an environment that uses http port
# scanning and flags web servers allowing the TRACE method as insecure.
@@ -1073,8 +1078,8 @@ bookkeeperExplicitLacIntervalInMills=0
# bookkeeperClientExposeStatsToPrometheus=false
# If bookkeeperClientExposeStatsToPrometheus is set to true, we can set bookkeeperClientLimitStatsLogging=true
-# to limit per_channel_bookie_client metrics. default is false
-# bookkeeperClientLimitStatsLogging=false
+# to limit per_channel_bookie_client metrics. default is true
+# bookkeeperClientLimitStatsLogging=true
### --- Managed Ledger --- ###
@@ -1373,6 +1378,91 @@ loadBalancerBundleUnloadMinThroughputThreshold=10
# Time to wait for the unloading of a namespace bundle
namespaceBundleUnloadingTimeoutMs=60000
+### --- Load balancer extension --- ###
+
+# Option to enable the debug mode for the load balancer logics.
+# The debug mode prints more logs to provide more information such as load balance states and decisions.
+# (only used in load balancer extension logics)
+loadBalancerDebugModeEnabled=false
+
+# The target standard deviation of the resource usage across brokers
+# (100% resource usage is 1.0 load).
+# The shedder logic tries to distribute bundle load across brokers to meet this target std.
+# The smaller value will incur load balancing more frequently.
+# (only used in load balancer extension TransferShedder)
+loadBalancerBrokerLoadTargetStd=0.25
+
+# Threshold to the consecutive count of fulfilled shedding (unload) conditions.
+# If the unload scheduler consecutively finds bundles that meet unload conditions
+# more times than this threshold, the scheduler will shed the bundles.
+# The bigger value will incur less bundle unloading/transfers.
+# (only used in load balancer extension TransferShedder)
+loadBalancerSheddingConditionHitCountThreshold=3
+
+# Option to enable the bundle transfer mode when distributing bundle loads.
+# On: transfer bundles from overloaded brokers to underloaded
+# -- pre-assigns the destination broker upon unloading.
+# Off: unload bundles from overloaded brokers
+# -- post-assigns the destination broker upon lookups.
+# (only used in load balancer extension TransferShedder)
+loadBalancerTransferEnabled=true
+
+# Maximum number of brokers to unload bundle load for each unloading cycle.
+# The bigger value will incur more unloading/transfers for each unloading cycle.
+# (only used in load balancer extension TransferShedder)
+loadBalancerMaxNumberOfBrokerSheddingPerCycle=3
+
+# Delay (in seconds) to the next unloading cycle after unloading.
+# The logic tries to give enough time for brokers to recompute load after unloading.
+# The bigger value will delay the next unloading cycle longer.
+# (only used in load balancer extension TransferShedder)
+loadBalanceSheddingDelayInSeconds=180
+
+# Broker load data time to live (TTL in seconds).
+# The logic tries to avoid (possibly unavailable) brokers with out-dated load data,
+# and those brokers will be ignored in the load computation.
+# When tuning this value, please consider loadBalancerReportUpdateMaxIntervalMinutes.
+# The current default is loadBalancerReportUpdateMaxIntervalMinutes * 2.
+# (only used in load balancer extension TransferShedder)
+loadBalancerBrokerLoadDataTTLInSeconds=1800
+
+# Max number of bundles in bundle load report from each broker.
+# The load balancer distributes bundles across brokers,
+# based on topK bundle load data and other broker load data.
+# The bigger value will increase the overhead of reporting many bundles in load data.
+# (only used in load balancer extension logics)
+loadBalancerMaxNumberOfBundlesInBundleLoadReport=10
+
+# Service units'(bundles) split interval. Broker periodically checks whether
+# some service units(e.g. bundles) should split if they become hot-spots.
+# (only used in load balancer extension logics)
+loadBalancerSplitIntervalMinutes=1
+
+# Max number of bundles to split to per cycle.
+# (only used in load balancer extension logics)
+loadBalancerMaxNumberOfBundlesToSplitPerCycle=10
+
+# Threshold to the consecutive count of fulfilled split conditions.
+# If the split scheduler consecutively finds bundles that meet split conditions
+# more times than this threshold, the scheduler will trigger splits on the bundles
+# (if the number of bundles is less than loadBalancerNamespaceMaximumBundles).
+# (only used in load balancer extension logics)
+loadBalancerNamespaceBundleSplitConditionHitCountThreshold=3
+
+# After this delay, the service-unit state channel tombstones any service units (e.g., bundles)
+# in semi-terminal states. For example, after splits, parent bundles will be `deleted`,
+# and then after this delay, the parent bundles' state will be `tombstoned`
+# in the service-unit state channel.
+# Pulsar does not immediately remove such semi-terminal states
+# to avoid unnecessary system confusion,
+# as the bundles in the `tombstoned` state might temporarily look available to reassign.
+# Rarely, one could lower this delay in order to aggressively clean
+# the service-unit state channel when there are a large number of bundles.
+# minimum value = 30 secs
+# (only used in load balancer extension logics)
+loadBalancerServiceUnitStateTombstoneDelayTimeInSeconds=3600
+
+
### --- Replication --- ###
# Enable replication metrics
diff --git a/conf/functions_worker.yml b/conf/functions_worker.yml
index b41ac8f37a44f..4c5b6aab1b7f4 100644
--- a/conf/functions_worker.yml
+++ b/conf/functions_worker.yml
@@ -311,6 +311,8 @@ authenticationProviders:
authorizationProvider: org.apache.pulsar.broker.authorization.PulsarAuthorizationProvider
# Set of role names that are treated as "super-user", meaning they will be able to access any admin-api
superUserRoles:
+# Set of role names that are treated as "proxy" roles. These are the roles that can supply the originalPrincipal.
+proxyRoles:
#### tls configuration for worker service
# Enable TLS
@@ -405,6 +407,12 @@ validateConnectorConfig: false
# If it is set to true, you must ensure that it has been initialized by "bin/pulsar initialize-cluster-metadata" command.
initializedDlogMetadata: false
+# Whether to ignore unknown properties when deserializing the connector configuration.
+# After upgrading a connector to a new version with a new configuration, the new configuration may not be compatible with the old connector.
+# In case of rollback, it's required to also rollback the connector configuration.
+# Ignoring unknown fields makes it possible to keep the new configuration and only roll back the connector.
+ignoreUnknownConfigFields: false
+
###########################
# Arbitrary Configuration
###########################
diff --git a/conf/proxy.conf b/conf/proxy.conf
index a5110ae57471a..cfc1e47b7c445 100644
--- a/conf/proxy.conf
+++ b/conf/proxy.conf
@@ -58,6 +58,7 @@ bindAddress=0.0.0.0
advertisedAddress=
# Enable or disable the HAProxy protocol.
+# If true, the real IP addresses of consumers and producers can be obtained when getting topic statistics data.
haProxyProtocolEnabled=false
# Enables zero-copy transport of data across network interfaces using the splice system call.
@@ -277,6 +278,11 @@ maxHttpServerConnections=2048
# Max concurrent web requests
maxConcurrentHttpRequests=1024
+# The maximum size in bytes of the request header. Larger headers will allow for more and/or larger cookies plus larger
+# form content encoded in a URL. However, larger headers consume more memory and can make a server more vulnerable to
+# denial of service attacks.
+httpMaxRequestHeaderSize = 8192
+
## Configure the datasource of basic authenticate, supports the file and Base64 format.
# file:
# basicAuthConf=/path/my/.htpasswd
diff --git a/conf/pulsar_tools_env.sh b/conf/pulsar_tools_env.sh
index a356dbb9a28df..9d22b73905df3 100755
--- a/conf/pulsar_tools_env.sh
+++ b/conf/pulsar_tools_env.sh
@@ -42,6 +42,19 @@
# PULSAR_GLOBAL_ZK_CONF=
# Extra options to be passed to the jvm
+# Discard parameter "-Xms" of $PULSAR_MEM, which tends to be the Broker's minimum memory, to avoid using too much
+# memory by tools.
+if [ -n "$PULSAR_MEM" ]; then
+ PULSAR_MEM_ARR=("${PULSAR_MEM}")
+ PULSAR_MEM_REWRITE=""
+ for i in ${PULSAR_MEM_ARR}
+ do
+ if [ "${i:0:4}" != "-Xms" ]; then
+ PULSAR_MEM_REWRITE="$PULSAR_MEM_REWRITE $i";
+ fi
+ done
+ PULSAR_MEM=${PULSAR_MEM_REWRITE}
+fi
PULSAR_MEM=${PULSAR_MEM:-"-Xmx128m -XX:MaxDirectMemorySize=128m"}
# Garbage collection options
diff --git a/conf/standalone.conf b/conf/standalone.conf
index f141946c29f4e..46e6aed76e42a 100644
--- a/conf/standalone.conf
+++ b/conf/standalone.conf
@@ -48,6 +48,7 @@ bindAddresses=
advertisedAddress=
# Enable or disable the HAProxy protocol.
+# If true, the real IP addresses of consumers and producers can be obtained when getting topic statistics data.
haProxyProtocolEnabled=false
# Number of threads to use for Netty IO. Default is set to 2 * Runtime.getRuntime().availableProcessors()
@@ -696,8 +697,8 @@ bookkeeperUseV2WireProtocol=true
# bookkeeperClientExposeStatsToPrometheus=false
# If bookkeeperClientExposeStatsToPrometheus is set to true, we can set bookkeeperClientLimitStatsLogging=true
-# to limit per_channel_bookie_client metrics. default is false
-# bookkeeperClientLimitStatsLogging=false
+# to limit per_channel_bookie_client metrics. default is true
+# bookkeeperClientLimitStatsLogging=true
### --- Managed Ledger --- ###
@@ -1236,13 +1237,11 @@ delayedDeliveryTrackerFactoryClassName=org.apache.pulsar.broker.delayed.InMemory
# Control the tick time for when retrying on delayed delivery,
# affecting the accuracy of the delivery time compared to the scheduled time.
-# Note that this time is used to configure the HashedWheelTimer's tick time for the
-# InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory).
+# Note that this time is used to configure the HashedWheelTimer's tick time.
# Default is 1 second.
delayedDeliveryTickTimeMillis=1000
-# When using the InMemoryDelayedDeliveryTrackerFactory (the default DelayedDeliverTrackerFactory), whether
-# the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt
+# Whether the deliverAt time is strictly followed. When false (default), messages may be sent to consumers before the deliverAt
# time by as much as the tickTimeMillis. This can reduce the overhead on the broker of maintaining the delayed index
# for a potentially very short time period. When true, messages will not be sent to consumer until the deliverAt time
# has passed, and they may be as late as the deliverAt time plus the tickTimeMillis for the topic plus the
@@ -1264,4 +1263,5 @@ delayedDeliveryMaxIndexesPerBucketSnapshotSegment=5000
# The max number of delayed message index bucket,
# after reaching the max buckets limitation, the adjacent buckets will be merged.
-delayedDeliveryMaxNumBuckets=50
+# (disable with value -1)
+delayedDeliveryMaxNumBuckets=-1
diff --git a/deployment/terraform-ansible/templates/broker.conf b/deployment/terraform-ansible/templates/broker.conf
index f42d4c807d5d9..37e512fb35cc6 100644
--- a/deployment/terraform-ansible/templates/broker.conf
+++ b/deployment/terraform-ansible/templates/broker.conf
@@ -745,8 +745,8 @@ bookkeeperExplicitLacIntervalInMills=0
# bookkeeperClientExposeStatsToPrometheus=false
# If bookkeeperClientExposeStatsToPrometheus is set to true, we can set bookkeeperClientLimitStatsLogging=true
-# to limit per_channel_bookie_client metrics. default is false
-# bookkeeperClientLimitStatsLogging=false
+# to limit per_channel_bookie_client metrics. default is true
+# bookkeeperClientLimitStatsLogging=true
### --- Managed Ledger --- ###
diff --git a/distribution/io/pom.xml b/distribution/io/pom.xml
index 99105bef950d5..568d76922bf4e 100644
--- a/distribution/io/pom.xml
+++ b/distribution/io/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/offloaders/pom.xml b/distribution/offloaders/pom.xml
index 1e86758ed5a6a..d23ebec2ef26d 100644
--- a/distribution/offloaders/pom.xml
+++ b/distribution/offloaders/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/pom.xml b/distribution/pom.xml
index 9782f269284bf..36a3fa1c5835a 100644
--- a/distribution/pom.xml
+++ b/distribution/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarpulsar
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/server/pom.xml b/distribution/server/pom.xml
index 2043da516cf40..f804c9c54b9cd 100644
--- a/distribution/server/pom.xml
+++ b/distribution/server/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
@@ -46,6 +46,12 @@
${project.version}
+
+ ${project.groupId}
+ pulsar-broker-auth-oidc
+ ${project.version}
+
+
${project.groupId}pulsar-broker-auth-sasl
diff --git a/distribution/server/src/assemble/LICENSE.bin.txt b/distribution/server/src/assemble/LICENSE.bin.txt
index 6b3455127b423..487e4e96b6a66 100644
--- a/distribution/server/src/assemble/LICENSE.bin.txt
+++ b/distribution/server/src/assemble/LICENSE.bin.txt
@@ -246,21 +246,21 @@ The Apache Software License, Version 2.0
* JCommander -- com.beust-jcommander-1.82.jar
* High Performance Primitive Collections for Java -- com.carrotsearch-hppc-0.9.1.jar
* Jackson
- - com.fasterxml.jackson.core-jackson-annotations-2.13.4.jar
- - com.fasterxml.jackson.core-jackson-core-2.13.4.jar
- - com.fasterxml.jackson.core-jackson-databind-2.13.4.2.jar
- - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.13.4.jar
- - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.13.4.jar
- - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.13.4.jar
- - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.13.4.jar
- - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.13.4.jar
- - com.fasterxml.jackson.datatype-jackson-datatype-jdk8-2.13.4.jar
- - com.fasterxml.jackson.datatype-jackson-datatype-jsr310-2.13.4.jar
- - com.fasterxml.jackson.module-jackson-module-parameter-names-2.13.4.jar
+ - com.fasterxml.jackson.core-jackson-annotations-2.14.2.jar
+ - com.fasterxml.jackson.core-jackson-core-2.14.2.jar
+ - com.fasterxml.jackson.core-jackson-databind-2.14.2.jar
+ - com.fasterxml.jackson.dataformat-jackson-dataformat-yaml-2.14.2.jar
+ - com.fasterxml.jackson.jaxrs-jackson-jaxrs-base-2.14.2.jar
+ - com.fasterxml.jackson.jaxrs-jackson-jaxrs-json-provider-2.14.2.jar
+ - com.fasterxml.jackson.module-jackson-module-jaxb-annotations-2.14.2.jar
+ - com.fasterxml.jackson.module-jackson-module-jsonSchema-2.14.2.jar
+ - com.fasterxml.jackson.datatype-jackson-datatype-jdk8-2.14.2.jar
+ - com.fasterxml.jackson.datatype-jackson-datatype-jsr310-2.14.2.jar
+ - com.fasterxml.jackson.module-jackson-module-parameter-names-2.14.2.jar
* Caffeine -- com.github.ben-manes.caffeine-caffeine-2.9.1.jar
* Conscrypt -- org.conscrypt-conscrypt-openjdk-uber-2.5.2.jar
* Proto Google Common Protos -- com.google.api.grpc-proto-google-common-protos-2.0.1.jar
- * Bitbucket -- org.bitbucket.b_c-jose4j-0.7.6.jar
+ * Bitbucket -- org.bitbucket.b_c-jose4j-0.9.3.jar
* Gson
- com.google.code.gson-gson-2.8.9.jar
- io.gsonfire-gson-fire-1.8.5.jar
@@ -289,37 +289,37 @@ The Apache Software License, Version 2.0
- org.apache.commons-commons-lang3-3.11.jar
- org.apache.commons-commons-text-1.10.0.jar
* Netty
- - io.netty-netty-buffer-4.1.89.Final.jar
- - io.netty-netty-codec-4.1.89.Final.jar
- - io.netty-netty-codec-dns-4.1.89.Final.jar
- - io.netty-netty-codec-http-4.1.89.Final.jar
- - io.netty-netty-codec-http2-4.1.89.Final.jar
- - io.netty-netty-codec-socks-4.1.89.Final.jar
- - io.netty-netty-codec-haproxy-4.1.89.Final.jar
- - io.netty-netty-common-4.1.89.Final.jar
- - io.netty-netty-handler-4.1.89.Final.jar
- - io.netty-netty-handler-proxy-4.1.89.Final.jar
- - io.netty-netty-resolver-4.1.89.Final.jar
- - io.netty-netty-resolver-dns-4.1.89.Final.jar
- - io.netty-netty-resolver-dns-classes-macos-4.1.89.Final.jar
- - io.netty-netty-resolver-dns-native-macos-4.1.89.Final-osx-aarch_64.jar
- - io.netty-netty-resolver-dns-native-macos-4.1.89.Final-osx-x86_64.jar
- - io.netty-netty-transport-4.1.89.Final.jar
- - io.netty-netty-transport-classes-epoll-4.1.89.Final.jar
- - io.netty-netty-transport-native-epoll-4.1.89.Final-linux-x86_64.jar
- - io.netty-netty-transport-native-epoll-4.1.89.Final.jar
- - io.netty-netty-transport-native-unix-common-4.1.89.Final.jar
- - io.netty-netty-transport-native-unix-common-4.1.89.Final-linux-x86_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.56.Final.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-linux-aarch_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-linux-x86_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-osx-aarch_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-osx-x86_64.jar
- - io.netty-netty-tcnative-boringssl-static-2.0.56.Final-windows-x86_64.jar
- - io.netty-netty-tcnative-classes-2.0.56.Final.jar
- - io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.18.Final.jar
- - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.18.Final-linux-x86_64.jar
- - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.18.Final-linux-aarch_64.jar
+ - io.netty-netty-buffer-4.1.93.Final.jar
+ - io.netty-netty-codec-4.1.93.Final.jar
+ - io.netty-netty-codec-dns-4.1.93.Final.jar
+ - io.netty-netty-codec-http-4.1.93.Final.jar
+ - io.netty-netty-codec-http2-4.1.93.Final.jar
+ - io.netty-netty-codec-socks-4.1.93.Final.jar
+ - io.netty-netty-codec-haproxy-4.1.93.Final.jar
+ - io.netty-netty-common-4.1.93.Final.jar
+ - io.netty-netty-handler-4.1.93.Final.jar
+ - io.netty-netty-handler-proxy-4.1.93.Final.jar
+ - io.netty-netty-resolver-4.1.93.Final.jar
+ - io.netty-netty-resolver-dns-4.1.93.Final.jar
+ - io.netty-netty-resolver-dns-classes-macos-4.1.93.Final.jar
+ - io.netty-netty-resolver-dns-native-macos-4.1.93.Final-osx-aarch_64.jar
+ - io.netty-netty-resolver-dns-native-macos-4.1.93.Final-osx-x86_64.jar
+ - io.netty-netty-transport-4.1.93.Final.jar
+ - io.netty-netty-transport-classes-epoll-4.1.93.Final.jar
+ - io.netty-netty-transport-native-epoll-4.1.93.Final-linux-x86_64.jar
+ - io.netty-netty-transport-native-epoll-4.1.93.Final.jar
+ - io.netty-netty-transport-native-unix-common-4.1.93.Final.jar
+ - io.netty-netty-transport-native-unix-common-4.1.93.Final-linux-x86_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-aarch_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar
+ - io.netty-netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar
+ - io.netty-netty-tcnative-classes-2.0.61.Final.jar
+ - io.netty.incubator-netty-incubator-transport-classes-io_uring-0.0.21.Final.jar
+ - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar
+ - io.netty.incubator-netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar
* Prometheus client
- io.prometheus.jmx-collector-0.16.1.jar
- io.prometheus-simpleclient-0.16.0.jar
@@ -343,35 +343,37 @@ The Apache Software License, Version 2.0
- org.apache.logging.log4j-log4j-slf4j-impl-2.18.0.jar
- org.apache.logging.log4j-log4j-web-2.18.0.jar
* Java Native Access JNA
- - net.java.dev.jna-jna-5.12.1.jar
- net.java.dev.jna-jna-jpms-5.12.1.jar
- net.java.dev.jna-jna-platform-jpms-5.12.1.jar
* BookKeeper
- - org.apache.bookkeeper-bookkeeper-common-4.15.4.jar
- - org.apache.bookkeeper-bookkeeper-common-allocator-4.15.4.jar
- - org.apache.bookkeeper-bookkeeper-proto-4.15.4.jar
- - org.apache.bookkeeper-bookkeeper-server-4.15.4.jar
- - org.apache.bookkeeper-bookkeeper-tools-framework-4.15.4.jar
- - org.apache.bookkeeper-circe-checksum-4.15.4.jar
- - org.apache.bookkeeper-cpu-affinity-4.15.4.jar
- - org.apache.bookkeeper-statelib-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-api-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-common-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-java-client-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-java-client-base-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-proto-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-server-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-service-api-4.15.4.jar
- - org.apache.bookkeeper-stream-storage-service-impl-4.15.4.jar
- - org.apache.bookkeeper.http-http-server-4.15.4.jar
- - org.apache.bookkeeper.http-vertx-http-server-4.15.4.jar
- - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.15.4.jar
- - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.15.4.jar
- - org.apache.distributedlog-distributedlog-common-4.15.4.jar
- - org.apache.distributedlog-distributedlog-core-4.15.4-tests.jar
- - org.apache.distributedlog-distributedlog-core-4.15.4.jar
- - org.apache.distributedlog-distributedlog-protocol-4.15.4.jar
- - org.apache.bookkeeper.stats-codahale-metrics-provider-4.15.4.jar
+ - org.apache.bookkeeper-bookkeeper-common-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-common-allocator-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-proto-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-server-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-tools-framework-4.16.1.jar
+ - org.apache.bookkeeper-circe-checksum-4.16.1.jar
+ - org.apache.bookkeeper-cpu-affinity-4.16.1.jar
+ - org.apache.bookkeeper-statelib-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-api-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-common-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-java-client-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-java-client-base-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-proto-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-server-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-service-api-4.16.1.jar
+ - org.apache.bookkeeper-stream-storage-service-impl-4.16.1.jar
+ - org.apache.bookkeeper.http-http-server-4.16.1.jar
+ - org.apache.bookkeeper.http-vertx-http-server-4.16.1.jar
+ - org.apache.bookkeeper.stats-bookkeeper-stats-api-4.16.1.jar
+ - org.apache.bookkeeper.stats-prometheus-metrics-provider-4.16.1.jar
+ - org.apache.distributedlog-distributedlog-common-4.16.1.jar
+ - org.apache.distributedlog-distributedlog-core-4.16.1-tests.jar
+ - org.apache.distributedlog-distributedlog-core-4.16.1.jar
+ - org.apache.distributedlog-distributedlog-protocol-4.16.1.jar
+ - org.apache.bookkeeper.stats-codahale-metrics-provider-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-slogger-api-4.16.1.jar
+ - org.apache.bookkeeper-bookkeeper-slogger-slf4j-4.16.1.jar
+ - org.apache.bookkeeper-native-io-4.16.1.jar
* Apache HTTP Client
- org.apache.httpcomponents-httpclient-4.5.13.jar
- org.apache.httpcomponents-httpcore-4.4.15.jar
@@ -400,8 +402,8 @@ The Apache Software License, Version 2.0
- org.eclipse.jetty.websocket-websocket-servlet-9.4.48.v20220622.jar
- org.eclipse.jetty-jetty-alpn-conscrypt-server-9.4.48.v20220622.jar
- org.eclipse.jetty-jetty-alpn-server-9.4.48.v20220622.jar
- * SnakeYaml -- org.yaml-snakeyaml-1.32.jar
- * RocksDB - org.rocksdb-rocksdbjni-6.29.4.1.jar
+ * SnakeYaml -- org.yaml-snakeyaml-2.0.jar
+ * RocksDB - org.rocksdb-rocksdbjni-7.9.2.jar
* Google Error Prone Annotations - com.google.errorprone-error_prone_annotations-2.5.1.jar
* Apache Thrift - org.apache.thrift-libthrift-0.14.2.jar
* OkHttp3
@@ -410,10 +412,10 @@ The Apache Software License, Version 2.0
* Okio - com.squareup.okio-okio-2.8.0.jar
* Javassist -- org.javassist-javassist-3.25.0-GA.jar
* Kotlin Standard Lib
- - org.jetbrains.kotlin-kotlin-stdlib-1.4.32.jar
- - org.jetbrains.kotlin-kotlin-stdlib-common-1.4.32.jar
- - org.jetbrains.kotlin-kotlin-stdlib-jdk7-1.4.32.jar
- - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.4.32.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-1.8.20.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-common-1.8.20.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-jdk7-1.8.20.jar
+ - org.jetbrains.kotlin-kotlin-stdlib-jdk8-1.8.20.jar
- org.jetbrains-annotations-13.0.jar
* gRPC
- io.grpc-grpc-all-1.45.1.jar
@@ -431,7 +433,6 @@ The Apache Software License, Version 2.0
- io.grpc-grpc-services-1.45.1.jar
- io.grpc-grpc-xds-1.45.1.jar
- io.grpc-grpc-rls-1.45.1.jar
- - com.google.auto.service-auto-service-annotations-1.0.jar
* Perfmark
- io.perfmark-perfmark-api-0.19.0.jar
* OpenCensus
@@ -451,9 +452,9 @@ The Apache Software License, Version 2.0
* Apache Yetus
- org.apache.yetus-audience-annotations-0.12.0.jar
* Kubernetes Client
- - io.kubernetes-client-java-12.0.1.jar
- - io.kubernetes-client-java-api-12.0.1.jar
- - io.kubernetes-client-java-proto-12.0.1.jar
+ - io.kubernetes-client-java-18.0.0.jar
+ - io.kubernetes-client-java-api-18.0.0.jar
+ - io.kubernetes-client-java-proto-18.0.0.jar
* Dropwizard
- io.dropwizard.metrics-metrics-core-4.1.12.1.jar
- io.dropwizard.metrics-metrics-graphite-4.1.12.1.jar
@@ -468,11 +469,12 @@ The Apache Software License, Version 2.0
* JCTools - Java Concurrency Tools for the JVM
- org.jctools-jctools-core-2.1.2.jar
* Vertx
- - io.vertx-vertx-auth-common-3.9.8.jar
- - io.vertx-vertx-bridge-common-3.9.8.jar
- - io.vertx-vertx-core-3.9.8.jar
- - io.vertx-vertx-web-3.9.8.jar
- - io.vertx-vertx-web-common-3.9.8.jar
+ - io.vertx-vertx-auth-common-4.3.8.jar
+ - io.vertx-vertx-bridge-common-4.3.8.jar
+ - io.vertx-vertx-core-4.3.8.jar
+ - io.vertx-vertx-web-4.3.8.jar
+ - io.vertx-vertx-web-common-4.3.8.jar
+ - io.vertx-vertx-grpc-4.3.5.jar
* Apache ZooKeeper
- org.apache.zookeeper-zookeeper-3.8.1.jar
- org.apache.zookeeper-zookeeper-jute-3.8.1.jar
@@ -485,8 +487,10 @@ The Apache Software License, Version 2.0
- com.google.auto.value-auto-value-annotations-1.9.jar
- com.google.re2j-re2j-1.5.jar
* Jetcd
- - io.etcd-jetcd-common-0.5.11.jar
- - io.etcd-jetcd-core-0.5.11.jar
+ - io.etcd-jetcd-api-0.7.5.jar
+ - io.etcd-jetcd-common-0.7.5.jar
+ - io.etcd-jetcd-core-0.7.5.jar
+ - io.etcd-jetcd-grpc-0.7.5.jar
* IPAddress
- com.github.seancfoley-ipaddress-5.3.3.jar
* RxJava
@@ -494,7 +498,7 @@ The Apache Software License, Version 2.0
* RabbitMQ Java Client
- com.rabbitmq-amqp-client-5.5.3.jar
* RoaringBitmap
- - org.roaringbitmap-RoaringBitmap-0.9.15.jar
+ - org.roaringbitmap-RoaringBitmap-0.9.44.jar
BSD 3-clause "New" or "Revised" License
* Google auth library
@@ -517,6 +521,9 @@ MIT License
- org.checkerframework-checker-qual-3.12.0.jar
* oshi
- com.github.oshi-oshi-core-java11-6.4.0.jar
+ * Auth0, Inc.
+ - com.auth0-java-jwt-4.3.0.jar
+ - com.auth0-jwks-rsa-0.22.0.jar
Protocol Buffers License
* Protocol Buffers
- com.google.protobuf-protobuf-java-3.19.6.jar -- ../licenses/LICENSE-protobuf.txt
diff --git a/distribution/shell/pom.xml b/distribution/shell/pom.xml
index b38baee4257ba..9e3134a75e5bf 100644
--- a/distribution/shell/pom.xml
+++ b/distribution/shell/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsardistribution
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/distribution/shell/src/assemble/LICENSE.bin.txt b/distribution/shell/src/assemble/LICENSE.bin.txt
index 90896790b1fba..c04ac2b7d0363 100644
--- a/distribution/shell/src/assemble/LICENSE.bin.txt
+++ b/distribution/shell/src/assemble/LICENSE.bin.txt
@@ -311,17 +311,17 @@ This projects includes binary packages with the following licenses:
The Apache Software License, Version 2.0
* JCommander -- jcommander-1.82.jar
* Jackson
- - jackson-annotations-2.13.4.jar
- - jackson-core-2.13.4.jar
- - jackson-databind-2.13.4.2.jar
- - jackson-dataformat-yaml-2.13.4.jar
- - jackson-jaxrs-base-2.13.4.jar
- - jackson-jaxrs-json-provider-2.13.4.jar
- - jackson-module-jaxb-annotations-2.13.4.jar
- - jackson-module-jsonSchema-2.13.4.jar
- - jackson-datatype-jdk8-2.13.4.jar
- - jackson-datatype-jsr310-2.13.4.jar
- - jackson-module-parameter-names-2.13.4.jar
+ - jackson-annotations-2.14.2.jar
+ - jackson-core-2.14.2.jar
+ - jackson-databind-2.14.2.jar
+ - jackson-dataformat-yaml-2.14.2.jar
+ - jackson-jaxrs-base-2.14.2.jar
+ - jackson-jaxrs-json-provider-2.14.2.jar
+ - jackson-module-jaxb-annotations-2.14.2.jar
+ - jackson-module-jsonSchema-2.14.2.jar
+ - jackson-datatype-jdk8-2.14.2.jar
+ - jackson-datatype-jsr310-2.14.2.jar
+ - jackson-module-parameter-names-2.14.2.jar
* Conscrypt -- conscrypt-openjdk-uber-2.5.2.jar
* Gson
- gson-2.8.9.jar
@@ -348,35 +348,35 @@ The Apache Software License, Version 2.0
- commons-text-1.10.0.jar
- commons-compress-1.21.jar
* Netty
- - netty-buffer-4.1.89.Final.jar
- - netty-codec-4.1.89.Final.jar
- - netty-codec-dns-4.1.89.Final.jar
- - netty-codec-http-4.1.89.Final.jar
- - netty-codec-socks-4.1.89.Final.jar
- - netty-codec-haproxy-4.1.89.Final.jar
- - netty-common-4.1.89.Final.jar
- - netty-handler-4.1.89.Final.jar
- - netty-handler-proxy-4.1.89.Final.jar
- - netty-resolver-4.1.89.Final.jar
- - netty-resolver-dns-4.1.89.Final.jar
- - netty-transport-4.1.89.Final.jar
- - netty-transport-classes-epoll-4.1.89.Final.jar
- - netty-transport-native-epoll-4.1.89.Final-linux-x86_64.jar
- - netty-transport-native-unix-common-4.1.89.Final.jar
- - netty-transport-native-unix-common-4.1.89.Final-linux-x86_64.jar
- - netty-tcnative-boringssl-static-2.0.56.Final.jar
- - netty-tcnative-boringssl-static-2.0.56.Final-linux-aarch_64.jar
- - netty-tcnative-boringssl-static-2.0.56.Final-linux-x86_64.jar
- - netty-tcnative-boringssl-static-2.0.56.Final-osx-aarch_64.jar
- - netty-tcnative-boringssl-static-2.0.56.Final-osx-x86_64.jar
- - netty-tcnative-boringssl-static-2.0.56.Final-windows-x86_64.jar
- - netty-tcnative-classes-2.0.56.Final.jar
- - netty-incubator-transport-classes-io_uring-0.0.18.Final.jar
- - netty-incubator-transport-native-io_uring-0.0.18.Final-linux-aarch_64.jar
- - netty-incubator-transport-native-io_uring-0.0.18.Final-linux-x86_64.jar
- - netty-resolver-dns-classes-macos-4.1.89.Final.jar
- - netty-resolver-dns-native-macos-4.1.89.Final-osx-aarch_64.jar
- - netty-resolver-dns-native-macos-4.1.89.Final-osx-x86_64.jar
+ - netty-buffer-4.1.93.Final.jar
+ - netty-codec-4.1.93.Final.jar
+ - netty-codec-dns-4.1.93.Final.jar
+ - netty-codec-http-4.1.93.Final.jar
+ - netty-codec-socks-4.1.93.Final.jar
+ - netty-codec-haproxy-4.1.93.Final.jar
+ - netty-common-4.1.93.Final.jar
+ - netty-handler-4.1.93.Final.jar
+ - netty-handler-proxy-4.1.93.Final.jar
+ - netty-resolver-4.1.93.Final.jar
+ - netty-resolver-dns-4.1.93.Final.jar
+ - netty-transport-4.1.93.Final.jar
+ - netty-transport-classes-epoll-4.1.93.Final.jar
+ - netty-transport-native-epoll-4.1.93.Final-linux-x86_64.jar
+ - netty-transport-native-unix-common-4.1.93.Final.jar
+ - netty-transport-native-unix-common-4.1.93.Final-linux-x86_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-linux-aarch_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-linux-x86_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-osx-aarch_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-osx-x86_64.jar
+ - netty-tcnative-boringssl-static-2.0.61.Final-windows-x86_64.jar
+ - netty-tcnative-classes-2.0.61.Final.jar
+ - netty-incubator-transport-classes-io_uring-0.0.21.Final.jar
+ - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-aarch_64.jar
+ - netty-incubator-transport-native-io_uring-0.0.21.Final-linux-x86_64.jar
+ - netty-resolver-dns-classes-macos-4.1.93.Final.jar
+ - netty-resolver-dns-native-macos-4.1.93.Final-osx-aarch_64.jar
+ - netty-resolver-dns-native-macos-4.1.93.Final-osx-x86_64.jar
* Prometheus client
- simpleclient-0.16.0.jar
- simpleclient_log4j2-0.16.0.jar
@@ -390,9 +390,9 @@ The Apache Software License, Version 2.0
- log4j-web-2.18.0.jar
* BookKeeper
- - bookkeeper-common-allocator-4.15.4.jar
- - cpu-affinity-4.15.4.jar
- - circe-checksum-4.15.4.jar
+ - bookkeeper-common-allocator-4.16.1.jar
+ - cpu-affinity-4.16.1.jar
+ - circe-checksum-4.16.1.jar
* AirCompressor
- aircompressor-0.20.jar
* AsyncHttpClient
@@ -407,7 +407,7 @@ The Apache Software License, Version 2.0
- websocket-api-9.4.48.v20220622.jar
- websocket-client-9.4.48.v20220622.jar
- websocket-common-9.4.48.v20220622.jar
- * SnakeYaml -- snakeyaml-1.32.jar
+ * SnakeYaml -- snakeyaml-2.0.jar
* Google Error Prone Annotations - error_prone_annotations-2.5.1.jar
* Javassist -- javassist-3.25.0-GA.jar
* Apache Avro
diff --git a/docker/pom.xml b/docker/pom.xml
index 4d3b05fe33a76..882240925ef24 100644
--- a/docker/pom.xml
+++ b/docker/pom.xml
@@ -26,7 +26,7 @@
org.apache.pulsarpulsar
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOTdocker-imagesApache Pulsar :: Docker Images
diff --git a/docker/publish.sh b/docker/publish.sh
index af0d72d4b3437..45b338d85f8ef 100755
--- a/docker/publish.sh
+++ b/docker/publish.sh
@@ -62,11 +62,11 @@ set -x
# Fail if any of the subsequent commands fail
set -e
-docker tag pulsar:latest ${docker_registry_org}/pulsar:latest
-docker tag pulsar-all:latest ${docker_registry_org}/pulsar-all:latest
+docker tag apachepulsar/pulsar:latest ${docker_registry_org}/pulsar:latest
+docker tag apachepulsar/pulsar-all:latest ${docker_registry_org}/pulsar-all:latest
-docker tag pulsar:latest ${docker_registry_org}/pulsar:$MVN_VERSION
-docker tag pulsar-all:latest ${docker_registry_org}/pulsar-all:$MVN_VERSION
+docker tag apachepulsar/pulsar:latest ${docker_registry_org}/pulsar:$MVN_VERSION
+docker tag apachepulsar/pulsar-all:latest ${docker_registry_org}/pulsar-all:$MVN_VERSION
# Push all images and tags
docker push ${docker_registry_org}/pulsar:latest
diff --git a/docker/pulsar-all/pom.xml b/docker/pulsar-all/pom.xml
index c63ff6d656957..7a2f492632135 100644
--- a/docker/pulsar-all/pom.xml
+++ b/docker/pulsar-all/pom.xml
@@ -23,7 +23,7 @@
org.apache.pulsardocker-images
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT4.0.0pulsar-all-docker-image
@@ -68,10 +68,6 @@
docker
-
- target/apache-pulsar-io-connectors-${project.version}-bin
- target/pulsar-offloader-distribution-${project.version}-bin.tar.gz
-
@@ -143,17 +139,25 @@
- pulsar-all
+ ${docker.organization}/pulsar-all${project.basedir}latest
+ ${project.version}
+
+ target/apache-pulsar-io-connectors-${project.version}-bin
+ target/pulsar-offloader-distribution-${project.version}-bin.tar.gz
+
+
+
+ ${docker.platforms}
+
+
- latest
- ${docker.organization}
@@ -161,5 +165,29 @@
+
+
+ docker-push
+
+
+
+ io.fabric8
+ docker-maven-plugin
+
+
+ default
+ package
+
+ build
+ tag
+ push
+
+
+
+
+
+
+
+
diff --git a/docker/pulsar/pom.xml b/docker/pulsar/pom.xml
index 647f68bf1672c..e1c1503a3f381 100644
--- a/docker/pulsar/pom.xml
+++ b/docker/pulsar/pom.xml
@@ -23,7 +23,7 @@
org.apache.pulsardocker-images
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT4.0.0pulsar-docker-image
@@ -47,15 +47,14 @@
+
+ mirror://mirrors.ubuntu.com/mirrors.txt
+ http://security.ubuntu.com/ubuntu/
+
+
docker
-
- target/pulsar-server-distribution-${project.version}-bin.tar.gz
- ${pulsar.client.python.version}
- ${env.UBUNTU_MIRROR}
- ${env.UBUNTU_SECURITY_MIRROR}
-
@@ -72,17 +71,27 @@
- pulsar
+ ${docker.organization}/pulsar
+
+ target/pulsar-server-distribution-${project.version}-bin.tar.gz
+ ${pulsar.client.python.version}
+ ${UBUNTU_MIRROR}
+ ${UBUNTU_SECURITY_MIRROR}
+ ${project.basedir}latest
+ ${project.version}
+
+
+ ${docker.platforms}
+
+
- latest
- ${docker.organization}
@@ -108,5 +117,29 @@
+
+
+ docker-push
+
+
+
+ io.fabric8
+ docker-maven-plugin
+
+
+ default
+ package
+
+ build
+ tag
+ push
+
+
+
+
+
+
+
+
diff --git a/jclouds-shaded/pom.xml b/jclouds-shaded/pom.xml
index d4138ea041317..dfb155c2d5a7d 100644
--- a/jclouds-shaded/pom.xml
+++ b/jclouds-shaded/pom.xml
@@ -26,7 +26,7 @@
org.apache.pulsarpulsar
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/managed-ledger/pom.xml b/managed-ledger/pom.xml
index 2a7b9c576d318..a8cb560b7b376 100644
--- a/managed-ledger/pom.xml
+++ b/managed-ledger/pom.xml
@@ -25,7 +25,7 @@
org.apache.pulsarpulsar
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOT..
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java
index 42f92359f9a94..7ecb8f08d573d 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderFactory.java
@@ -42,7 +42,7 @@ public interface LedgerOffloaderFactory {
boolean isDriverSupported(String driverName);
/**
- * Create a ledger offloader with the provided configuration, user-metadata and scheduler.
+ * Create a ledger offloader with the provided configuration, user-metadata, scheduler and offloaderStats.
*
* @param offloadPolicies offload policies
* @param userMetadata user metadata
@@ -50,12 +50,29 @@ public interface LedgerOffloaderFactory {
* @return the offloader instance
* @throws IOException when fail to create an offloader
*/
+ T create(OffloadPoliciesImpl offloadPolicies,
+ Map userMetadata,
+ OrderedScheduler scheduler)
+ throws IOException;
+
+
+ /**
+ * Create a ledger offloader with the provided configuration, user-metadata, scheduler and offloaderStats.
+ *
+ * @param offloadPolicies offload policies
+ * @param userMetadata user metadata
+ * @param scheduler scheduler
+ * @param offloaderStats offloaderStats
+ * @return the offloader instance
+ * @throws IOException when fail to create an offloader
+ */
T create(OffloadPoliciesImpl offloadPolicies,
Map userMetadata,
OrderedScheduler scheduler,
LedgerOffloaderStats offloaderStats)
throws IOException;
+
/**
* Create a ledger offloader with the provided configuration, user-metadata, schema storage and scheduler.
*
@@ -66,6 +83,26 @@ T create(OffloadPoliciesImpl offloadPolicies,
* @return the offloader instance
* @throws IOException when fail to create an offloader
*/
+ default T create(OffloadPoliciesImpl offloadPolicies,
+ Map userMetadata,
+ SchemaStorage schemaStorage,
+ OrderedScheduler scheduler)
+ throws IOException {
+ return create(offloadPolicies, userMetadata, scheduler);
+ }
+
+ /**
+ * Create a ledger offloader with the provided configuration, user-metadata, schema storage,
+ * scheduler and offloaderStats.
+ *
+ * @param offloadPolicies offload policies
+ * @param userMetadata user metadata
+ * @param schemaStorage used for schema lookup in offloader
+ * @param scheduler scheduler
+ * @param offloaderStats offloaderStats
+ * @return the offloader instance
+ * @throws IOException when fail to create an offloader
+ */
default T create(OffloadPoliciesImpl offloadPolicies,
Map userMetadata,
SchemaStorage schemaStorage,
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java
index 0fe0f453347bf..eeac9cfcfa994 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/LedgerOffloaderStatsDisable.java
@@ -20,9 +20,9 @@
import java.util.concurrent.TimeUnit;
-class LedgerOffloaderStatsDisable implements LedgerOffloaderStats {
+public class LedgerOffloaderStatsDisable implements LedgerOffloaderStats {
- static final LedgerOffloaderStats INSTANCE = new LedgerOffloaderStatsDisable();
+ public static final LedgerOffloaderStats INSTANCE = new LedgerOffloaderStatsDisable();
private LedgerOffloaderStatsDisable() {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java
index 7802ed07781ba..edbfa0b43204e 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedCursor.java
@@ -786,6 +786,12 @@ Set extends Position> asyncReplayEntries(
*/
long getEstimatedSizeSinceMarkDeletePosition();
+ /**
+ * If a ledger is lost, this ledger will be skipped after enabled "autoSkipNonRecoverableData", and the method is
+ * used to delete information about this ledger in the ManagedCursor.
+ */
+ default void skipNonRecoverableLedger(long ledgerId){}
+
/**
* Returns cursor throttle mark-delete rate.
*
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java
index 4ca56508891a1..c7dd8ea9129b7 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedger.java
@@ -631,6 +631,12 @@ void asyncSetProperties(Map properties, AsyncCallbacks.UpdatePro
*/
void trimConsumedLedgersInBackground(CompletableFuture> promise);
+ /**
+ * If a ledger is lost, this ledger will be skipped after enabled "autoSkipNonRecoverableData", and the method is
+ * used to delete information about this ledger in the ManagedCursor.
+ */
+ default void skipNonRecoverableLedger(long ledgerId){}
+
/**
* Roll current ledger if it is full.
*/
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java
index 6e88a8e650d58..0c93a5b642cf6 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerConfig.java
@@ -62,7 +62,7 @@ public class ManagedLedgerConfig {
private int ledgerRolloverTimeout = 4 * 3600;
private double throttleMarkDelete = 0;
private long retentionTimeMs = 0;
- private int retentionSizeInMB = 0;
+ private long retentionSizeInMB = 0;
private boolean autoSkipNonRecoverableData;
private boolean lazyCursorRecovery = false;
private long metadataOperationsTimeoutSeconds = 60;
@@ -396,7 +396,7 @@ public ManagedLedgerConfig setThrottleMarkDelete(double throttleMarkDelete) {
/**
* Set the retention time for the ManagedLedger.
*
- * Retention time and retention size ({@link #setRetentionSizeInMB(int)}) are together used to retain the
+ * Retention time and retention size ({@link #setRetentionSizeInMB(long)}) are together used to retain the
* ledger data when there are no cursors or when all the cursors have marked the data for deletion.
* Data will be deleted in this case when both retention time and retention size settings don't prevent deleting
* the data marked for deletion.
@@ -438,7 +438,7 @@ public long getRetentionTimeMillis() {
* @param retentionSizeInMB
* quota for message retention
*/
- public ManagedLedgerConfig setRetentionSizeInMB(int retentionSizeInMB) {
+ public ManagedLedgerConfig setRetentionSizeInMB(long retentionSizeInMB) {
this.retentionSizeInMB = retentionSizeInMB;
return this;
}
@@ -447,7 +447,7 @@ public ManagedLedgerConfig setRetentionSizeInMB(int retentionSizeInMB) {
* @return quota for message retention
*
*/
- public int getRetentionSizeInMB() {
+ public long getRetentionSizeInMB() {
return retentionSizeInMB;
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java
index 5aa4e8374d73a..8a4b4d4013f8f 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerFactoryConfig.java
@@ -92,8 +92,30 @@ public class ManagedLedgerFactoryConfig {
*/
private String managedLedgerInfoCompressionType = MLDataFormats.CompressionType.NONE.name();
+ /**
+ * ManagedLedgerInfo compression threshold. If the origin metadata size below configuration.
+ * compression will not apply.
+ */
+ private long managedLedgerInfoCompressionThresholdInBytes = 0;
+
/**
* ManagedCursorInfo compression type. If the compression type is null or invalid, don't compress data.
*/
private String managedCursorInfoCompressionType = MLDataFormats.CompressionType.NONE.name();
+
+ /**
+ * ManagedCursorInfo compression threshold. If the origin metadata size below configuration.
+ * compression will not apply.
+ */
+ private long managedCursorInfoCompressionThresholdInBytes = 0;
+
+ public MetadataCompressionConfig getCompressionConfigForManagedLedgerInfo() {
+ return new MetadataCompressionConfig(managedLedgerInfoCompressionType,
+ managedLedgerInfoCompressionThresholdInBytes);
+ }
+
+ public MetadataCompressionConfig getCompressionConfigForManagedCursorInfo() {
+ return new MetadataCompressionConfig(managedCursorInfoCompressionType,
+ managedCursorInfoCompressionThresholdInBytes);
+ }
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java
index 94c2f61e00afe..50a3ffb157961 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/ManagedLedgerMXBean.java
@@ -100,6 +100,11 @@ public interface ManagedLedgerMXBean {
*/
long getReadEntriesErrors();
+ /**
+ * @return the number of readEntries requests that cache miss Rate
+ */
+ double getReadEntriesOpsCacheMissesRate();
+
// Entry size statistics
double getEntrySizeAverage();
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/MetadataCompressionConfig.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/MetadataCompressionConfig.java
new file mode 100644
index 0000000000000..601c270ab7680
--- /dev/null
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/MetadataCompressionConfig.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.bookkeeper.mledger;
+
+import lombok.AllArgsConstructor;
+import lombok.Data;
+import lombok.ToString;
+import org.apache.bookkeeper.mledger.proto.MLDataFormats;
+import org.apache.commons.lang.StringUtils;
+
+@Data
+@AllArgsConstructor
+@ToString
+public class MetadataCompressionConfig {
+ MLDataFormats.CompressionType compressionType;
+ long compressSizeThresholdInBytes;
+
+ public MetadataCompressionConfig(String compressionType) throws IllegalArgumentException {
+ this(compressionType, 0);
+ }
+
+ public MetadataCompressionConfig(String compressionType, long compressThreshold) throws IllegalArgumentException {
+ this.compressionType = parseCompressionType(compressionType);
+ this.compressSizeThresholdInBytes = compressThreshold;
+ }
+
+ public static MetadataCompressionConfig noCompression =
+ new MetadataCompressionConfig(MLDataFormats.CompressionType.NONE, 0);
+
+ private MLDataFormats.CompressionType parseCompressionType(String value) throws IllegalArgumentException {
+ if (StringUtils.isEmpty(value)) {
+ return MLDataFormats.CompressionType.NONE;
+ }
+
+ MLDataFormats.CompressionType compressionType;
+ compressionType = MLDataFormats.CompressionType.valueOf(value);
+
+ return compressionType;
+ }
+}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java
index 5851395b08566..1ce0403a54762 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedCursorImpl.java
@@ -25,7 +25,6 @@
import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.DEFAULT_LEDGER_DELETE_RETRIES;
import static org.apache.bookkeeper.mledger.impl.ManagedLedgerImpl.createManagedLedgerException;
import static org.apache.bookkeeper.mledger.util.Errors.isNoSuchLedgerExistsException;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.MoreObjects;
import com.google.common.collect.Collections2;
@@ -363,8 +362,7 @@ private CompletableFuture computeCursorProperties(
name, copy, lastCursorLedgerStat, new MetaStoreCallback<>() {
@Override
public void operationComplete(Void result, Stat stat) {
- log.info("[{}] Updated ledger cursor: {} properties {}", ledger.getName(),
- name, cursorProperties);
+ log.info("[{}] Updated ledger cursor: {}", ledger.getName(), name);
ManagedCursorImpl.this.cursorProperties = Collections.unmodifiableMap(newProperties);
updateCursorLedgerStat(copy, stat);
updateCursorPropertiesResult.complete(result);
@@ -373,7 +371,7 @@ public void operationComplete(Void result, Stat stat) {
@Override
public void operationFailed(MetaStoreException e) {
log.error("[{}] Error while updating ledger cursor: {} properties {}", ledger.getName(),
- name, cursorProperties, e);
+ name, newProperties, e);
updateCursorPropertiesResult.completeExceptionally(e);
}
});
@@ -1359,7 +1357,7 @@ public void asyncResetCursor(Position newPos, boolean forceReset, AsyncCallbacks
final PositionImpl newPosition = (PositionImpl) newPos;
// order trim and reset operations on a ledger
- ledger.getExecutor().execute(safeRun(() -> {
+ ledger.getExecutor().execute(() -> {
PositionImpl actualPosition = newPosition;
if (!ledger.isValidPosition(actualPosition)
@@ -1376,7 +1374,7 @@ public void asyncResetCursor(Position newPos, boolean forceReset, AsyncCallbacks
}
internalResetCursor(actualPosition, callback);
- }));
+ });
}
@Override
@@ -1781,7 +1779,6 @@ long getNumIndividualDeletedEntriesToSkip(long numEntries) {
} finally {
if (r.lowerEndpoint() instanceof PositionImplRecyclable) {
((PositionImplRecyclable) r.lowerEndpoint()).recycle();
- ((PositionImplRecyclable) r.upperEndpoint()).recycle();
}
}
}, recyclePositionRangeConverter);
@@ -2056,7 +2053,7 @@ void internalMarkDelete(final MarkDeleteEntry mdEntry) {
+ "is later.", mdEntry.newPosition, persistentMarkDeletePosition);
}
// run with executor to prevent deadlock
- ledger.getExecutor().execute(safeRun(() -> mdEntry.triggerComplete()));
+ ledger.getExecutor().execute(() -> mdEntry.triggerComplete());
return;
}
@@ -2075,7 +2072,7 @@ void internalMarkDelete(final MarkDeleteEntry mdEntry) {
+ "in progress {} is later.", mdEntry.newPosition, inProgressLatest);
}
// run with executor to prevent deadlock
- ledger.getExecutor().execute(safeRun(() -> mdEntry.triggerComplete()));
+ ledger.getExecutor().execute(() -> mdEntry.triggerComplete());
return;
}
@@ -2612,8 +2609,8 @@ private boolean shouldPersistUnackRangesToLedger() {
private void persistPositionMetaStore(long cursorsLedgerId, PositionImpl position, Map properties,
MetaStoreCallback callback, boolean persistIndividualDeletedMessageRanges) {
if (state == State.Closed) {
- ledger.getExecutor().execute(safeRun(() -> callback.operationFailed(new MetaStoreException(
- new CursorAlreadyClosedException(name + " cursor already closed")))));
+ ledger.getExecutor().execute(() -> callback.operationFailed(new MetaStoreException(
+ new CursorAlreadyClosedException(name + " cursor already closed"))));
return;
}
@@ -2720,6 +2717,46 @@ void setReadPosition(Position newReadPositionInt) {
}
}
+ /**
+ * Manually acknowledge all entries in the lost ledger.
+ * - Since this is an uncommon event, we focus on maintainability. So we do not modify
+ * {@link #individualDeletedMessages} and {@link #batchDeletedIndexes}, but call
+ * {@link #asyncDelete(Position, AsyncCallbacks.DeleteCallback, Object)}.
+ * - This method is valid regardless of the consumer ACK type.
+ * - If there is a consumer ack request after this event, it will also work.
+ */
+ @Override
+ public void skipNonRecoverableLedger(final long ledgerId){
+ LedgerInfo ledgerInfo = ledger.getLedgersInfo().get(ledgerId);
+ if (ledgerInfo == null) {
+ return;
+ }
+ lock.writeLock().lock();
+ log.warn("[{}] [{}] Since the ledger [{}] is lost and the autoSkipNonRecoverableData is true, this ledger will"
+ + " be auto acknowledge in subscription", ledger.getName(), name, ledgerId);
+ try {
+ for (int i = 0; i < ledgerInfo.getEntries(); i++) {
+ if (!individualDeletedMessages.contains(ledgerId, i)) {
+ asyncDelete(PositionImpl.get(ledgerId, i), new AsyncCallbacks.DeleteCallback() {
+ @Override
+ public void deleteComplete(Object ctx) {
+ // ignore.
+ }
+
+ @Override
+ public void deleteFailed(ManagedLedgerException ex, Object ctx) {
+ // The method internalMarkDelete already handled the failure operation. We only need to
+ // make sure the memory state is updated.
+ // If the broker crashed, the non-recoverable ledger will be detected again.
+ }
+ }, null);
+ }
+ }
+ } finally {
+ lock.writeLock().unlock();
+ }
+ }
+
// //////////////////////////////////////////////////
void startCreatingNewMetadataLedger() {
@@ -2846,7 +2883,7 @@ private CompletableFuture doCreateNewMetadataLedger() {
return;
}
- ledger.getExecutor().execute(safeRun(() -> {
+ ledger.getExecutor().execute(() -> {
ledger.mbean.endCursorLedgerCreateOp();
if (rc != BKException.Code.OK) {
log.warn("[{}] Error creating ledger for cursor {}: {}", ledger.getName(), name,
@@ -2859,7 +2896,7 @@ private CompletableFuture doCreateNewMetadataLedger() {
log.debug("[{}] Created ledger {} for cursor {}", ledger.getName(), lh.getId(), name);
}
future.complete(lh);
- }));
+ });
}, LedgerMetadataUtils.buildAdditionalMetadataForCursor(name));
return future;
@@ -3193,7 +3230,7 @@ private void asyncDeleteLedger(final LedgerHandle lh, int retry) {
log.warn("[{}] Failed to delete ledger {}: {}", ledger.getName(), lh.getId(),
BKException.getMessage(rc));
if (!isNoSuchLedgerExistsException(rc)) {
- ledger.getScheduledExecutor().schedule(safeRun(() -> asyncDeleteLedger(lh, retry - 1)),
+ ledger.getScheduledExecutor().schedule(() -> asyncDeleteLedger(lh, retry - 1),
DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
}
return;
@@ -3228,7 +3265,7 @@ private void asyncDeleteCursorLedger(int retry) {
log.warn("[{}][{}] Failed to delete ledger {}: {}", ledger.getName(), name, cursorLedger.getId(),
BKException.getMessage(rc));
if (!isNoSuchLedgerExistsException(rc)) {
- ledger.getScheduledExecutor().schedule(safeRun(() -> asyncDeleteCursorLedger(retry - 1)),
+ ledger.getScheduledExecutor().schedule(() -> asyncDeleteCursorLedger(retry - 1),
DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
}
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java
index 9f3fe9bb0c4a7..f076f68299dd0 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerFactoryImpl.java
@@ -193,8 +193,9 @@ private ManagedLedgerFactoryImpl(MetadataStoreExtended metadataStore,
this.bookkeeperFactory = bookKeeperGroupFactory;
this.isBookkeeperManaged = isBookkeeperManaged;
this.metadataStore = metadataStore;
- this.store = new MetaStoreImpl(metadataStore, scheduledExecutor, config.getManagedLedgerInfoCompressionType(),
- config.getManagedCursorInfoCompressionType());
+ this.store = new MetaStoreImpl(metadataStore, scheduledExecutor,
+ config.getCompressionConfigForManagedLedgerInfo(),
+ config.getCompressionConfigForManagedCursorInfo());
this.config = config;
this.mbean = new ManagedLedgerFactoryMBeanImpl(this);
this.entryCacheManager = new RangeEntryCacheManagerImpl(this);
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
index 8376ee1bb8467..9b3d7e46aaa8c 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerImpl.java
@@ -22,7 +22,6 @@
import static com.google.common.base.Preconditions.checkState;
import static java.lang.Math.min;
import static org.apache.bookkeeper.mledger.util.Errors.isNoSuchLedgerExistsException;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.collect.BoundType;
import com.google.common.collect.Lists;
@@ -409,7 +408,7 @@ public void operationComplete(ManagedLedgerInfo mlInfo, Stat stat) {
if (!ledgers.isEmpty()) {
final long id = ledgers.lastKey();
OpenCallback opencb = (rc, lh, ctx1) -> {
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
mbean.endDataLedgerOpenOp();
if (log.isDebugEnabled()) {
log.debug("[{}] Opened ledger {}: {}", name, id, BKException.getMessage(rc));
@@ -439,7 +438,7 @@ public void operationComplete(ManagedLedgerInfo mlInfo, Stat stat) {
callback.initializeFailed(createManagedLedgerException(rc));
return;
}
- }));
+ });
};
if (log.isDebugEnabled()) {
@@ -522,7 +521,7 @@ public void operationFailed(MetaStoreException e) {
return;
}
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
mbean.endDataLedgerCreateOp();
if (rc != BKException.Code.OK) {
callback.initializeFailed(createManagedLedgerException(rc));
@@ -551,7 +550,7 @@ public void operationFailed(MetaStoreException e) {
// Save it back to ensure all nodes exist
store.asyncUpdateLedgerIds(name, getManagedLedgerInfo(), ledgersStat, storeLedgersCb);
- }));
+ });
}, ledgerMetadata);
}
@@ -774,10 +773,10 @@ public void asyncAddEntry(ByteBuf buffer, AddEntryCallback callback, Object ctx)
buffer.retain();
// Jump to specific thread to avoid contention from writers writing from different threads
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
OpAddEntry addOperation = OpAddEntry.createNoRetainBuffer(this, buffer, callback, ctx);
internalAsyncAddEntry(addOperation);
- }));
+ });
}
@Override
@@ -790,10 +789,10 @@ public void asyncAddEntry(ByteBuf buffer, int numberOfMessages, AddEntryCallback
buffer.retain();
// Jump to specific thread to avoid contention from writers writing from different threads
- executor.execute(safeRun(() -> {
+ executor.execute(() -> {
OpAddEntry addOperation = OpAddEntry.createNoRetainBuffer(this, buffer, numberOfMessages, callback, ctx);
internalAsyncAddEntry(addOperation);
- }));
+ });
}
protected synchronized void internalAsyncAddEntry(OpAddEntry addOperation) {
@@ -1742,6 +1741,13 @@ synchronized void ledgerClosed(final LedgerHandle lh) {
}
}
+ @Override
+ public void skipNonRecoverableLedger(long ledgerId){
+ for (ManagedCursor managedCursor : cursors) {
+ managedCursor.skipNonRecoverableLedger(ledgerId);
+ }
+ }
+
synchronized void createLedgerAfterClosed() {
if (isNeededCreateNewLedgerAfterCloseLedger()) {
log.info("[{}] Creating a new ledger after closed", name);
@@ -1776,15 +1782,19 @@ public void closeComplete(int rc, LedgerHandle lh, Object o) {
+ "acked ledgerId %s", currentLedger.getId(), lh.getId());
if (rc == BKException.Code.OK) {
- log.debug("Successfully closed ledger {}", lh.getId());
+ if (log.isDebugEnabled()) {
+ log.debug("[{}] Successfully closed ledger {}, trigger by rollover full ledger",
+ name, lh.getId());
+ }
} else {
- log.warn("Error when closing ledger {}. Status={}", lh.getId(), BKException.getMessage(rc));
+ log.warn("[{}] Error when closing ledger {}, trigger by rollover full ledger, Status={}",
+ name, lh.getId(), BKException.getMessage(rc));
}
ledgerClosed(lh);
createLedgerAfterClosed();
}
- }, System.nanoTime());
+ }, null);
}
}
@@ -2374,7 +2384,7 @@ void notifyCursors() {
break;
}
- executor.execute(safeRun(waitingCursor::notifyEntriesAvailable));
+ executor.execute(waitingCursor::notifyEntriesAvailable);
}
}
@@ -2385,7 +2395,7 @@ void notifyWaitingEntryCallBacks() {
break;
}
- executor.execute(safeRun(cb::entriesAvailable));
+ executor.execute(cb::entriesAvailable);
}
}
@@ -2432,16 +2442,16 @@ private void trimConsumedLedgersInBackground() {
@Override
public void trimConsumedLedgersInBackground(CompletableFuture> promise) {
- executor.execute(safeRun(() -> internalTrimConsumedLedgers(promise)));
+ executor.execute(() -> internalTrimConsumedLedgers(promise));
}
public void trimConsumedLedgersInBackground(boolean isTruncate, CompletableFuture> promise) {
- executor.execute(safeRun(() -> internalTrimLedgers(isTruncate, promise)));
+ executor.execute(() -> internalTrimLedgers(isTruncate, promise));
}
private void scheduleDeferredTrimming(boolean isTruncate, CompletableFuture> promise) {
- scheduledExecutor.schedule(safeRun(() -> trimConsumedLedgersInBackground(isTruncate, promise)), 100,
- TimeUnit.MILLISECONDS);
+ scheduledExecutor.schedule(() -> trimConsumedLedgersInBackground(isTruncate, promise),
+ 100, TimeUnit.MILLISECONDS);
}
private void maybeOffloadInBackground(CompletableFuture promise) {
@@ -2456,7 +2466,7 @@ private void maybeOffloadInBackground(CompletableFuture promise) {
final long offloadThresholdInSeconds =
Optional.ofNullable(policies.getManagedLedgerOffloadThresholdInSeconds()).orElse(-1L);
if (offloadThresholdInBytes >= 0 || offloadThresholdInSeconds >= 0) {
- executor.execute(safeRun(() -> maybeOffload(offloadThresholdInBytes, offloadThresholdInSeconds, promise)));
+ executor.execute(() -> maybeOffload(offloadThresholdInBytes, offloadThresholdInSeconds, promise));
}
}
@@ -2477,7 +2487,7 @@ private void maybeOffload(long offloadThresholdInBytes, long offloadThresholdInS
}
if (!offloadMutex.tryLock()) {
- scheduledExecutor.schedule(safeRun(() -> maybeOffloadInBackground(finalPromise)),
+ scheduledExecutor.schedule(() -> maybeOffloadInBackground(finalPromise),
100, TimeUnit.MILLISECONDS);
return;
}
@@ -2956,7 +2966,7 @@ private void asyncDeleteLedger(long ledgerId, long retry) {
log.warn("[{}] Ledger was already deleted {}", name, ledgerId);
} else if (rc != BKException.Code.OK) {
log.error("[{}] Error deleting ledger {} : {}", name, ledgerId, BKException.getMessage(rc));
- scheduledExecutor.schedule(safeRun(() -> asyncDeleteLedger(ledgerId, retry - 1)),
+ scheduledExecutor.schedule(() -> asyncDeleteLedger(ledgerId, retry - 1),
DEFAULT_LEDGER_DELETE_BACKOFF_TIME_SEC, TimeUnit.SECONDS);
} else {
if (log.isDebugEnabled()) {
@@ -3260,7 +3270,7 @@ private void tryTransformLedgerInfo(long ledgerId, LedgerInfoTransformation tran
if (!metadataMutex.tryLock()) {
// retry in 100 milliseconds
scheduledExecutor.schedule(
- safeRun(() -> tryTransformLedgerInfo(ledgerId, transformation, finalPromise)), 100,
+ () -> tryTransformLedgerInfo(ledgerId, transformation, finalPromise), 100,
TimeUnit.MILLISECONDS);
} else { // lock acquired
CompletableFuture unlockingPromise = new CompletableFuture<>();
@@ -4011,9 +4021,8 @@ private void scheduleTimeoutTask() {
timeoutSec = timeoutSec <= 0
? Math.max(config.getAddEntryTimeoutSeconds(), config.getReadEntryTimeoutSeconds())
: timeoutSec;
- this.timeoutTask = this.scheduledExecutor.scheduleAtFixedRate(safeRun(() -> {
- checkTimeouts();
- }), timeoutSec, timeoutSec, TimeUnit.SECONDS);
+ this.timeoutTask = this.scheduledExecutor.scheduleAtFixedRate(
+ this::checkTimeouts, timeoutSec, timeoutSec, TimeUnit.SECONDS);
}
}
@@ -4336,7 +4345,7 @@ protected void updateLastLedgerCreatedTimeAndScheduleRolloverTask() {
checkLedgerRollTask.cancel(true);
}
this.checkLedgerRollTask = this.scheduledExecutor.schedule(
- safeRun(this::rollCurrentLedgerIfFull), this.maximumRolloverTimeMs, TimeUnit.MILLISECONDS);
+ this::rollCurrentLedgerIfFull, this.maximumRolloverTimeMs, TimeUnit.MILLISECONDS);
}
}
@@ -4355,7 +4364,26 @@ public void checkInactiveLedgerAndRollOver() {
long currentTimeMs = System.currentTimeMillis();
if (inactiveLedgerRollOverTimeMs > 0 && currentTimeMs > (lastAddEntryTimeMs + inactiveLedgerRollOverTimeMs)) {
log.info("[{}] Closing inactive ledger, last-add entry {}", name, lastAddEntryTimeMs);
- ledgerClosed(currentLedger);
+ if (STATE_UPDATER.compareAndSet(this, State.LedgerOpened, State.ClosingLedger)) {
+ LedgerHandle currentLedger = this.currentLedger;
+ currentLedger.asyncClose((rc, lh, o) -> {
+ checkArgument(currentLedger.getId() == lh.getId(), "ledgerId %s doesn't match with "
+ + "acked ledgerId %s", currentLedger.getId(), lh.getId());
+
+ if (rc == BKException.Code.OK) {
+ if (log.isDebugEnabled()) {
+ log.debug("[{}] Successfully closed ledger {}, trigger by inactive ledger check",
+ name, lh.getId());
+ }
+ } else {
+ log.warn("[{}] Error when closing ledger {}, trigger by inactive ledger check, Status={}",
+ name, lh.getId(), BKException.getMessage(rc));
+ }
+
+ ledgerClosed(lh);
+ // we do not create ledger here, since topic is inactive for a long time.
+ }, null);
+ }
}
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java
index dad101c9b72d1..e057dee99538e 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerMBeanImpl.java
@@ -39,6 +39,7 @@ public class ManagedLedgerMBeanImpl implements ManagedLedgerMXBean {
private final Rate addEntryOpsFailed = new Rate();
private final Rate readEntriesOps = new Rate();
private final Rate readEntriesOpsFailed = new Rate();
+ private final Rate readEntriesOpsCacheMisses = new Rate();
private final Rate markDeleteOps = new Rate();
private final LongAdder dataLedgerOpenOp = new LongAdder();
@@ -72,6 +73,7 @@ public void refreshStats(long period, TimeUnit unit) {
addEntryOpsFailed.calculateRate(seconds);
readEntriesOps.calculateRate(seconds);
readEntriesOpsFailed.calculateRate(seconds);
+ readEntriesOpsCacheMisses.calculateRate(seconds);
markDeleteOps.calculateRate(seconds);
addEntryLatencyStatsUsec.refresh();
@@ -98,6 +100,10 @@ public void recordReadEntriesError() {
readEntriesOpsFailed.recordEvent();
}
+ public void recordReadEntriesOpsCacheMisses(int count, long totalSize) {
+ readEntriesOpsCacheMisses.recordMultipleEvents(count, totalSize);
+ }
+
public void addAddEntryLatencySample(long latency, TimeUnit unit) {
addEntryLatencyStatsUsec.addValue(unit.toMicros(latency));
}
@@ -228,6 +234,11 @@ public long getReadEntriesErrors() {
return readEntriesOpsFailed.getCount();
}
+ @Override
+ public double getReadEntriesOpsCacheMissesRate() {
+ return readEntriesOpsCacheMisses.getRate();
+ }
+
@Override
public double getMarkDeleteRate() {
return markDeleteOps.getRate();
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java
index bcb73553324dd..d9269ec83b179 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/MetaStoreImpl.java
@@ -23,7 +23,6 @@
import io.netty.buffer.CompositeByteBuf;
import io.netty.buffer.Unpooled;
import java.util.ArrayList;
-import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
@@ -38,15 +37,15 @@
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.MetaStoreException;
import org.apache.bookkeeper.mledger.ManagedLedgerException.MetadataNotFoundException;
+import org.apache.bookkeeper.mledger.MetadataCompressionConfig;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedCursorInfo;
import org.apache.bookkeeper.mledger.proto.MLDataFormats.ManagedLedgerInfo;
-import org.apache.bookkeeper.util.SafeRunnable;
-import org.apache.commons.lang.StringUtils;
import org.apache.pulsar.common.allocator.PulsarByteBufAllocator;
import org.apache.pulsar.common.compression.CompressionCodec;
import org.apache.pulsar.common.compression.CompressionCodecProvider;
+import org.apache.pulsar.common.util.FutureUtil;
import org.apache.pulsar.metadata.api.MetadataStore;
import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.api.Notification;
@@ -63,50 +62,35 @@ public class MetaStoreImpl implements MetaStore, Consumer {
private final OrderedExecutor executor;
private static final int MAGIC_MANAGED_INFO_METADATA = 0x4778; // 0100 0111 0111 1000
- private final CompressionType ledgerInfoCompressionType;
- private final CompressionType cursorInfoCompressionType;
+ private final MetadataCompressionConfig ledgerInfoCompressionConfig;
+ private final MetadataCompressionConfig cursorInfoCompressionConfig;
private final Map> managedLedgerInfoUpdateCallbackMap;
public MetaStoreImpl(MetadataStore store, OrderedExecutor executor) {
this.store = store;
this.executor = executor;
- this.ledgerInfoCompressionType = CompressionType.NONE;
- this.cursorInfoCompressionType = CompressionType.NONE;
+ this.ledgerInfoCompressionConfig = MetadataCompressionConfig.noCompression;
+ this.cursorInfoCompressionConfig = MetadataCompressionConfig.noCompression;
managedLedgerInfoUpdateCallbackMap = new ConcurrentHashMap<>();
if (store != null) {
store.registerListener(this);
}
}
- public MetaStoreImpl(MetadataStore store, OrderedExecutor executor, String ledgerInfoCompressionType,
- String cursorInfoCompressionType) {
+ public MetaStoreImpl(MetadataStore store, OrderedExecutor executor,
+ MetadataCompressionConfig ledgerInfoCompressionConfig,
+ MetadataCompressionConfig cursorInfoCompressionConfig) {
this.store = store;
this.executor = executor;
- this.ledgerInfoCompressionType = parseCompressionType(ledgerInfoCompressionType);
- this.cursorInfoCompressionType = parseCompressionType(cursorInfoCompressionType);
+ this.ledgerInfoCompressionConfig = ledgerInfoCompressionConfig;
+ this.cursorInfoCompressionConfig = cursorInfoCompressionConfig;
managedLedgerInfoUpdateCallbackMap = new ConcurrentHashMap<>();
if (store != null) {
store.registerListener(this);
}
}
- private CompressionType parseCompressionType(String value) {
- if (StringUtils.isEmpty(value)) {
- return CompressionType.NONE;
- }
-
- CompressionType compressionType;
- try {
- compressionType = CompressionType.valueOf(value);
- } catch (Exception e) {
- log.error("Failed to get compression type {} error msg: {}.", value, e.getMessage());
- throw e;
- }
-
- return compressionType;
- }
-
@Override
public void getManagedLedgerInfo(String ledgerName, boolean createIfMissing, Map properties,
MetaStoreCallback callback) {
@@ -155,7 +139,7 @@ public void getManagedLedgerInfo(String ledgerName, boolean createIfMissing, Map
.exceptionally(ex -> {
try {
executor.executeOrdered(ledgerName,
- SafeRunnable.safeRun(() -> callback.operationFailed(getException(ex))));
+ () -> callback.operationFailed(getException(ex)));
} catch (RejectedExecutionException e) {
//executor maybe shutdown, use common pool to run callback.
CompletableFuture.runAsync(() -> callback.operationFailed(getException(ex)));
@@ -182,7 +166,7 @@ public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat)
@Override
public void operationFailed(MetaStoreException e) {
if (e instanceof MetadataNotFoundException) {
- result.complete(Collections.emptyMap());
+ result.complete(new HashMap<>());
} else {
result.completeExceptionally(e);
}
@@ -203,8 +187,8 @@ public void asyncUpdateLedgerIds(String ledgerName, ManagedLedgerInfo mlInfo, St
.thenAcceptAsync(newVersion -> callback.operationComplete(null, newVersion),
executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -220,8 +204,8 @@ public void getCursors(String ledgerName, MetaStoreCallback> callba
.thenAcceptAsync(cursors -> callback.operationComplete(cursors, null), executor
.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -248,8 +232,8 @@ public void asyncGetCursorInfo(String ledgerName, String cursorName,
}
}, executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -283,8 +267,8 @@ public void asyncUpdateCursorInfo(String ledgerName, String cursorName, ManagedC
.thenAcceptAsync(optStat -> callback.operationComplete(null, optStat), executor
.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -292,7 +276,7 @@ public void asyncUpdateCursorInfo(String ledgerName, String cursorName, ManagedC
@Override
public void asyncRemoveCursor(String ledgerName, String cursorName, MetaStoreCallback callback) {
String path = PREFIX + ledgerName + "/" + cursorName;
- log.info("[{}] Remove consumer={}", ledgerName, cursorName);
+ log.info("[{}] Remove cursor={}", ledgerName, cursorName);
store.delete(path, Optional.empty())
.thenAcceptAsync(v -> {
@@ -302,8 +286,15 @@ public void asyncRemoveCursor(String ledgerName, String cursorName, MetaStoreCal
callback.operationComplete(null, null);
}, executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName, () -> {
+ Throwable actEx = FutureUtil.unwrapCompletionException(ex);
+ if (actEx instanceof MetadataStoreException.NotFoundException){
+ log.info("[{}] [{}] cursor delete done because it did not exist.", ledgerName, cursorName);
+ callback.operationComplete(null, null);
+ return;
+ }
+ callback.operationFailed(getException(ex));
+ });
return null;
});
}
@@ -321,8 +312,8 @@ public void removeManagedLedger(String ledgerName, MetaStoreCallback callb
callback.operationComplete(null, null);
}, executor.chooseThread(ledgerName))
.exceptionally(ex -> {
- executor.executeOrdered(ledgerName, SafeRunnable.safeRun(() -> callback
- .operationFailed(getException(ex))));
+ executor.executeOrdered(ledgerName,
+ () -> callback.operationFailed(getException(ex)));
return null;
});
}
@@ -415,29 +406,43 @@ private static MetaStoreException getException(Throwable t) {
}
public byte[] compressLedgerInfo(ManagedLedgerInfo managedLedgerInfo) {
- if (ledgerInfoCompressionType.equals(CompressionType.NONE)) {
+ CompressionType compressionType = ledgerInfoCompressionConfig.getCompressionType();
+ if (compressionType.equals(CompressionType.NONE)) {
return managedLedgerInfo.toByteArray();
}
- MLDataFormats.ManagedLedgerInfoMetadata mlInfoMetadata = MLDataFormats.ManagedLedgerInfoMetadata
- .newBuilder()
- .setCompressionType(ledgerInfoCompressionType)
- .setUncompressedSize(managedLedgerInfo.getSerializedSize())
- .build();
- return compressManagedInfo(managedLedgerInfo.toByteArray(), mlInfoMetadata.toByteArray(),
- mlInfoMetadata.getSerializedSize(), ledgerInfoCompressionType);
+
+ int uncompressedSize = managedLedgerInfo.getSerializedSize();
+ if (uncompressedSize > ledgerInfoCompressionConfig.getCompressSizeThresholdInBytes()) {
+ MLDataFormats.ManagedLedgerInfoMetadata mlInfoMetadata = MLDataFormats.ManagedLedgerInfoMetadata
+ .newBuilder()
+ .setCompressionType(compressionType)
+ .setUncompressedSize(uncompressedSize)
+ .build();
+ return compressManagedInfo(managedLedgerInfo.toByteArray(), mlInfoMetadata.toByteArray(),
+ mlInfoMetadata.getSerializedSize(), compressionType);
+ }
+
+ return managedLedgerInfo.toByteArray();
}
public byte[] compressCursorInfo(ManagedCursorInfo managedCursorInfo) {
- if (cursorInfoCompressionType.equals(CompressionType.NONE)) {
+ CompressionType compressionType = cursorInfoCompressionConfig.getCompressionType();
+ if (compressionType.equals(CompressionType.NONE)) {
return managedCursorInfo.toByteArray();
}
- MLDataFormats.ManagedCursorInfoMetadata metadata = MLDataFormats.ManagedCursorInfoMetadata
- .newBuilder()
- .setCompressionType(cursorInfoCompressionType)
- .setUncompressedSize(managedCursorInfo.getSerializedSize())
- .build();
- return compressManagedInfo(managedCursorInfo.toByteArray(), metadata.toByteArray(),
- metadata.getSerializedSize(), cursorInfoCompressionType);
+
+ int uncompressedSize = managedCursorInfo.getSerializedSize();
+ if (uncompressedSize > cursorInfoCompressionConfig.getCompressSizeThresholdInBytes()) {
+ MLDataFormats.ManagedCursorInfoMetadata metadata = MLDataFormats.ManagedCursorInfoMetadata
+ .newBuilder()
+ .setCompressionType(compressionType)
+ .setUncompressedSize(uncompressedSize)
+ .build();
+ return compressManagedInfo(managedCursorInfo.toByteArray(), metadata.toByteArray(),
+ metadata.getSerializedSize(), compressionType);
+ }
+
+ return managedCursorInfo.toByteArray();
}
public ManagedLedgerInfo parseManagedLedgerInfo(byte[] data) throws InvalidProtocolBufferException {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java
index c56123c24cac1..ae2beafb64374 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpAddEntry.java
@@ -35,8 +35,6 @@
import org.apache.bookkeeper.mledger.ManagedLedgerException;
import org.apache.bookkeeper.mledger.Position;
import org.apache.bookkeeper.mledger.intercept.ManagedLedgerInterceptor;
-import org.apache.bookkeeper.mledger.util.SafeRun;
-import org.apache.bookkeeper.util.SafeRunnable;
/**
@@ -44,7 +42,7 @@
*
*/
@Slf4j
-public class OpAddEntry extends SafeRunnable implements AddCallback, CloseCallback {
+public class OpAddEntry implements AddCallback, CloseCallback, Runnable {
protected ManagedLedgerImpl ml;
LedgerHandle ledger;
private long entryId;
@@ -212,7 +210,7 @@ public void addComplete(int rc, final LedgerHandle lh, long entryId, Object ctx)
// Called in executor hashed on managed ledger name, once the add operation is complete
@Override
- public void safeRun() {
+ public void run() {
if (payloadProcessorHandle != null) {
payloadProcessorHandle.release();
}
@@ -328,11 +326,11 @@ void handleAddFailure(final LedgerHandle lh) {
ManagedLedgerImpl finalMl = this.ml;
finalMl.mbean.recordAddEntryError();
- finalMl.getExecutor().execute(SafeRun.safeRun(() -> {
+ finalMl.getExecutor().execute(() -> {
// Force the creation of a new ledger. Doing it in a background thread to avoid acquiring ML lock
// from a BK callback.
finalMl.ledgerClosed(lh);
- }));
+ });
}
void close() {
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java
index 81b14359514b9..7b59c3903d5bc 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/OpReadEntry.java
@@ -18,7 +18,6 @@
*/
package org.apache.bookkeeper.mledger.impl;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import io.netty.util.Recycler;
import io.netty.util.Recycler.Handle;
import java.util.ArrayList;
@@ -108,18 +107,20 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
if (!entries.isEmpty()) {
// There were already some entries that were read before, we can return them
- cursor.ledger.getExecutor().execute(safeRun(() -> {
+ cursor.ledger.getExecutor().execute(() -> {
callback.readEntriesComplete(entries, ctx);
recycle();
- }));
+ });
} else if (cursor.config.isAutoSkipNonRecoverableData() && exception instanceof NonRecoverableLedgerException) {
log.warn("[{}][{}] read failed from ledger at position:{} : {}", cursor.ledger.getName(), cursor.getName(),
readPosition, exception.getMessage());
final ManagedLedgerImpl ledger = (ManagedLedgerImpl) cursor.getManagedLedger();
Position nexReadPosition;
+ Long lostLedger = null;
if (exception instanceof ManagedLedgerException.LedgerNotExistException) {
// try to find and move to next valid ledger
nexReadPosition = cursor.getNextLedgerPosition(readPosition.getLedgerId());
+ lostLedger = readPosition.ledgerId;
} else {
// Skip this read operation
nexReadPosition = ledger.getValidPositionAfterSkippedEntries(readPosition, count);
@@ -132,6 +133,9 @@ public void readEntriesFailed(ManagedLedgerException exception, Object ctx) {
return;
}
updateReadPosition(nexReadPosition);
+ if (lostLedger != null) {
+ cursor.getManagedLedger().skipNonRecoverableLedger(lostLedger);
+ }
checkReadCompletion();
} else {
if (!(exception instanceof TooManyRequestsException)) {
@@ -161,20 +165,20 @@ void checkReadCompletion() {
&& maxPosition.compareTo(readPosition) > 0) {
// We still have more entries to read from the next ledger, schedule a new async operation
- cursor.ledger.getExecutor().execute(safeRun(() -> {
+ cursor.ledger.getExecutor().execute(() -> {
readPosition = cursor.ledger.startReadOperationOnLedger(nextReadPosition);
cursor.ledger.asyncReadEntries(OpReadEntry.this);
- }));
+ });
} else {
// The reading was already completed, release resources and trigger callback
try {
cursor.readOperationCompleted();
} finally {
- cursor.ledger.getExecutor().execute(safeRun(() -> {
+ cursor.ledger.getExecutor().execute(() -> {
callback.readEntriesComplete(entries, ctx);
recycle();
- }));
+ });
}
}
}
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java
index b1f239413472f..b33dd87543f77 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/ShadowManagedLedgerImpl.java
@@ -19,7 +19,6 @@
package org.apache.bookkeeper.mledger.impl;
import static org.apache.bookkeeper.mledger.util.Errors.isNoSuchLedgerExistsException;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
@@ -66,13 +65,13 @@ public ShadowManagedLedgerImpl(ManagedLedgerFactoryImpl factory, BookKeeper book
@Override
synchronized void initialize(ManagedLedgerInitializeLedgerCallback callback, Object ctx) {
log.info("Opening shadow managed ledger {} with source={}", name, sourceMLName);
- executor.execute(safeRun(() -> doInitialize(callback, ctx)));
+ executor.execute(() -> doInitialize(callback, ctx));
}
private void doInitialize(ManagedLedgerInitializeLedgerCallback callback, Object ctx) {
// Fetch the list of existing ledgers in the source managed ledger
store.watchManagedLedgerInfo(sourceMLName, (managedLedgerInfo, stat) ->
- executor.execute(safeRun(() -> processSourceManagedLedgerInfo(managedLedgerInfo, stat)))
+ executor.execute(() -> processSourceManagedLedgerInfo(managedLedgerInfo, stat))
);
store.getManagedLedgerInfo(sourceMLName, false, null, new MetaStore.MetaStoreCallback<>() {
@Override
@@ -106,7 +105,7 @@ public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat)
final long lastLedgerId = ledgers.lastKey();
mbean.startDataLedgerOpenOp();
- AsyncCallback.OpenCallback opencb = (rc, lh, ctx1) -> executor.execute(safeRun(() -> {
+ AsyncCallback.OpenCallback opencb = (rc, lh, ctx1) -> executor.execute(() -> {
mbean.endDataLedgerOpenOp();
if (log.isDebugEnabled()) {
log.debug("[{}] Opened source ledger {}", name, lastLedgerId);
@@ -145,7 +144,7 @@ public void operationComplete(MLDataFormats.ManagedLedgerInfo mlInfo, Stat stat)
BKException.getMessage(rc));
callback.initializeFailed(createManagedLedgerException(rc));
}
- }));
+ });
//open ledger in readonly mode.
bookKeeper.asyncOpenLedgerNoRecovery(lastLedgerId, digestType, config.getPassword(), opencb, null);
@@ -317,7 +316,7 @@ private synchronized void processSourceManagedLedgerInfo(MLDataFormats.ManagedLe
mbean.startDataLedgerOpenOp();
//open ledger in readonly mode.
bookKeeper.asyncOpenLedgerNoRecovery(lastLedgerId, digestType, config.getPassword(),
- (rc, lh, ctx1) -> executor.execute(safeRun(() -> {
+ (rc, lh, ctx1) -> executor.execute(() -> {
mbean.endDataLedgerOpenOp();
if (log.isDebugEnabled()) {
log.debug("[{}] Opened new source ledger {}", name, lastLedgerId);
@@ -342,7 +341,7 @@ private synchronized void processSourceManagedLedgerInfo(MLDataFormats.ManagedLe
log.error("[{}] Failed to open source ledger {}: {}", name, lastLedgerId,
BKException.getMessage(rc));
}
- })), null);
+ }), null);
}
//handle old ledgers deleted.
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java
index 1c5563b38b120..d1050e0062826 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/EntryCacheDisabled.java
@@ -93,6 +93,7 @@ public void asyncReadEntry(ReadHandle lh, long firstEntry, long lastEntry, boole
} finally {
ledgerEntries.close();
}
+ ml.getMbean().recordReadEntriesOpsCacheMisses(entries.size(), totalSize);
ml.getFactory().getMbean().recordCacheMiss(entries.size(), totalSize);
ml.getMbean().addReadEntriesSample(entries.size(), totalSize);
@@ -120,6 +121,7 @@ public void asyncReadEntry(ReadHandle lh, PositionImpl position, AsyncCallbacks.
LedgerEntry ledgerEntry = iterator.next();
EntryImpl returnEntry = RangeEntryCacheManagerImpl.create(ledgerEntry, interceptor);
+ ml.getMbean().recordReadEntriesOpsCacheMisses(1, returnEntry.getLength());
ml.getFactory().getMbean().recordCacheMiss(1, returnEntry.getLength());
ml.getMbean().addReadEntriesSample(1, returnEntry.getLength());
callback.readEntryComplete(returnEntry, ctx);
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java
index 28a2f00cf683c..27aec6f178e39 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheImpl.java
@@ -256,6 +256,7 @@ private void asyncReadEntry0(ReadHandle lh, PositionImpl position, final ReadEnt
LedgerEntry ledgerEntry = iterator.next();
EntryImpl returnEntry = RangeEntryCacheManagerImpl.create(ledgerEntry, interceptor);
+ ml.getMbean().recordReadEntriesOpsCacheMisses(1, returnEntry.getLength());
manager.mlFactoryMBean.recordCacheMiss(1, returnEntry.getLength());
ml.getMbean().addReadEntriesSample(1, returnEntry.getLength());
callback.readEntryComplete(returnEntry, ctx);
@@ -449,6 +450,7 @@ CompletableFuture> readFromStorage(ReadHandle lh,
}
}
+ ml.getMbean().recordReadEntriesOpsCacheMisses(entriesToReturn.size(), totalSize);
manager.mlFactoryMBean.recordCacheMiss(entriesToReturn.size(), totalSize);
ml.getMbean().addReadEntriesSample(entriesToReturn.size(), totalSize);
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java
index 080c70b5873cd..d5a3019855cb5 100644
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java
+++ b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/impl/cache/RangeEntryCacheManagerImpl.java
@@ -18,7 +18,6 @@
*/
package org.apache.bookkeeper.mledger.impl.cache;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import com.google.common.collect.Lists;
import io.netty.buffer.ByteBuf;
import java.util.concurrent.ConcurrentHashMap;
@@ -116,7 +115,7 @@ boolean hasSpaceInCache() {
// Trigger a single eviction in background. While the eviction is running we stop inserting entries in the cache
if (currentSize > evictionTriggerThreshold && evictionInProgress.compareAndSet(false, true)) {
- mlFactory.getScheduledExecutor().execute(safeRun(() -> {
+ mlFactory.getScheduledExecutor().execute(() -> {
// Trigger a new cache eviction cycle to bring the used memory below the cacheEvictionWatermark
// percentage limit
long sizeToEvict = currentSize - (long) (maxSize * cacheEvictionWatermark);
@@ -136,7 +135,7 @@ boolean hasSpaceInCache() {
mlFactoryMBean.recordCacheEviction();
evictionInProgress.set(false);
}
- }));
+ });
}
return currentSize < maxSize;
diff --git a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/SafeRun.java b/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/SafeRun.java
deleted file mode 100644
index 570cb7ae735ab..0000000000000
--- a/managed-ledger/src/main/java/org/apache/bookkeeper/mledger/util/SafeRun.java
+++ /dev/null
@@ -1,57 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.bookkeeper.mledger.util;
-
-import java.util.function.Consumer;
-import org.apache.bookkeeper.util.SafeRunnable;
-
-/**
- * Static builders for {@link SafeRunnable}s.
- */
-public class SafeRun {
- public static SafeRunnable safeRun(Runnable runnable) {
- return new SafeRunnable() {
- @Override
- public void safeRun() {
- runnable.run();
- }
- };
- }
-
- /**
- *
- * @param runnable
- * @param exceptionHandler
- * handler that will be called when there are any exception
- * @return
- */
- public static SafeRunnable safeRun(Runnable runnable, Consumer exceptionHandler) {
- return new SafeRunnable() {
- @Override
- public void safeRun() {
- try {
- runnable.run();
- } catch (Throwable t) {
- exceptionHandler.accept(t);
- throw t;
- }
- }
- };
- }
-}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java
index 3fa0234e13a55..7558f07db76ca 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorConcurrencyTest.java
@@ -18,7 +18,6 @@
*/
package org.apache.bookkeeper.mledger.impl;
-import static org.apache.bookkeeper.mledger.util.SafeRun.safeRun;
import static org.testng.Assert.assertEquals;
import static org.testng.Assert.assertFalse;
import static org.testng.Assert.assertNull;
@@ -383,7 +382,7 @@ public void testConcurrentIndividualDeletesWithGetNthEntry() throws Exception {
final AtomicInteger iteration = new AtomicInteger(0);
for (int i = 0; i < deleteEntries; i++) {
- executor.submit(safeRun(() -> {
+ executor.submit(() -> {
try {
cursor.asyncDelete(addedEntries.get(iteration.getAndIncrement()), new DeleteCallback() {
@Override
@@ -403,7 +402,7 @@ public void deleteFailed(ManagedLedgerException exception, Object ctx) {
} finally {
counter.countDown();
}
- }));
+ });
}
counter.await();
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java
index 08d8fd939a01d..70ba4b543ec09 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorInfoMetadataTest.java
@@ -19,11 +19,13 @@
package org.apache.bookkeeper.mledger.impl;
import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
import static org.testng.Assert.expectThrows;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import lombok.extern.slf4j.Slf4j;
+import org.apache.bookkeeper.mledger.MetadataCompressionConfig;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;
import org.apache.pulsar.common.api.proto.CompressionType;
import org.testng.annotations.DataProvider;
@@ -49,16 +51,14 @@ private Object[][] compressionTypeProvider() {
};
}
- @Test(dataProvider = "compressionTypeProvider")
- public void testEncodeAndDecode(String compressionType) throws IOException {
- long ledgerId = 10000;
+ private MLDataFormats.ManagedCursorInfo.Builder generateManagedCursorInfo(long ledgerId, int positionNumber) {
MLDataFormats.ManagedCursorInfo.Builder builder = MLDataFormats.ManagedCursorInfo.newBuilder();
builder.setCursorsLedgerId(ledgerId);
builder.setMarkDeleteLedgerId(ledgerId);
List batchedEntryDeletionIndexInfos = new ArrayList<>();
- for (int i = 0; i < 1000; i++) {
+ for (int i = 0; i < positionNumber; i++) {
MLDataFormats.NestedPositionInfo nestedPositionInfo = MLDataFormats.NestedPositionInfo.newBuilder()
.setEntryId(i).setLedgerId(i).build();
MLDataFormats.BatchedEntryDeletionIndexInfo batchedEntryDeletionIndexInfo = MLDataFormats
@@ -67,17 +67,24 @@ public void testEncodeAndDecode(String compressionType) throws IOException {
}
builder.addAllBatchedEntryDeletionIndexInfo(batchedEntryDeletionIndexInfos);
+ return builder;
+ }
+
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testEncodeAndDecode(String compressionType) throws IOException {
+ long ledgerId = 10000;
+ MLDataFormats.ManagedCursorInfo.Builder builder = generateManagedCursorInfo(ledgerId, 1000);
MetaStoreImpl metaStore;
if (INVALID_TYPE.equals(compressionType)) {
IllegalArgumentException compressionTypeEx = expectThrows(IllegalArgumentException.class, () -> {
- new MetaStoreImpl(null, null, null, compressionType);
+ new MetaStoreImpl(null, null, null, new MetadataCompressionConfig(compressionType));
});
assertEquals(compressionTypeEx.getMessage(),
"No enum constant org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType."
+ compressionType);
return;
} else {
- metaStore = new MetaStoreImpl(null, null, null, compressionType);
+ metaStore = new MetaStoreImpl(null, null, null, new MetadataCompressionConfig(compressionType));
}
MLDataFormats.ManagedCursorInfo managedCursorInfo = builder.build();
@@ -93,4 +100,42 @@ public void testEncodeAndDecode(String compressionType) throws IOException {
MLDataFormats.ManagedCursorInfo info2 = metaStore.parseManagedCursorInfo(managedCursorInfo.toByteArray());
assertEquals(info1, info2);
}
+
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testCompressionThreshold(String compressionType) throws IOException {
+ int compressThreshold = 512;
+
+ long ledgerId = 10000;
+ // should not compress
+ MLDataFormats.ManagedCursorInfo smallInfo = generateManagedCursorInfo(ledgerId, 1).build();
+ assertTrue(smallInfo.getSerializedSize() < compressThreshold);
+
+ // should compress
+ MLDataFormats.ManagedCursorInfo bigInfo = generateManagedCursorInfo(ledgerId, 1000).build();
+ assertTrue(bigInfo.getSerializedSize() > compressThreshold);
+
+ MetaStoreImpl metaStore;
+ if (INVALID_TYPE.equals(compressionType)) {
+ IllegalArgumentException compressionTypeEx = expectThrows(IllegalArgumentException.class, () -> {
+ new MetaStoreImpl(null, null, null,
+ new MetadataCompressionConfig(compressionType, compressThreshold));
+ });
+ assertEquals(compressionTypeEx.getMessage(),
+ "No enum constant org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType."
+ + compressionType);
+ return;
+ } else {
+ metaStore = new MetaStoreImpl(null, null, null,
+ new MetadataCompressionConfig(compressionType, compressThreshold));
+ }
+
+ byte[] compressionBytes = metaStore.compressCursorInfo(smallInfo);
+ // not compressed
+ assertEquals(compressionBytes.length, smallInfo.getSerializedSize());
+
+
+ byte[] compressionBigBytes = metaStore.compressCursorInfo(bigInfo);
+ // compressed
+ assertTrue(compressionBigBytes.length != smallInfo.getSerializedSize());
+ }
}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java
index 8dc726c249efc..1b1b5534256f9 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedCursorTest.java
@@ -48,6 +48,7 @@
import java.util.Map;
import java.util.Optional;
import java.util.Set;
+import java.util.UUID;
import java.util.concurrent.Callable;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ConcurrentHashMap;
@@ -95,6 +96,7 @@
import org.apache.bookkeeper.test.MockedBookKeeperTestCase;
import org.apache.pulsar.common.api.proto.CommandSubscribe;
import org.apache.pulsar.common.api.proto.IntRange;
+import org.apache.pulsar.common.util.FutureUtil;
import org.apache.pulsar.common.util.collections.BitSetRecyclable;
import org.apache.pulsar.common.util.collections.LongPairRangeSet;
import org.apache.pulsar.metadata.api.MetadataStoreException;
@@ -1060,6 +1062,21 @@ void removingCursor() throws Exception {
Awaitility.await().until(() -> ledger.getNumberOfEntries() <= 2);
}
+ @Test(timeOut = 10000)
+ void testRemoveCursorFail() throws Exception {
+ String mlName = UUID.randomUUID().toString().replaceAll("-", "");
+ String cursorName = "c1";
+ ManagedLedger ledger = factory.open(mlName);
+ ledger.openCursor(cursorName);
+ metadataStore.setAlwaysFail(new MetadataStoreException("123"));
+ try {
+ ledger.deleteCursor(cursorName);
+ fail("expected delete cursor failure.");
+ } catch (Exception ex) {
+ assertTrue(FutureUtil.unwrapCompletionException(ex).getMessage().contains("123"));
+ }
+ }
+
@Test(timeOut = 20000)
void cursorPersistence() throws Exception {
ManagedLedger ledger = factory.open("my_test_ledger");
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java
index 7ddf6541c9a39..6e1f447225e53 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerInfoMetadataTest.java
@@ -26,6 +26,7 @@
import java.util.Map;
import java.util.UUID;
import lombok.extern.slf4j.Slf4j;
+import org.apache.bookkeeper.mledger.MetadataCompressionConfig;
import org.apache.bookkeeper.mledger.offload.OffloadUtils;
import org.apache.bookkeeper.mledger.proto.MLDataFormats;
import org.apache.commons.lang3.RandomUtils;
@@ -33,6 +34,8 @@
import org.testng.Assert;
import org.testng.annotations.DataProvider;
import org.testng.annotations.Test;
+import static org.testng.Assert.assertEquals;
+import static org.testng.Assert.assertTrue;
/**
* ManagedLedgerInfo metadata test.
@@ -53,11 +56,9 @@ private Object[][] compressionTypeProvider() {
};
}
- @Test(dataProvider = "compressionTypeProvider")
- public void testEncodeAndDecode(String compressionType) throws IOException {
- long ledgerId = 10000;
+ private MLDataFormats.ManagedLedgerInfo.Builder generateManagedLedgerInfo(long ledgerId, int ledgerInfoNumber) {
List ledgerInfoList = new ArrayList<>();
- for (int i = 0; i < 100; i++) {
+ for (int i = 0; i < ledgerInfoNumber; i++) {
MLDataFormats.ManagedLedgerInfo.LedgerInfo.Builder builder = MLDataFormats.ManagedLedgerInfo.LedgerInfo.newBuilder();
builder.setLedgerId(ledgerId);
builder.setEntries(RandomUtils.nextInt());
@@ -84,13 +85,18 @@ public void testEncodeAndDecode(String compressionType) throws IOException {
ledgerId ++;
}
- MLDataFormats.ManagedLedgerInfo managedLedgerInfo = MLDataFormats.ManagedLedgerInfo.newBuilder()
- .addAllLedgerInfo(ledgerInfoList)
- .build();
+ return MLDataFormats.ManagedLedgerInfo.newBuilder()
+ .addAllLedgerInfo(ledgerInfoList);
+ }
+
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testEncodeAndDecode(String compressionType) throws IOException {
+ long ledgerId = 10000;
+ MLDataFormats.ManagedLedgerInfo managedLedgerInfo = generateManagedLedgerInfo(ledgerId,100).build();
MetaStoreImpl metaStore;
try {
- metaStore = new MetaStoreImpl(null, null, compressionType, null);
+ metaStore = new MetaStoreImpl(null, null, new MetadataCompressionConfig(compressionType), null);
if ("INVALID_TYPE".equals(compressionType)) {
Assert.fail("The managedLedgerInfo compression type is invalid, should fail.");
}
@@ -126,4 +132,45 @@ public void testParseEmptyData() throws InvalidProtocolBufferException {
Assert.assertEquals(managedLedgerInfo.toString(), "");
}
+ @Test(dataProvider = "compressionTypeProvider")
+ public void testCompressionThreshold(String compressionType) {
+ long ledgerId = 10000;
+ int compressThreshold = 512;
+
+ // should not compress
+ MLDataFormats.ManagedLedgerInfo smallInfo = generateManagedLedgerInfo(ledgerId, 0).build();
+ assertTrue(smallInfo.getSerializedSize() < compressThreshold);
+
+ // should compress
+ MLDataFormats.ManagedLedgerInfo bigInfo = generateManagedLedgerInfo(ledgerId, 1000).build();
+ assertTrue(bigInfo.getSerializedSize() > compressThreshold);
+
+ MLDataFormats.ManagedLedgerInfo managedLedgerInfo = generateManagedLedgerInfo(ledgerId,100).build();
+
+ MetaStoreImpl metaStore;
+ try {
+ MetadataCompressionConfig metadataCompressionConfig =
+ new MetadataCompressionConfig(compressionType, compressThreshold);
+ metaStore = new MetaStoreImpl(null, null, metadataCompressionConfig, null);
+ if ("INVALID_TYPE".equals(compressionType)) {
+ Assert.fail("The managedLedgerInfo compression type is invalid, should fail.");
+ }
+ } catch (Exception e) {
+ if ("INVALID_TYPE".equals(compressionType)) {
+ Assert.assertEquals(e.getClass(), IllegalArgumentException.class);
+ Assert.assertEquals(
+ "No enum constant org.apache.bookkeeper.mledger.proto.MLDataFormats.CompressionType."
+ + compressionType, e.getMessage());
+ return;
+ } else {
+ throw e;
+ }
+ }
+
+ byte[] compressionBytes = metaStore.compressLedgerInfo(smallInfo);
+ assertEquals(compressionBytes.length, smallInfo.getSerializedSize());
+
+ byte[] compressionBytesBig = metaStore.compressLedgerInfo(bigInfo);
+ assertTrue(compressionBytesBig.length !=smallInfo.getSerializedSize());
+ }
}
diff --git a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java
index a4d8b75d00c96..70ddbb9998fd8 100644
--- a/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java
+++ b/managed-ledger/src/test/java/org/apache/bookkeeper/mledger/impl/ManagedLedgerTest.java
@@ -88,6 +88,7 @@
import org.apache.bookkeeper.client.PulsarMockBookKeeper;
import org.apache.bookkeeper.client.PulsarMockLedgerHandle;
import org.apache.bookkeeper.client.api.LedgerEntries;
+import org.apache.bookkeeper.client.api.LedgerMetadata;
import org.apache.bookkeeper.client.api.ReadHandle;
import org.apache.bookkeeper.conf.ClientConfiguration;
import org.apache.bookkeeper.mledger.AsyncCallbacks;
@@ -128,6 +129,7 @@
import org.apache.pulsar.common.api.proto.CommandSubscribe.InitialPosition;
import org.apache.pulsar.common.policies.data.EnsemblePlacementPolicyConfig;
import org.apache.pulsar.common.policies.data.OffloadPoliciesImpl;
+import org.apache.pulsar.common.util.FutureUtil;
import org.apache.pulsar.metadata.api.MetadataStoreException;
import org.apache.pulsar.metadata.api.Stat;
import org.apache.pulsar.metadata.api.extended.SessionEvent;
@@ -3858,12 +3860,26 @@ public void testInactiveLedgerRollOver() throws Exception {
ManagedLedgerImpl ledger = (ManagedLedgerImpl) factory.open("rollover_inactive", config);
ManagedCursor cursor = ledger.openCursor("c1");
+ List ledgerIds = new ArrayList<>();
+
int totalAddEntries = 5;
for (int i = 0; i < totalAddEntries; i++) {
String content = "entry"; // 5 bytes
ledger.checkInactiveLedgerAndRollOver();
ledger.addEntry(content.getBytes());
Thread.sleep(inactiveLedgerRollOverTimeMs * 5);
+
+ ledgerIds.add(ledger.currentLedger.getId());
+ }
+
+ Map ledgerMap = bkc.getLedgerMap();
+ // skip check last ledger, it should be open
+ for (int i = 0; i < ledgerIds.size() - 1; i++) {
+ long ledgerId = ledgerIds.get(i);
+ LedgerMetadata ledgerMetadata = ledgerMap.get(ledgerId).getLedgerMetadata();
+ if (ledgerMetadata != null) {
+ assertTrue(ledgerMetadata.isClosed());
+ }
}
List ledgers = ledger.getLedgersInfoAsList();
@@ -3969,4 +3985,29 @@ public void testGetEstimatedBacklogSize() throws Exception {
Assert.assertEquals(ledger.getEstimatedBacklogSize(((PositionImpl) positions.get(9)).getNext()), 0);
ledger.close();
}
+
+ @Test
+ public void testDeleteCursorTwice() throws Exception {
+ ManagedLedgerImpl ml = (ManagedLedgerImpl) factory.open("ml");
+ String cursorName = "cursor_1";
+ ml.openCursor(cursorName);
+ syncRemoveCursor(ml, cursorName);
+ syncRemoveCursor(ml, cursorName);
+ }
+
+ private void syncRemoveCursor(ManagedLedgerImpl ml, String cursorName){
+ CompletableFuture future = new CompletableFuture<>();
+ ml.getStore().asyncRemoveCursor(ml.name, cursorName, new MetaStoreCallback() {
+ @Override
+ public void operationComplete(Void result, Stat stat) {
+ future.complete(null);
+ }
+
+ @Override
+ public void operationFailed(MetaStoreException e) {
+ future.completeExceptionally(FutureUtil.unwrapCompletionException(e));
+ }
+ });
+ future.join();
+ }
}
diff --git a/pip/README.md b/pip/README.md
new file mode 100644
index 0000000000000..3ed9a1d34cd1d
--- /dev/null
+++ b/pip/README.md
@@ -0,0 +1,97 @@
+# Pulsar Improvement Proposal (PIP)
+
+## What is a PIP?
+
+The PIP is a "Pulsar Improvement Proposal" and it's the mechanism used to propose changes to the Apache Pulsar codebases.
+
+The changes might be in terms of new features, large code refactoring, changes to APIs.
+
+In practical terms, the PIP defines a process in which developers can submit a design doc, receive feedback and get the "go ahead" to execute.
+
+### What is the goal of a PIP?
+
+There are several goals for the PIP process:
+
+1. Ensure community technical discussion of major changes to the Apache Pulsar codebase.
+
+2. Provide clear and thorough design documentation of the proposed changes. Make sure every Pulsar developer will have enough context to effectively perform a code review of the Pull Requests.
+
+3. Use the PIP document to serve as the baseline on which to create the documentation for the new feature.
+
+4. Have greater scrutiny of changes that affect the public APIs (as defined below), to reduce the chances of introducing breaking changes or APIs that do not express an ideal semantic.
+
+It is not a goal for PIP to add undue process or slow-down the development.
+
+### When is a PIP required?
+
+* Any new feature for Pulsar brokers or client
+* Any change to the public APIs (Client APIs, REST APIs, Plugin APIs)
+* Any change to the wire protocol APIs
+* Any change to the API of Pulsar CLI tools (eg: new options)
+* Any change to the semantic of existing functionality, even when current behavior is incorrect.
+* Any large code change that will touch multiple components
+* Any changes to the metrics (metrics endpoint, topic stats, topics internal stats, broker stats, etc.)
+* Any change to the configuration
+
+### When is a PIP *not* required?
+
+* Bug-fixes
+* Simple enhancements that won't affect the APIs or the semantic
+* Small documentation changes
+* Small website changes
+* Build scripts changes (except: a complete rewrite)
+
+### Who can create a PIP?
+
+Any person willing to contribute to the Apache Pulsar project is welcome to create a PIP.
+
+## How does the PIP process work?
+
+A PIP proposal can be in these states:
+1. **DRAFT**: (Optional) This might be used for contributors to collaborate and to seek feedback on an incomplete version of the proposal.
+
+2. **DISCUSSION**: The proposal has been submitted to the community for feedback and approval.
+
+3. **ACCEPTED**: The proposal has been accepted by the Pulsar project.
+
+4. **REJECTED**: The proposal has not been accepted by the Pulsar project.
+
+5. **IMPLEMENTED**: The implementation of the proposed changes has been completed and everything has been merged.
+
+6. **RELEASED**: The proposed changes have been included in an official
+ Apache Pulsar release.
+
+
+The process works in the following way:
+
+1. Fork https://github.com/apache/pulsar repository (Using the fork button on GitHub).
+2. Clone the repository, and on it, copy the file `pip/TEMPLATE.md` and name it `pip-xxx.md`. The number `xxx` should be the next sequential number after the last contributed PIP. You can view the list of contributed PIPs (at any status) as a list of Pull Requests having a title starting with `[pip][design] PIP-`. Use the link [here](https://github.com/apache/pulsar/pulls?q=is%3Apr+title%3A%22%5Bpip%5D%5Bdesign%5D+PIP-%22) as a shortcut.
+3. Write the proposal following the section outlined by the template and the explanation for each section in the comment it contains (you can delete the comment once done).
+ * If you need diagrams, avoid attaching large files. You can use [MermaidJS](https://mermaid.js.org/) as simple language to describe many types of diagrams.
+4. Create GitHub Pull request (PR). The PR title should be `[pip][design] PIP-xxx: {title}`, where the `xxx` match the number given in previous step (file-name). Replace `{title}` with a short title to your proposal.
+5. The author(s) will email the dev@pulsar.apache.org mailing list to kick off a discussion, using subject prefix `[DISCUSS] PIP-xxx: {PIP TITLE}`. The discussion will happen in broader context either on the mailing list or as general comments on the PR. Many of the discussion items will be on particular aspect of the proposal, hence they should be as comments in the PR to specific lines in the proposal file.
+6. Update the file with a link to the discussion on the mailing list. You can obtain it from [Apache Pony Mail](https://lists.apache.org/list.html?dev@pulsar.apache.org).
+7. Based on the discussion and feedback, some changes might be applied by authors to the text of the proposal. They will be applied as extra commits, making it easier to track the changes.
+8. Once some consensus is reached, there will be a vote to formally approve the proposal. The vote will be held on the dev@pulsar.apache.org mailing list, by
+ sending a message using subject `[VOTE] PIP-xxx: {PIP TITLE}`.
+ Make sure to update the PIP with a link to the vote. You can obtain it from [Apache Pony Mail](https://lists.apache.org/list.html?dev@pulsar.apache.org).
+ Everyone is welcome to vote on the proposal, though only the vote of the PMC members will be considered binding.
+ It is required to have a lazy majority of at least 3 binding +1s votes.
+ The vote should stay open for at least 48 hours.
+9. When the vote is closed, if the outcome is positive, ask a PMC member (using voting thread on mailing list) to merge the PR.
+10. If the outcome is negative, please close the PR (with a small comment that the close is a result of a vote).
+
+All the future implementation Pull Requests that will be created, should always reference the PIP-XXX in the commit log message and the PR title.
+It is advised to create a master GitHub issue to formulate the execution plan and track its progress.
+
+## List of PIPs
+
+### Historical PIPs
+You can view the list of PIPs previously managed by GitHub wiki or GitHub issues [here](https://github.com/apache/pulsar/wiki#pulsar-improvement-proposals)
+
+### List of PIPs
+1. You can view all PIPs (besides the historical ones) as the list of Pull Requests having title starting with `[pip][design] PIP-`. Here is the [link](https://github.com/apache/pulsar/pulls?q=is%3Apr+title%3A%22%5Bpip%5D%5Bdesign%5D+PIP-%22) for it.
+ - Merged PR means the PIP was accepted.
+ - Closed PR means the PIP was rejected.
+ - Open PR means the PIP was submitted and is in the process of discussion.
+2. You can also take a look at the files in the `pip` folder. Each one is an approved PIP.
\ No newline at end of file
diff --git a/pip/TEMPLATE.md b/pip/TEMPLATE.md
new file mode 100644
index 0000000000000..6f907eef7e8e9
--- /dev/null
+++ b/pip/TEMPLATE.md
@@ -0,0 +1,163 @@
+
+
+# Background knowledge
+
+
+
+# Motivation
+
+
+
+# Goals
+
+## In Scope
+
+
+
+## Out of Scope
+
+
+
+
+# High Level Design
+
+
+
+# Detailed Design
+
+## Design & Implementation Details
+
+
+
+## Public-facing Changes
+
+
+
+### Public API
+
+
+### Binary protocol
+
+### Configuration
+
+### CLI
+
+### Metrics
+
+
+
+
+# Monitoring
+
+
+
+# Security Considerations
+
+
+# Backward & Forward Compatibility
+
+## Revert
+
+
+
+## Upgrade
+
+
+
+# Alternatives
+
+
+
+# General Notes
+
+# Links
+
+
+* Mailing List discussion thread:
+* Mailing List voting thread:
diff --git a/pom.xml b/pom.xml
index dc27ed54274fb..c1a126b1161c0 100644
--- a/pom.xml
+++ b/pom.xml
@@ -32,7 +32,7 @@
org.apache.pulsarpulsar
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOTPulsarPulsar is a distributed pub-sub messaging platform with a very
@@ -92,8 +92,14 @@ flexible messaging model and an intuitive client API.UTF-8UTF-8
- ${maven.build.timestamp}
+ 2023-05-03T02:53:27Ztrue
+
+
+
+
--add-opens java.base/jdk.internal.loader=ALL-UNNAMED
@@ -103,6 +109,7 @@ flexible messaging model and an intuitive client API.
--add-opens java.base/sun.net=ALL-UNNAMED
--add-opens java.management/sun.management=ALL-UNNAMED
--add-opens jdk.management/com.sun.management.internal=ALL-UNNAMED
+ --add-opens java.base/jdk.internal.platform=ALL-UNNAMED
true4
@@ -126,33 +133,33 @@ flexible messaging model and an intuitive client API.
1.21
- 4.15.4
+ 4.16.13.8.11.5.01.10.01.1.8.44.1.12.15.1.0
- 4.1.89.Final
- 0.0.18.Final
+ 4.1.93.Final
+ 0.0.21.Final9.4.48.v202206222.5.22.341.10.500.16.0
- 3.9.8
- 6.29.4.1
+ 4.3.8
+ 7.9.21.7.324.42.18.01.691.0.61.0.2.3
- 2.13.4.20221013
- 0.9.11
+ 2.14.2
+ 0.10.21.6.28.37
- 0.40.2
+ 0.42.1true0.5.03.19.6
@@ -174,17 +181,17 @@ flexible messaging model and an intuitive client API.
2.10.102.5.05.1.0
- 3.36.0.3
+ 3.42.0.08.0.1142.5.1
- 0.3.2-patch11
+ 0.4.62.7.50.4.4-hotfix1
- 3.3.3
- 2.4.7
+ 3.3.5
+ 2.4.101.2.48.5.2
- 363
+ 3681.9.7.Final42.5.08.0.30
@@ -193,8 +200,8 @@ flexible messaging model and an intuitive client API.
0.11.10.28.02.10.2
- 3.3.4
- 2.4.15
+ 3.3.5
+ 2.4.1631.0.1-jre1.00.16.1
@@ -224,19 +231,19 @@ flexible messaging model and an intuitive client API.
2.3.32.0.25.12.1
- 12.0.1
+ 18.0.04.9.32.8.0
- 1.4.32
+ 1.8.201.09.1.6
- 5.3.20
+ 5.3.274.5.134.4.15
- 0.5.11
- 1.32
+ 0.7.5
+ 2.01.10.125.3.33.4.3
@@ -250,14 +257,14 @@ flexible messaging model and an intuitive client API.
3.2.131.1.1
- 7.7.0
+ 7.7.13.12.43.25.0-GA1.5.03.14.2.01.2.22
- 1.5.3
+ 1.5.45.4.02.33.2
@@ -266,22 +273,21 @@ flexible messaging model and an intuitive client API.
3.0.04.11.0
- 3.1.0
+ 3.3.0
-
- 3.0.0-M3
- 3.4.2
- 3.10.1
- 3.4.0
+ 3.1.0
+ 3.5.0
+ 3.11.0
+ 3.5.02.3.03.4.13.1.01.1.0
- 1.3.4
+ 1.5.03.1.24.0.2
- 3.4.3
+ 3.5.31.7.00.8.84.7.3.0
@@ -291,8 +297,8 @@ flexible messaging model and an intuitive client API.
0.1.41.30.4
- 8.0.1
- 0.9.15
+ 8.1.2
+ 0.9.441.6.16.4.0
@@ -878,6 +884,24 @@ flexible messaging model and an intuitive client API.
${caffeine.version}
+
+ org.bouncycastle
+ bcpkix-jdk15on
+ ${bouncycastle.version}
+
+
+
+ com.cronutils
+ cron-utils
+ ${cron-utils.version}
+
+
+ org.glassfish
+ javax.el
+
+
+
+
com.yahoo.athenzathenz-zts-java-client-core
@@ -1464,7 +1488,6 @@ flexible messaging model and an intuitive client API.
UTF-8truetrue
- truefalse
@@ -1513,10 +1536,17 @@ flexible messaging model and an intuitive client API.
listener
- org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier,org.apache.pulsar.tests.MockitoCleanupListener,org.apache.pulsar.tests.FastThreadLocalCleanupListener,org.apache.pulsar.tests.ThreadLeakDetectorListener,org.apache.pulsar.tests.SingletonCleanerListener
+ org.apache.pulsar.tests.PulsarTestListener,org.apache.pulsar.tests.JacocoDumpListener,org.apache.pulsar.tests.AnnotationListener,org.apache.pulsar.tests.FailFastNotifier,org.apache.pulsar.tests.MockitoCleanupListener,org.apache.pulsar.tests.FastThreadLocalCleanupListener,org.apache.pulsar.tests.ThreadLeakDetectorListener,org.apache.pulsar.tests.SingletonCleanerListener
+
+
+ org.apache.maven.surefire
+ surefire-testng
+ ${surefire.version}
+
+
@@ -1948,8 +1978,8 @@ flexible messaging model and an intuitive client API.
88
-
-
+
+
@@ -1977,6 +2007,7 @@ flexible messaging model and an intuitive client API.
${project.build.directory}/jacoco_${maven.build.timestamp}_${surefire.forkNumber}.exectrue
+ trueorg.apache.pulsar.*org.apache.bookkeeper.mledger.*
@@ -2140,6 +2171,7 @@ flexible messaging model and an intuitive client API.
pulsar-broker-auth-athenzpulsar-client-auth-athenzpulsar-sql
+ pulsar-broker-auth-oidcpulsar-broker-auth-saslpulsar-client-auth-saslpulsar-config-validation
@@ -2198,6 +2230,7 @@ flexible messaging model and an intuitive client API.
pulsar-websocketpulsar-proxypulsar-testclient
+ pulsar-broker-auth-oidcpulsar-broker-auth-saslpulsar-client-auth-saslpulsar-config-validation
@@ -2405,6 +2438,20 @@ flexible messaging model and an intuitive client API.
+
+ pulsar-io-elastic-tests
+
+ pulsar-io
+
+
+
+
+ pulsar-io-kafka-connect-tests
+
+ pulsar-io
+
+
+
pulsar-sql-tests
diff --git a/pulsar-broker-auth-athenz/pom.xml b/pulsar-broker-auth-athenz/pom.xml
index 419346c7adb90..9c39e07b620ce 100644
--- a/pulsar-broker-auth-athenz/pom.xml
+++ b/pulsar-broker-auth-athenz/pom.xml
@@ -26,7 +26,7 @@
org.apache.pulsarpulsar
- 3.0.0-SNAPSHOT
+ 3.1.0-SNAPSHOTpulsar-broker-auth-athenz
diff --git a/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java b/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java
index 2e062b87a8325..652a922b9a5ad 100644
--- a/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java
+++ b/pulsar-broker-auth-athenz/src/main/java/org/apache/pulsar/broker/authentication/AuthenticationProviderAthenz.java
@@ -43,6 +43,15 @@ public class AuthenticationProviderAthenz implements AuthenticationProvider {
private List domainNameList = null;
private int allowedOffset = 30;
+ public enum ErrorCode {
+ UNKNOWN,
+ NO_CLIENT,
+ NO_TOKEN,
+ NO_PUBLIC_KEY,
+ DOMAIN_MISMATCH,
+ INVALID_TOKEN,
+ }
+
@Override
public void initialize(ServiceConfiguration config) throws IOException {
String domainNames;
@@ -81,11 +90,13 @@ public String getAuthMethodName() {
public String authenticate(AuthenticationDataSource authData) throws AuthenticationException {
SocketAddress clientAddress;
String roleToken;
+ ErrorCode errorCode = ErrorCode.UNKNOWN;
try {
if (authData.hasDataFromPeer()) {
clientAddress = authData.getPeerAddress();
} else {
+ errorCode = ErrorCode.NO_CLIENT;
throw new AuthenticationException("Authentication data source does not have a client address");
}
@@ -94,13 +105,16 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
} else if (authData.hasDataFromHttp()) {
roleToken = authData.getHttpHeader(AuthZpeClient.ZPE_TOKEN_HDR);
} else {
+ errorCode = ErrorCode.NO_TOKEN;
throw new AuthenticationException("Authentication data source does not have a role token");
}
if (roleToken == null) {
+ errorCode = ErrorCode.NO_TOKEN;
throw new AuthenticationException("Athenz token is null, can't authenticate");
}
if (roleToken.isEmpty()) {
+ errorCode = ErrorCode.NO_TOKEN;
throw new AuthenticationException("Athenz RoleToken is empty, Server is Using Athenz Authentication");
}
if (log.isDebugEnabled()) {
@@ -110,6 +124,7 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
RoleToken token = new RoleToken(roleToken);
if (!domainNameList.contains(token.getDomain())) {
+ errorCode = ErrorCode.DOMAIN_MISMATCH;
throw new AuthenticationException(
String.format("Athenz RoleToken Domain mismatch, Expected: %s, Found: %s",
domainNameList.toString(), token.getDomain()));
@@ -120,6 +135,7 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
PublicKey ztsPublicKey = AuthZpeClient.getZtsPublicKey(token.getKeyId());
if (ztsPublicKey == null) {
+ errorCode = ErrorCode.NO_PUBLIC_KEY;
throw new AuthenticationException("Unable to retrieve ZTS Public Key");
}
@@ -128,13 +144,13 @@ public String authenticate(AuthenticationDataSource authData) throws Authenticat
AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
return token.getPrincipal();
} else {
+ errorCode = ErrorCode.INVALID_TOKEN;
throw new AuthenticationException(
String.format("Athenz Role Token Not Authenticated from Client: %s", clientAddress));
}
}
} catch (AuthenticationException exception) {
- AuthenticationMetrics.authenticateFailure(getClass().getSimpleName(), getAuthMethodName(),
- exception.getMessage());
+ incrementFailureMetric(errorCode);
throw exception;
}
}
diff --git a/pulsar-broker-auth-oidc/pom.xml b/pulsar-broker-auth-oidc/pom.xml
new file mode 100644
index 0000000000000..ca2b623d96eae
--- /dev/null
+++ b/pulsar-broker-auth-oidc/pom.xml
@@ -0,0 +1,191 @@
+
+
+
+ 4.0.0
+
+ org.apache.pulsar
+ pulsar
+ 3.1.0-SNAPSHOT
+
+
+ pulsar-broker-auth-oidc
+ jar
+ Open ID Connect authentication plugin for broker
+
+
+ 0.11.5
+
+
+
+
+
+ ${project.groupId}
+ pulsar-broker-common
+ ${project.version}
+
+
+ io.grpc
+ *
+
+
+
+
+
+ com.auth0
+ java-jwt
+ 4.3.0
+
+
+
+ com.auth0
+ jwks-rsa
+ 0.22.0
+
+
+
+ com.github.ben-manes.caffeine
+ caffeine
+
+
+
+ org.asynchttpclient
+ async-http-client
+
+
+
+ io.kubernetes
+ client-java
+ ${kubernetesclient.version}
+
+
+
+ io.prometheus
+ simpleclient_httpserver
+
+
+ bcpkix-jdk18on
+ org.bouncycastle
+
+
+ bcutil-jdk18on
+ org.bouncycastle
+
+
+ bcprov-jdk18on
+ org.bouncycastle
+
+
+
+
+
+ io.jsonwebtoken
+ jjwt-api
+ ${jsonwebtoken.version}
+ test
+
+
+ io.jsonwebtoken
+ jjwt-impl
+ ${jsonwebtoken.version}
+ test
+
+
+
+ com.github.tomakehurst
+ wiremock-jre8
+ ${wiremock.version}
+ test
+
+
+
+
+
+
+
+ test-jar-dependencies
+
+
+ maven.test.skip
+ !true
+
+
+
+
+ ${project.groupId}
+ pulsar-broker
+ ${project.version}
+ test
+ test-jar
+
+
+
+
+
+
+
+
+
+ org.gaul
+ modernizer-maven-plugin
+
+ true
+ 8
+
+
+
+ modernizer
+ verify
+
+ modernizer
+
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-checkstyle-plugin
+
+
+ checkstyle
+ verify
+
+ check
+
+
+
+
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+
+ src/test/java/resources/fakeKubeConfig.yaml
+ ${project.basedir}/target/kubeconfig.yaml
+
+
+
+
+
+
diff --git a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentFailoverStreamingDispatcherE2ETest.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationExceptionCode.java
similarity index 57%
rename from pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentFailoverStreamingDispatcherE2ETest.java
rename to pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationExceptionCode.java
index 92352cde47ff0..5f89f5f1370f1 100644
--- a/pulsar-broker/src/test/java/org/apache/pulsar/broker/service/persistent/PersistentFailoverStreamingDispatcherE2ETest.java
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationExceptionCode.java
@@ -16,23 +16,23 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.apache.pulsar.broker.service.persistent;
-
-import org.apache.pulsar.broker.service.PersistentFailoverE2ETest;
-import org.apache.pulsar.broker.service.streamingdispatch.StreamingDispatcher;
-import org.testng.annotations.BeforeClass;
-import org.testng.annotations.Test;
+package org.apache.pulsar.broker.authentication.oidc;
/**
- * PersistentFailoverE2ETest with {@link StreamingDispatcher}
+ * Enum used to classify the types of exceptions encountered
+ * when attempting JWT verification.
*/
-@Test(groups = "broker")
-public class PersistentFailoverStreamingDispatcherE2ETest extends PersistentFailoverE2ETest {
-
- @BeforeClass
- @Override
- protected void setup() throws Exception {
- conf.setStreamingDispatch(true);
- super.setup();
- }
+public enum AuthenticationExceptionCode {
+ UNSUPPORTED_ISSUER,
+ UNSUPPORTED_ALGORITHM,
+ ISSUER_MISMATCH,
+ ALGORITHM_MISMATCH,
+ INVALID_PUBLIC_KEY,
+ ERROR_RETRIEVING_PROVIDER_METADATA,
+ ERROR_RETRIEVING_PUBLIC_KEY,
+ ERROR_DECODING_JWT,
+ ERROR_VERIFYING_JWT,
+ ERROR_VERIFYING_JWT_SIGNATURE,
+ INVALID_JWT_CLAIM,
+ EXPIRED_JWT,
}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java
new file mode 100644
index 0000000000000..2078666a08dd9
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationProviderOpenID.java
@@ -0,0 +1,495 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsBoolean;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsInt;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsSet;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsString;
+import com.auth0.jwk.InvalidPublicKeyException;
+import com.auth0.jwk.Jwk;
+import com.auth0.jwt.JWT;
+import com.auth0.jwt.JWTVerifier;
+import com.auth0.jwt.RegisteredClaims;
+import com.auth0.jwt.algorithms.Algorithm;
+import com.auth0.jwt.exceptions.AlgorithmMismatchException;
+import com.auth0.jwt.exceptions.InvalidClaimException;
+import com.auth0.jwt.exceptions.JWTDecodeException;
+import com.auth0.jwt.exceptions.JWTVerificationException;
+import com.auth0.jwt.exceptions.SignatureVerificationException;
+import com.auth0.jwt.exceptions.TokenExpiredException;
+import com.auth0.jwt.interfaces.Claim;
+import com.auth0.jwt.interfaces.DecodedJWT;
+import com.auth0.jwt.interfaces.Verification;
+import io.kubernetes.client.openapi.ApiClient;
+import io.kubernetes.client.util.Config;
+import io.netty.handler.ssl.SslContext;
+import io.netty.handler.ssl.SslContextBuilder;
+import java.io.File;
+import java.io.IOException;
+import java.net.SocketAddress;
+import java.security.PublicKey;
+import java.security.interfaces.ECPublicKey;
+import java.security.interfaces.RSAPublicKey;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import javax.naming.AuthenticationException;
+import javax.net.ssl.SSLSession;
+import org.apache.pulsar.broker.ServiceConfiguration;
+import org.apache.pulsar.broker.authentication.AuthenticationDataSource;
+import org.apache.pulsar.broker.authentication.AuthenticationProvider;
+import org.apache.pulsar.broker.authentication.AuthenticationProviderToken;
+import org.apache.pulsar.broker.authentication.AuthenticationState;
+import org.apache.pulsar.broker.authentication.metrics.AuthenticationMetrics;
+import org.apache.pulsar.common.api.AuthData;
+import org.asynchttpclient.AsyncHttpClient;
+import org.asynchttpclient.AsyncHttpClientConfig;
+import org.asynchttpclient.DefaultAsyncHttpClient;
+import org.asynchttpclient.DefaultAsyncHttpClientConfig;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * An {@link AuthenticationProvider} implementation that supports the usage of a JSON Web Token (JWT)
+ * for client authentication. This implementation retrieves the PublicKey from the JWT issuer (assuming the
+ * issuer is in the configured allowed list) and then uses that Public Key to verify the validity of the JWT's
+ * signature.
+ *
+ * The Public Keys for a given provider are cached based on certain configured parameters to improve performance.
+ * The tradeoff here is that the longer Public Keys are cached, the longer an invalidated token could be used. One way
+ * to ensure caches are cleared is to restart all brokers.
+ *
+ * Class is called from multiple threads. The implementation must be thread safe. This class expects to be loaded once
+ * and then called concurrently for each new connection. The cache is backed by a GuavaCachedJwkProvider, which is
+ * thread-safe.
+ *
+ * Supported algorithms are: RS256, RS384, RS512, ES256, ES384, ES512 where the naming conventions follow
+ * this RFC: https://datatracker.ietf.org/doc/html/rfc7518#section-3.1.
+ */
+public class AuthenticationProviderOpenID implements AuthenticationProvider {
+ private static final Logger log = LoggerFactory.getLogger(AuthenticationProviderOpenID.class);
+
+ private static final String SIMPLE_NAME = AuthenticationProviderOpenID.class.getSimpleName();
+
+ // Must match the value used by the OAuth2 Client Plugin.
+ private static final String AUTH_METHOD_NAME = "token";
+
+ // This is backed by an ObjectMapper, which is thread safe. It is an optimization
+ // to share this for decoding JWTs for all connections to this broker.
+ private final JWT jwtLibrary = new JWT();
+
+ private Set issuers;
+
+ // This caches the map from Issuer URL to the jwks_uri served at the /.well-known/openid-configuration endpoint
+ private OpenIDProviderMetadataCache openIDProviderMetadataCache;
+
+ // A cache used to store the results of getting the JWKS from the jwks_uri for an issuer.
+ private JwksCache jwksCache;
+
+ private volatile AsyncHttpClient httpClient;
+
+ // A list of supported algorithms. This is the "alg" field on the JWT.
+ // Source for strings: https://datatracker.ietf.org/doc/html/rfc7518#section-3.1.
+ private static final String ALG_RS256 = "RS256";
+ private static final String ALG_RS384 = "RS384";
+ private static final String ALG_RS512 = "RS512";
+ private static final String ALG_ES256 = "ES256";
+ private static final String ALG_ES384 = "ES384";
+ private static final String ALG_ES512 = "ES512";
+
+ private long acceptedTimeLeewaySeconds;
+ private FallbackDiscoveryMode fallbackDiscoveryMode;
+ private String roleClaim = ROLE_CLAIM_DEFAULT;
+ private boolean isRoleClaimNotSubject;
+
+ static final String ALLOWED_TOKEN_ISSUERS = "openIDAllowedTokenIssuers";
+ static final String ISSUER_TRUST_CERTS_FILE_PATH = "openIDTokenIssuerTrustCertsFilePath";
+ static final String FALLBACK_DISCOVERY_MODE = "openIDFallbackDiscoveryMode";
+ static final String ALLOWED_AUDIENCES = "openIDAllowedAudiences";
+ static final String ROLE_CLAIM = "openIDRoleClaim";
+ static final String ROLE_CLAIM_DEFAULT = "sub";
+ static final String ACCEPTED_TIME_LEEWAY_SECONDS = "openIDAcceptedTimeLeewaySeconds";
+ static final int ACCEPTED_TIME_LEEWAY_SECONDS_DEFAULT = 0;
+ static final String CACHE_SIZE = "openIDCacheSize";
+ static final int CACHE_SIZE_DEFAULT = 5;
+ static final String CACHE_REFRESH_AFTER_WRITE_SECONDS = "openIDCacheRefreshAfterWriteSeconds";
+ static final int CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT = 18 * 60 * 60;
+ static final String CACHE_EXPIRATION_SECONDS = "openIDCacheExpirationSeconds";
+ static final int CACHE_EXPIRATION_SECONDS_DEFAULT = 24 * 60 * 60;
+ static final String KEY_ID_CACHE_MISS_REFRESH_SECONDS = "openIDKeyIdCacheMissRefreshSeconds";
+ static final int KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT = 5 * 60;
+ static final String HTTP_CONNECTION_TIMEOUT_MILLIS = "openIDHttpConnectionTimeoutMillis";
+ static final int HTTP_CONNECTION_TIMEOUT_MILLIS_DEFAULT = 10_000;
+ static final String HTTP_READ_TIMEOUT_MILLIS = "openIDHttpReadTimeoutMillis";
+ static final int HTTP_READ_TIMEOUT_MILLIS_DEFAULT = 10_000;
+ static final String REQUIRE_HTTPS = "openIDRequireIssuersUseHttps";
+ static final boolean REQUIRE_HTTPS_DEFAULT = true;
+
+ // The list of audiences that are allowed to connect to this broker. A valid JWT must contain one of the audiences.
+ private String[] allowedAudiences;
+
+ @Override
+ public void initialize(ServiceConfiguration config) throws IOException {
+ this.allowedAudiences = validateAllowedAudiences(getConfigValueAsSet(config, ALLOWED_AUDIENCES));
+ this.roleClaim = getConfigValueAsString(config, ROLE_CLAIM, ROLE_CLAIM_DEFAULT);
+ this.isRoleClaimNotSubject = !ROLE_CLAIM_DEFAULT.equals(roleClaim);
+ this.acceptedTimeLeewaySeconds = getConfigValueAsInt(config, ACCEPTED_TIME_LEEWAY_SECONDS,
+ ACCEPTED_TIME_LEEWAY_SECONDS_DEFAULT);
+ boolean requireHttps = getConfigValueAsBoolean(config, REQUIRE_HTTPS, REQUIRE_HTTPS_DEFAULT);
+ this.fallbackDiscoveryMode = FallbackDiscoveryMode.valueOf(getConfigValueAsString(config,
+ FALLBACK_DISCOVERY_MODE, FallbackDiscoveryMode.DISABLED.name()));
+ this.issuers = validateIssuers(getConfigValueAsSet(config, ALLOWED_TOKEN_ISSUERS), requireHttps,
+ fallbackDiscoveryMode != FallbackDiscoveryMode.DISABLED);
+
+ int connectionTimeout = getConfigValueAsInt(config, HTTP_CONNECTION_TIMEOUT_MILLIS,
+ HTTP_CONNECTION_TIMEOUT_MILLIS_DEFAULT);
+ int readTimeout = getConfigValueAsInt(config, HTTP_READ_TIMEOUT_MILLIS, HTTP_READ_TIMEOUT_MILLIS_DEFAULT);
+ String trustCertsFilePath = getConfigValueAsString(config, ISSUER_TRUST_CERTS_FILE_PATH, null);
+ SslContext sslContext = null;
+ if (trustCertsFilePath != null) {
+ // Use default settings for everything but the trust store.
+ sslContext = SslContextBuilder.forClient()
+ .trustManager(new File(trustCertsFilePath))
+ .build();
+ }
+ AsyncHttpClientConfig clientConfig = new DefaultAsyncHttpClientConfig.Builder()
+ .setConnectTimeout(connectionTimeout)
+ .setReadTimeout(readTimeout)
+ .setSslContext(sslContext)
+ .build();
+ httpClient = new DefaultAsyncHttpClient(clientConfig);
+ ApiClient k8sApiClient =
+ fallbackDiscoveryMode != FallbackDiscoveryMode.DISABLED ? Config.defaultClient() : null;
+ this.openIDProviderMetadataCache = new OpenIDProviderMetadataCache(config, httpClient, k8sApiClient);
+ this.jwksCache = new JwksCache(config, httpClient, k8sApiClient);
+ }
+
+ @Override
+ public String getAuthMethodName() {
+ return AUTH_METHOD_NAME;
+ }
+
+ /**
+ * Authenticate the parameterized {@link AuthenticationDataSource} by verifying the issuer is an allowed issuer,
+ * then retrieving the JWKS URI from the issuer, then retrieving the Public key from the JWKS URI, and finally
+ * verifying the JWT signature and claims.
+ *
+ * @param authData - the authData passed by the Pulsar Broker containing the token.
+ * @return the role, if the JWT is authenticated, otherwise a failed future.
+ */
+ @Override
+ public CompletableFuture authenticateAsync(AuthenticationDataSource authData) {
+ return authenticateTokenAsync(authData).thenApply(this::getRole);
+ }
+
+ /**
+ * Authenticate the parameterized {@link AuthenticationDataSource} and return the decoded JWT.
+ * @param authData - the authData containing the token.
+ * @return a completed future with the decoded JWT, if the JWT is authenticated. Otherwise, a failed future.
+ */
+ CompletableFuture authenticateTokenAsync(AuthenticationDataSource authData) {
+ String token;
+ try {
+ token = AuthenticationProviderToken.getToken(authData);
+ } catch (AuthenticationException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ return CompletableFuture.failedFuture(e);
+ }
+ return authenticateToken(token)
+ .whenComplete((jwt, e) -> {
+ if (jwt != null) {
+ AuthenticationMetrics.authenticateSuccess(getClass().getSimpleName(), getAuthMethodName());
+ }
+ // Failure metrics are incremented within methods above
+ });
+ }
+
+ /**
+ * Get the role from a JWT at the configured role claim field.
+ * NOTE: does not do any verification of the JWT
+ * @param jwt - token to get the role from
+ * @return the role, or null, if it is not set on the JWT
+ */
+ String getRole(DecodedJWT jwt) {
+ try {
+ Claim roleClaim = jwt.getClaim(this.roleClaim);
+ if (roleClaim.isNull()) {
+ // The claim was not present in the JWT
+ return null;
+ }
+
+ String role = roleClaim.asString();
+ if (role != null) {
+ // The role is non null only if the JSON node is a text field
+ return role;
+ }
+
+ List roles = jwt.getClaim(this.roleClaim).asList(String.class);
+ if (roles == null || roles.size() == 0) {
+ return null;
+ } else if (roles.size() == 1) {
+ return roles.get(0);
+ } else {
+ log.debug("JWT for subject [{}] has multiple roles; using the first one.", jwt.getSubject());
+ return roles.get(0);
+ }
+ } catch (JWTDecodeException e) {
+ log.error("Exception while retrieving role from JWT", e);
+ return null;
+ }
+ }
+
+ /**
+ * Convert a JWT string into a {@link DecodedJWT}
+ * The benefit of using this method is that it utilizes the already instantiated {@link JWT} parser.
+ * WARNING: this method does not verify the authenticity of the token. It only decodes it.
+ *
+ * @param token - string JWT to be decoded
+ * @return a decoded JWT
+ * @throws AuthenticationException if the token string is null or if any part of the token contains
+ * an invalid jwt or JSON format of each of the jwt parts.
+ */
+ DecodedJWT decodeJWT(String token) throws AuthenticationException {
+ if (token == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ throw new AuthenticationException("Invalid token: cannot be null");
+ }
+ try {
+ return jwtLibrary.decodeJwt(token);
+ } catch (JWTDecodeException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ throw new AuthenticationException("Unable to decode JWT: " + e.getMessage());
+ }
+ }
+
+ /**
+ * Authenticate the parameterized JWT.
+ *
+ * @param token - a nonnull JWT to authenticate
+ * @return a fully authenticated JWT, or AuthenticationException if the JWT is proven to be invalid in any way
+ */
+ private CompletableFuture authenticateToken(String token) {
+ if (token == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ return CompletableFuture.failedFuture(new AuthenticationException("JWT cannot be null"));
+ }
+ final DecodedJWT jwt;
+ try {
+ jwt = decodeJWT(token);
+ } catch (AuthenticationException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ return CompletableFuture.failedFuture(e);
+ }
+ return verifyIssuerAndGetJwk(jwt)
+ .thenCompose(jwk -> {
+ try {
+ if (!jwt.getAlgorithm().equals(jwk.getAlgorithm())) {
+ incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
+ return CompletableFuture.failedFuture(
+ new AuthenticationException("JWK's alg [" + jwk.getAlgorithm()
+ + "] does not match JWT's alg [" + jwt.getAlgorithm() + "]"));
+ }
+ // Verify the JWT signature
+ // Throws exception if any verification check fails
+ return CompletableFuture
+ .completedFuture(verifyJWT(jwk.getPublicKey(), jwt.getAlgorithm(), jwt));
+ } catch (InvalidPublicKeyException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.INVALID_PUBLIC_KEY);
+ return CompletableFuture.failedFuture(
+ new AuthenticationException("Invalid public key: " + e.getMessage()));
+ } catch (AuthenticationException e) {
+ return CompletableFuture.failedFuture(e);
+ }
+ });
+ }
+
+ /**
+ * Verify the JWT's issuer (iss) claim is one of the allowed issuers and then retrieve the JWK from the issuer. If
+ * not, see {@link FallbackDiscoveryMode} for the fallback behavior.
+ * @param jwt - the token to use to discover the issuer's JWKS URI, which is then used to retrieve the issuer's
+ * current public keys.
+ * @return a JWK that can be used to verify the JWT's signature
+ */
+ private CompletableFuture verifyIssuerAndGetJwk(DecodedJWT jwt) {
+ if (jwt.getIssuer() == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER);
+ return CompletableFuture.failedFuture(new AuthenticationException("Issuer cannot be null"));
+ } else if (this.issuers.contains(jwt.getIssuer())) {
+ // Retrieve the metadata: https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
+ return openIDProviderMetadataCache.getOpenIDProviderMetadataForIssuer(jwt.getIssuer())
+ .thenCompose(metadata -> jwksCache.getJwk(metadata.getJwksUri(), jwt.getKeyId()));
+ } else if (fallbackDiscoveryMode == FallbackDiscoveryMode.KUBERNETES_DISCOVER_TRUSTED_ISSUER) {
+ return openIDProviderMetadataCache.getOpenIDProviderMetadataForKubernetesApiServer(jwt.getIssuer())
+ .thenCompose(metadata ->
+ openIDProviderMetadataCache.getOpenIDProviderMetadataForIssuer(metadata.getIssuer()))
+ .thenCompose(metadata -> jwksCache.getJwk(metadata.getJwksUri(), jwt.getKeyId()));
+ } else if (fallbackDiscoveryMode == FallbackDiscoveryMode.KUBERNETES_DISCOVER_PUBLIC_KEYS) {
+ return openIDProviderMetadataCache.getOpenIDProviderMetadataForKubernetesApiServer(jwt.getIssuer())
+ .thenCompose(__ -> jwksCache.getJwkFromKubernetesApiServer(jwt.getKeyId()));
+ } else {
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ISSUER);
+ return CompletableFuture
+ .failedFuture(new AuthenticationException("Issuer not allowed: " + jwt.getIssuer()));
+ }
+ }
+
+ @Override
+ public AuthenticationState newAuthState(AuthData authData, SocketAddress remoteAddress, SSLSession sslSession)
+ throws AuthenticationException {
+ return new AuthenticationStateOpenID(this, remoteAddress, sslSession);
+ }
+
+ @Override
+ public void close() throws IOException {
+ httpClient.close();
+ }
+
+ /**
+ * Verify the parameterized JWT using the given public key and algorithm, and return the verified JWT.
+ *
+ * @param publicKey - the public key to use when configuring the validator
+ * @param publicKeyAlg - the algorithm for the parameterized public key
+ * @param jwt - jwt to be verified and returned (only if verified)
+ * @return the verified JWT associated with the parameterized public key, if verification succeeds.
+ * @throws AuthenticationException if the Public Key's algorithm is not supported or if the algorithm param does not
+ * match the Public Key's actual algorithm.
+ */
+ DecodedJWT verifyJWT(PublicKey publicKey,
+ String publicKeyAlg,
+ DecodedJWT jwt) throws AuthenticationException {
+ if (publicKeyAlg == null) {
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
+ throw new AuthenticationException("PublicKey algorithm cannot be null");
+ }
+
+ Algorithm alg;
+ try {
+ switch (publicKeyAlg) {
+ case ALG_RS256:
+ alg = Algorithm.RSA256((RSAPublicKey) publicKey, null);
+ break;
+ case ALG_RS384:
+ alg = Algorithm.RSA384((RSAPublicKey) publicKey, null);
+ break;
+ case ALG_RS512:
+ alg = Algorithm.RSA512((RSAPublicKey) publicKey, null);
+ break;
+ case ALG_ES256:
+ alg = Algorithm.ECDSA256((ECPublicKey) publicKey, null);
+ break;
+ case ALG_ES384:
+ alg = Algorithm.ECDSA384((ECPublicKey) publicKey, null);
+ break;
+ case ALG_ES512:
+ alg = Algorithm.ECDSA512((ECPublicKey) publicKey, null);
+ break;
+ default:
+ incrementFailureMetric(AuthenticationExceptionCode.UNSUPPORTED_ALGORITHM);
+ throw new AuthenticationException("Unsupported algorithm: " + publicKeyAlg);
+ }
+ } catch (ClassCastException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
+ throw new AuthenticationException("Expected PublicKey alg [" + publicKeyAlg + "] does not match actual alg.");
+ }
+
+ // We verify issuer when retrieving the PublicKey, so it is not verified here.
+ // The claim presence requirements are based on https://openid.net/specs/openid-connect-basic-1_0.html#IDToken
+ Verification verifierBuilder = JWT.require(alg)
+ .acceptLeeway(acceptedTimeLeewaySeconds)
+ .withAnyOfAudience(allowedAudiences)
+ .withClaimPresence(RegisteredClaims.ISSUED_AT)
+ .withClaimPresence(RegisteredClaims.EXPIRES_AT)
+ .withClaimPresence(RegisteredClaims.NOT_BEFORE)
+ .withClaimPresence(RegisteredClaims.SUBJECT);
+
+ if (isRoleClaimNotSubject) {
+ verifierBuilder = verifierBuilder.withClaimPresence(roleClaim);
+ }
+
+ JWTVerifier verifier = verifierBuilder.build();
+
+ try {
+ return verifier.verify(jwt);
+ } catch (TokenExpiredException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.EXPIRED_JWT);
+ throw new AuthenticationException("JWT expired: " + e.getMessage());
+ } catch (SignatureVerificationException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT_SIGNATURE);
+ throw new AuthenticationException("JWT signature verification exception: " + e.getMessage());
+ } catch (InvalidClaimException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.INVALID_JWT_CLAIM);
+ throw new AuthenticationException("JWT contains invalid claim: " + e.getMessage());
+ } catch (AlgorithmMismatchException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ALGORITHM_MISMATCH);
+ throw new AuthenticationException("JWT algorithm does not match Public Key algorithm: " + e.getMessage());
+ } catch (JWTDecodeException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_DECODING_JWT);
+ throw new AuthenticationException("Error while decoding JWT: " + e.getMessage());
+ } catch (JWTVerificationException | IllegalArgumentException e) {
+ incrementFailureMetric(AuthenticationExceptionCode.ERROR_VERIFYING_JWT);
+ throw new AuthenticationException("JWT verification failed: " + e.getMessage());
+ }
+ }
+
+ static void incrementFailureMetric(AuthenticationExceptionCode code) {
+ AuthenticationMetrics.authenticateFailure(SIMPLE_NAME, AUTH_METHOD_NAME, code);
+ }
+
+ /**
+ * Validate the configured allow list of allowedIssuers. The allowedIssuers set must be nonempty in order for
+ * the plugin to authenticate any token. Thus, it fails initialization if the configuration is
+ * missing. Each issuer URL should use the HTTPS scheme. The plugin fails initialization if any
+ * issuer url is insecure, unless requireHttps is false.
+ * @param allowedIssuers - issuers to validate
+ * @param requireHttps - whether to require https for issuers.
+ * @param allowEmptyIssuers - whether to allow empty issuers. This setting only makes sense when kubernetes is used
+ * as a fallback issuer.
+ * @return the validated issuers
+ * @throws IllegalArgumentException if the allowedIssuers is empty, or contains insecure issuers when required
+ */
+ private Set validateIssuers(Set allowedIssuers, boolean requireHttps, boolean allowEmptyIssuers) {
+ if (allowedIssuers == null || (allowedIssuers.isEmpty() && !allowEmptyIssuers)) {
+ throw new IllegalArgumentException("Missing configured value for: " + ALLOWED_TOKEN_ISSUERS);
+ }
+ for (String issuer : allowedIssuers) {
+ if (!issuer.toLowerCase().startsWith("https://")) {
+ log.warn("Allowed issuer is not using https scheme: {}", issuer);
+ if (requireHttps) {
+ throw new IllegalArgumentException("Issuer URL does not use https, but must: " + issuer);
+ }
+ }
+ }
+ return allowedIssuers;
+ }
+
+ /**
+ * Validate the configured allow list of allowedAudiences. The allowedAudiences must be set because
+ * JWT must have an audience claim.
+ * See https://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation.
+ * @param allowedAudiences
+ * @return the validated audiences
+ */
+ String[] validateAllowedAudiences(Set allowedAudiences) {
+ if (allowedAudiences == null || allowedAudiences.isEmpty()) {
+ throw new IllegalArgumentException("Missing configured value for: " + ALLOWED_AUDIENCES);
+ }
+ return allowedAudiences.toArray(new String[0]);
+ }
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationStateOpenID.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationStateOpenID.java
new file mode 100644
index 0000000000000..3046a6dd0e3b4
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/AuthenticationStateOpenID.java
@@ -0,0 +1,96 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import java.net.SocketAddress;
+import java.util.concurrent.CompletableFuture;
+import javax.naming.AuthenticationException;
+import javax.net.ssl.SSLSession;
+import org.apache.pulsar.broker.authentication.AuthenticationDataCommand;
+import org.apache.pulsar.broker.authentication.AuthenticationDataSource;
+import org.apache.pulsar.broker.authentication.AuthenticationState;
+import org.apache.pulsar.common.api.AuthData;
+
+/**
+ * Class representing the authentication state of a single connection.
+ */
+class AuthenticationStateOpenID implements AuthenticationState {
+ private final AuthenticationProviderOpenID provider;
+ private AuthenticationDataSource authenticationDataSource;
+ private volatile String role;
+ private final SocketAddress remoteAddress;
+ private final SSLSession sslSession;
+ private volatile long expiration;
+
+ AuthenticationStateOpenID(
+ AuthenticationProviderOpenID provider,
+ SocketAddress remoteAddress,
+ SSLSession sslSession) {
+ this.provider = provider;
+ this.remoteAddress = remoteAddress;
+ this.sslSession = sslSession;
+ }
+
+ @Override
+ public String getAuthRole() throws AuthenticationException {
+ if (role == null) {
+ throw new AuthenticationException("Authentication has not completed");
+ }
+ return role;
+ }
+
+ @Deprecated
+ @Override
+ public AuthData authenticate(AuthData authData) throws AuthenticationException {
+ // This method is not expected to be called and is subject to removal.
+ throw new AuthenticationException("Not supported");
+ }
+
+ @Override
+ public CompletableFuture authenticateAsync(AuthData authData) {
+ final String token = new String(authData.getBytes(), UTF_8);
+ this.authenticationDataSource = new AuthenticationDataCommand(token, remoteAddress, sslSession);
+ return provider
+ .authenticateTokenAsync(authenticationDataSource)
+ .thenApply(jwt -> {
+ this.role = provider.getRole(jwt);
+ // OIDC requires setting the exp claim, so this should never be null.
+ // We verify it is not null during token validation.
+ this.expiration = jwt.getExpiresAt().getTime();
+ // Single stage authentication, so return null here
+ return null;
+ });
+ }
+
+ @Override
+ public AuthenticationDataSource getAuthDataSource() {
+ return authenticationDataSource;
+ }
+
+ @Override
+ public boolean isComplete() {
+ return role != null;
+ }
+
+ @Override
+ public boolean isExpired() {
+ return System.currentTimeMillis() > expiration;
+ }
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/ConfigUtils.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/ConfigUtils.java
new file mode 100644
index 0000000000000..f62bf9c818653
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/ConfigUtils.java
@@ -0,0 +1,143 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Set;
+import java.util.stream.Collectors;
+import org.apache.commons.lang3.StringUtils;
+import org.apache.pulsar.broker.ServiceConfiguration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+class ConfigUtils {
+ private static final Logger log = LoggerFactory.getLogger(ConfigUtils.class);
+
+ /**
+ * Get configured property as a string. If not configured, return null.
+ * @param conf - the configuration map
+ * @param configProp - the property to get
+ * @return a string from the conf or null, if the configuration property was not set
+ */
+ static String getConfigValueAsString(ServiceConfiguration conf,
+ String configProp) throws IllegalArgumentException {
+ String value = getConfigValueAsStringImpl(conf, configProp);
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return value;
+ }
+
+ /**
+ * Get configured property as a string. If not configured, return null.
+ * @param conf - the configuration map
+ * @param configProp - the property to get
+ * @param defaultValue - the value to use if the configuration value is not set
+ * @return a string from the conf or the default value
+ */
+ static String getConfigValueAsString(ServiceConfiguration conf, String configProp,
+ String defaultValue) throws IllegalArgumentException {
+ String value = getConfigValueAsStringImpl(conf, configProp);
+ if (value == null) {
+ value = defaultValue;
+ }
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return value;
+ }
+
+ /**
+ * Get configured property as a set. Split using a comma delimiter and remove any extra whitespace surrounding
+ * the commas. If not configured, return the empty set.
+ *
+ * @param conf - the map of configuration properties
+ * @param configProp - the property (key) to get
+ * @return a set of strings from the conf
+ */
+ static Set getConfigValueAsSet(ServiceConfiguration conf, String configProp) {
+ String value = getConfigValueAsStringImpl(conf, configProp);
+ if (StringUtils.isBlank(value)) {
+ log.info("Configuration for [{}] is the empty set.", configProp);
+ return Collections.emptySet();
+ }
+ Set set = Arrays.stream(value.trim().split("\\s*,\\s*")).collect(Collectors.toSet());
+ log.info("Configuration for [{}] is [{}].", configProp, String.join(", ", set));
+ return set;
+ }
+
+ private static String getConfigValueAsStringImpl(ServiceConfiguration conf,
+ String configProp) throws IllegalArgumentException {
+ Object value = conf.getProperty(configProp);
+ if (value instanceof String) {
+ return (String) value;
+ } else {
+ return null;
+ }
+ }
+
+ /**
+ * Utility method to get an integer from the {@link ServiceConfiguration}. If the value is not a valid int or the
+ * key is not present in the conf, the default value will be used.
+ *
+ * @param conf - the map of configuration properties
+ * @param configProp - the property (key) to get
+ * @param defaultValue - the value to use if the property is missing from the conf
+ * @return an int
+ */
+ static int getConfigValueAsInt(ServiceConfiguration conf, String configProp, int defaultValue) {
+ Object value = conf.getProperty(configProp);
+ if (value instanceof Integer) {
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return (Integer) value;
+ } else if (value instanceof String) {
+ try {
+ return Integer.parseInt((String) value);
+ } catch (NumberFormatException numberFormatException) {
+ log.error("Expected configuration for [{}] to be an int, but got [{}]. Using default value: [{}]",
+ configProp, value, defaultValue, numberFormatException);
+ return defaultValue;
+ }
+ } else {
+ log.info("Configuration for [{}] is using the default value: [{}]", configProp, defaultValue);
+ return defaultValue;
+ }
+ }
+
+ /**
+ * Utility method to get a boolean from the {@link ServiceConfiguration}. If the key is not present in the conf,
+ * return the default value. If the key is present but the value is not a valid boolean, the result will be false.
+ *
+ * @param conf - the map of configuration properties
+ * @param configProp - the property (key) to get
+ * @param defaultValue - the value to use if the property is missing from the conf
+ * @return a boolean
+ */
+ static boolean getConfigValueAsBoolean(ServiceConfiguration conf, String configProp, boolean defaultValue) {
+ Object value = conf.getProperty(configProp);
+ if (value instanceof Boolean) {
+ log.info("Configuration for [{}] is [{}]", configProp, value);
+ return (boolean) value;
+ } else if (value instanceof String) {
+ boolean result = Boolean.parseBoolean((String) value);
+ log.info("Configuration for [{}] is [{}]", configProp, result);
+ return result;
+ } else {
+ log.info("Configuration for [{}] is using the default value: [{}]", configProp, defaultValue);
+ return defaultValue;
+ }
+ }
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/FallbackDiscoveryMode.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/FallbackDiscoveryMode.java
new file mode 100644
index 0000000000000..5bf0c1b23fce6
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/FallbackDiscoveryMode.java
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import org.apache.pulsar.common.classification.InterfaceStability;
+
+/**
+ * These are the modes available for configuring how the Open ID Connect Authentication Provider should handle a JWT
+ * that has an issuer that is not explicitly in the allowed issuers set configured by
+ * {@link AuthenticationProviderOpenID#ALLOWED_TOKEN_ISSUERS}. The current implementations rely on using the Kubernetes
+ * Api Server's Open ID Connect features to discover an additional issuer or additional public keys to trust. See the
+ * Kubernetes documentation for more information on how Service Accounts can integrate with Open ID Connect.
+ * https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/#service-account-issuer-discovery
+ */
+@InterfaceStability.Evolving
+public enum FallbackDiscoveryMode {
+    /**
+     * There will be no discovery of additional trusted issuers or public keys. This setting requires that operators
+     * explicitly allow all issuers that will be trusted. For the Kubernetes Service Account Token Projections to work,
+     * the operator must explicitly trust the issuer on the token's "iss" claim. This is the default setting because it
+     * is the only mode that explicitly follows the OIDC spec for verification of discovered provider configuration.
+     */
+    DISABLED,
+
+    /**
+     * The Kubernetes Api Server will be used to discover an additional trusted issuer by getting the issuer at the
+     * Api Server's /.well-known/openid-configuration endpoint, verifying that issuer matches the "iss" claim on the
+     * supplied token, then treating that issuer as a trusted issuer by discovering the jwks_uri via that issuer's
+     * /.well-known/openid-configuration endpoint. This mode can be helpful in EKS environments where the Api Server's
+     * public keys served at the /openid/v1/jwks endpoint are not the same as the public keys served at the issuer's
+     * jwks_uri. It fails to be OIDC compliant because the URL used to discover the provider configuration is not the
+     * same as the issuer claim on the token.
+     */
+    KUBERNETES_DISCOVER_TRUSTED_ISSUER,
+
+    /**
+     * The Kubernetes Api Server will be used to discover an additional set of valid public keys by getting the issuer
+     * at the Api Server's /.well-known/openid-configuration endpoint, verifying that issuer matches the "iss" claim on
+     * the supplied token, then calling the Api Server endpoint to get the public keys using a kubernetes client. This
+     * mode is currently useful for getting the public keys from the Api Server because the Api Server requires custom
+     * TLS and authentication, and the kubernetes client automatically handles those. It fails to be OIDC compliant
+     * because the URL used to discover the provider configuration is not the same as the issuer claim on the token.
+     */
+    KUBERNETES_DISCOVER_PUBLIC_KEYS,
+}
diff --git a/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java
new file mode 100644
index 0000000000000..73934e9c1e05e
--- /dev/null
+++ b/pulsar-broker-auth-oidc/src/main/java/org/apache/pulsar/broker/authentication/oidc/JwksCache.java
@@ -0,0 +1,239 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.pulsar.broker.authentication.oidc;
+
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_EXPIRATION_SECONDS;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_EXPIRATION_SECONDS_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_REFRESH_AFTER_WRITE_SECONDS;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_SIZE;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.CACHE_SIZE_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT;
+import static org.apache.pulsar.broker.authentication.oidc.AuthenticationProviderOpenID.incrementFailureMetric;
+import static org.apache.pulsar.broker.authentication.oidc.ConfigUtils.getConfigValueAsInt;
+import com.auth0.jwk.Jwk;
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.github.benmanes.caffeine.cache.AsyncCacheLoader;
+import com.github.benmanes.caffeine.cache.AsyncLoadingCache;
+import com.github.benmanes.caffeine.cache.Caffeine;
+import io.kubernetes.client.openapi.ApiCallback;
+import io.kubernetes.client.openapi.ApiClient;
+import io.kubernetes.client.openapi.ApiException;
+import io.kubernetes.client.openapi.apis.OpenidApi;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Optional;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import javax.naming.AuthenticationException;
+import org.apache.pulsar.broker.ServiceConfiguration;
+import org.asynchttpclient.AsyncHttpClient;
+
+public class JwksCache {
+
+    // Map from an issuer's JWKS URI to its JWKS. When the Optional key is empty, the keys are loaded via the
+    // Kubernetes Api Server fallback client instead of an HTTP call to a jwks_uri.
+    private final AsyncLoadingCache<Optional<String>, List<Jwk>> cache;
+    // Time (System.nanoTime) each cache key was last (re)loaded; used to rate limit key-id-miss refreshes.
+    private final ConcurrentHashMap<Optional<String>, Long> jwksLastRefreshTime = new ConcurrentHashMap<>();
+    private final long keyIdCacheMissRefreshNanos;
+    private final ObjectReader reader = new ObjectMapper().readerFor(HashMap.class);
+    private final AsyncHttpClient httpClient;
+    // Null when the Kubernetes fallback is not enabled.
+    private final OpenidApi openidApi;
+
+    /**
+     * Create a JWKS cache backed by a Caffeine {@link AsyncLoadingCache}.
+     *
+     * @param config - the service configuration supplying the cache size, refresh, and expiration settings
+     * @param httpClient - client used to fetch a JWKS from an issuer's jwks_uri
+     * @param apiClient - Kubernetes client for the Api Server fallback; when null, the fallback is disabled
+     */
+    JwksCache(ServiceConfiguration config, AsyncHttpClient httpClient, ApiClient apiClient) throws IOException {
+        // Store the clients
+        this.httpClient = httpClient;
+        this.openidApi = apiClient != null ? new OpenidApi(apiClient) : null;
+        keyIdCacheMissRefreshNanos = TimeUnit.SECONDS.toNanos(getConfigValueAsInt(config,
+                KEY_ID_CACHE_MISS_REFRESH_SECONDS, KEY_ID_CACHE_MISS_REFRESH_SECONDS_DEFAULT));
+        // Configure the cache
+        int maxSize = getConfigValueAsInt(config, CACHE_SIZE, CACHE_SIZE_DEFAULT);
+        int refreshAfterWriteSeconds = getConfigValueAsInt(config, CACHE_REFRESH_AFTER_WRITE_SECONDS,
+                CACHE_REFRESH_AFTER_WRITE_SECONDS_DEFAULT);
+        int expireAfterSeconds = getConfigValueAsInt(config, CACHE_EXPIRATION_SECONDS,
+                CACHE_EXPIRATION_SECONDS_DEFAULT);
+        AsyncCacheLoader<Optional<String>, List<Jwk>> loader = (jwksUri, executor) -> {
+            // Store the time of the retrieval, even though it might be a little early or the call might fail.
+            jwksLastRefreshTime.put(jwksUri, System.nanoTime());
+            if (jwksUri.isPresent()) {
+                return getJwksFromJwksUri(jwksUri.get());
+            } else {
+                return getJwksFromKubernetesApiServer();
+            }
+        };
+        this.cache = Caffeine.newBuilder()
+                .maximumSize(maxSize)
+                .refreshAfterWrite(refreshAfterWriteSeconds, TimeUnit.SECONDS)
+                .expireAfterWrite(expireAfterSeconds, TimeUnit.SECONDS)
+                .buildAsync(loader);
+    }
+
+    /**
+     * Retrieve the JWK with the given key ID from the JWKS served at the given URI, loading (and caching) the
+     * JWKS if necessary.
+     *
+     * @param jwksUri - the JWKS URI to load keys from; must not be null
+     * @param keyId - the key ID ("kid") to look up
+     * @return a future completed with the matching {@link Jwk}, or failed if the key cannot be retrieved
+     */
+    CompletableFuture<Jwk> getJwk(String jwksUri, String keyId) {
+        if (jwksUri == null) {
+            incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+            return CompletableFuture.failedFuture(new IllegalArgumentException("jwksUri must not be null."));
+        }
+        return getJwkAndMaybeReload(Optional.of(jwksUri), keyId, false);
+    }
+
+    /**
+     * Retrieve the JWK for the given key ID from the given JWKS URI. If the key ID is not found, and failOnMissingKeyId
+     * is false, then the JWK will be reloaded from the JWKS URI (at most once per keyIdCacheMissRefreshNanos) and the
+     * key ID will be searched for again. An empty maybeJwksUri means the keys come from the Kubernetes Api Server.
+     */
+    private CompletableFuture<Jwk> getJwkAndMaybeReload(Optional<String> maybeJwksUri,
+                                                        String keyId,
+                                                        boolean failOnMissingKeyId) {
+        return cache
+                .get(maybeJwksUri)
+                .thenCompose(jwks -> {
+                    try {
+                        return CompletableFuture.completedFuture(getJwkForKID(maybeJwksUri, jwks, keyId));
+                    } catch (IllegalArgumentException e) {
+                        if (failOnMissingKeyId) {
+                            throw e;
+                        } else {
+                            Long lastRefresh = jwksLastRefreshTime.get(maybeJwksUri);
+                            if (lastRefresh == null || System.nanoTime() - lastRefresh > keyIdCacheMissRefreshNanos) {
+                                // In this case, the key ID was not found, but we haven't refreshed the JWKS in a while,
+                                // so it is possible the key ID was added. Refresh the JWKS and try again.
+                                cache.synchronous().invalidate(maybeJwksUri);
+                            }
+                            // There is a small race condition where the JWKS could be refreshed by another thread,
+                            // so we retry getting the JWK, even though we might not have invalidated the cache.
+                            return getJwkAndMaybeReload(maybeJwksUri, keyId, true);
+                        }
+                    }
+                });
+    }
+
+    /**
+     * Fetch and parse the JWKS document served at the given jwks_uri over HTTP.
+     *
+     * @param jwksUri - the URI of the JWKS document
+     * @return a future completed with the parsed list of {@link Jwk}s, or failed with an AuthenticationException
+     */
+    private CompletableFuture<List<Jwk>> getJwksFromJwksUri(String jwksUri) {
+        return httpClient
+                .prepareGet(jwksUri)
+                .execute()
+                .toCompletableFuture()
+                .thenCompose(result -> {
+                    CompletableFuture<List<Jwk>> future = new CompletableFuture<>();
+                    try {
+                        HashMap<String, Object> jwks =
+                                reader.readValue(result.getResponseBodyAsBytes());
+                        future.complete(convertToJwks(jwksUri, jwks));
+                    } catch (AuthenticationException e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(e);
+                    } catch (Exception e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(new AuthenticationException(
+                                "Error retrieving public key at " + jwksUri + ": " + e.getMessage()));
+                    }
+                    return future;
+                });
+    }
+
+    /**
+     * Retrieve the JWK with the given key ID from the Kubernetes Api Server's public keys.
+     *
+     * @param keyId - the key ID ("kid") to look up
+     * @return a future completed with the matching {@link Jwk}, or failed if the Kubernetes fallback is not
+     *         enabled or the key cannot be retrieved
+     */
+    CompletableFuture<Jwk> getJwkFromKubernetesApiServer(String keyId) {
+        if (openidApi == null) {
+            incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+            return CompletableFuture.failedFuture(new AuthenticationException(
+                    "Failed to retrieve public key from Kubernetes API server: Kubernetes fallback is not enabled."));
+        }
+        return getJwkAndMaybeReload(Optional.empty(), keyId, false);
+    }
+
+    /**
+     * Asynchronously fetch the JWKS from the Kubernetes Api Server using the kubernetes client, which handles the
+     * Api Server's custom TLS and authentication requirements.
+     */
+    private CompletableFuture<List<Jwk>> getJwksFromKubernetesApiServer() {
+        CompletableFuture<List<Jwk>> future = new CompletableFuture<>();
+        try {
+            openidApi.getServiceAccountIssuerOpenIDKeysetAsync(new ApiCallback<String>() {
+                @Override
+                public void onFailure(ApiException e, int statusCode, Map<String, List<String>> responseHeaders) {
+                    incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                    // We want the message and responseBody here: https://github.com/kubernetes-client/java/issues/2066.
+                    future.completeExceptionally(
+                            new AuthenticationException("Failed to retrieve public key from Kubernetes API server. "
+                                    + "Message: " + e.getMessage() + " Response body: " + e.getResponseBody()));
+                }
+
+                @Override
+                public void onSuccess(String result, int statusCode, Map<String, List<String>> responseHeaders) {
+                    try {
+                        HashMap<String, Object> jwks = reader.readValue(result);
+                        future.complete(convertToJwks("Kubernetes API server", jwks));
+                    } catch (AuthenticationException e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(e);
+                    } catch (Exception e) {
+                        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+                        future.completeExceptionally(new AuthenticationException(
+                                "Error retrieving public key at Kubernetes API server: " + e.getMessage()));
+                    }
+                }
+
+                @Override
+                public void onUploadProgress(long bytesWritten, long contentLength, boolean done) {
+                    // Progress tracking is not needed for this small payload.
+                }
+
+                @Override
+                public void onDownloadProgress(long bytesRead, long contentLength, boolean done) {
+                    // Progress tracking is not needed for this small payload.
+                }
+            });
+        } catch (ApiException e) {
+            incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+            future.completeExceptionally(
+                    new AuthenticationException("Failed to retrieve public key from Kubernetes API server: "
+                            + e.getMessage()));
+        }
+        return future;
+    }
+
+    /**
+     * Find the JWK whose key ID ("kid") matches keyId in the given JWKS.
+     *
+     * @throws IllegalArgumentException if no JWK in the list has the given key ID
+     */
+    private Jwk getJwkForKID(Optional<String> maybeJwksUri, List<Jwk> jwks, String keyId) {
+        for (Jwk jwk : jwks) {
+            if (jwk.getId().equals(keyId)) {
+                return jwk;
+            }
+        }
+        incrementFailureMetric(AuthenticationExceptionCode.ERROR_RETRIEVING_PUBLIC_KEY);
+        throw new IllegalArgumentException("No JWK found for Key ID " + keyId);
+    }
+
+ /**
+ * The JWK Set is stored in the "keys" key see https://www.rfc-editor.org/rfc/rfc7517#section-5.1.
+ *
+ * @param jwksUri - the URI used to retrieve the JWKS
+ * @param jwks - the JWKS to convert
+ * @return a list of {@link Jwk}
+ */
+ private List convertToJwks(String jwksUri, Map jwks) throws AuthenticationException {
+ try {
+ @SuppressWarnings("unchecked")
+ List