diff --git a/.ci/docker-compose-file/.env b/.ci/docker-compose-file/.env index e3beb1bc7..397c44854 100644 --- a/.ci/docker-compose-file/.env +++ b/.ci/docker-compose-file/.env @@ -5,5 +5,6 @@ PGSQL_TAG=13 LDAP_TAG=2.4.50 INFLUXDB_TAG=2.5.0 TDENGINE_TAG=3.0.2.4 +DYNAMO_TAG=1.21.0 TARGET=emqx/emqx diff --git a/.ci/docker-compose-file/clickhouse/config.xml b/.ci/docker-compose-file/clickhouse/config.xml new file mode 100644 index 000000000..085f92a12 --- /dev/null +++ b/.ci/docker-compose-file/clickhouse/config.xml @@ -0,0 +1,678 @@ + + + + + + trace + /var/log/clickhouse-server/clickhouse-server.log + /var/log/clickhouse-server/clickhouse-server.err.log + 1000M + 10 + + + + + + + + + + + + + + false + + false + + + https://6f33034cfe684dd7a3ab9875e57b1c8d@o388870.ingest.sentry.io/5226277 + + + + 8123 + 9000 + 9004 + + + + + + + /etc/clickhouse-server/server.crt + /etc/clickhouse-server/server.key + + /etc/clickhouse-server/dhparam.pem + none + true + true + sslv2,sslv3 + true + + + + true + true + sslv2,sslv3 + true + + + + RejectCertificateHandler + + + + + + + + + 9009 + + + + + + + + + + + + + + + + + + + + 4096 + 3 + + + 100 + + + 0 + + + + 10000 + + + 10 + + + 4194304 + + + 0 + + + + + + 8589934592 + + + 5368709120 + + + + /var/lib/clickhouse/ + + + /var/lib/clickhouse/tmp/ + + + + + + /var/lib/clickhouse/user_files/ + + + /var/lib/clickhouse/access/ + + + /etc/clickhouse-server/users.xml + + + default + + + + + + default + + + + + + + + + true + + + + + + + + + + + + localhost + 9000 + + + + + + + + + localhost + 9000 + + + + + localhost + 9000 + + + + + + + 127.0.0.1 + 9000 + + + + + 127.0.0.2 + 9000 + + + + + + + localhost + 9440 + 1 + + + + + + + localhost + 9000 + + + + + localhost + 1 + + + + + + + + + + + + + + + + + + + + + + + + 3600 + + + + 3600 + + + 60 + + + + + + + + + + + + + system + query_log
+ + toYYYYMM(event_date) + + + + + 7500 +
+ + + + system + trace_log
+ + toYYYYMM(event_date) + 7500 +
+ + + + system + query_thread_log
+ toYYYYMM(event_date) + 7500 +
+ + + + + + + + system + metric_log
+ 7500 + 1000 +
+ + + + system + asynchronous_metric_log
+ + 60000 +
+ + + + + + + + + + + + *_dictionary.xml + + + + + + + + + + /clickhouse/task_queue/ddl + + + + + + + + + + + + + + + + click_cost + any + + 0 + 3600 + + + 86400 + 60 + + + + max + + 0 + 60 + + + 3600 + 300 + + + 86400 + 3600 + + + + + + /var/lib/clickhouse/format_schemas/ + + + + + + + +
diff --git a/.ci/docker-compose-file/clickhouse/users.xml b/.ci/docker-compose-file/clickhouse/users.xml new file mode 100644 index 000000000..ced773355 --- /dev/null +++ b/.ci/docker-compose-file/clickhouse/users.xml @@ -0,0 +1,110 @@ + + + + + + + + 10000000000 + + + 0 + + + random + + + + + 1 + + + + + + + + + public + + + + ::/0 + + + + default + + + default + + + + + + + + + + + + + + 3600 + + + 0 + 0 + 0 + 0 + 0 + + + + diff --git a/.ci/docker-compose-file/docker-compose-clickhouse.yaml b/.ci/docker-compose-file/docker-compose-clickhouse.yaml new file mode 100644 index 000000000..118f83dc1 --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-clickhouse.yaml @@ -0,0 +1,16 @@ +version: '3.9' + +services: + clickhouse: + container_name: clickhouse + image: clickhouse/clickhouse-server:23.1.2.9-alpine + restart: always + volumes: + - ./clickhouse/users.xml:/etc/clickhouse-server/users.xml + - ./clickhouse/config.xml:/etc/clickhouse-server/config.d/config.xml + expose: + - "8123" + ports: + - "8123:8123" + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-dynamo.yaml b/.ci/docker-compose-file/docker-compose-dynamo.yaml new file mode 100644 index 000000000..926d6287c --- /dev/null +++ b/.ci/docker-compose-file/docker-compose-dynamo.yaml @@ -0,0 +1,15 @@ +version: '3.9' + +services: + dynamodb-local: + container_name: dynamo + image: amazon/dynamodb-local:${DYNAMO_TAG} + restart: always + ports: + - "8000:8000" + environment: + AWS_ACCESS_KEY_ID: root + AWS_SECRET_ACCESS_KEY: public + AWS_DEFAULT_REGION: us-west-2 + networks: + - emqx_bridge diff --git a/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml b/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml index 8a4c498df..4578ff94f 100644 --- a/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml +++ b/.ci/docker-compose-file/docker-compose-mysql-tcp.yaml @@ -13,10 +13,12 @@ services: networks: - emqx_bridge command: - --bind-address "::" - --character-set-server=utf8mb4 - 
--collation-server=utf8mb4_general_ci - --explicit_defaults_for_timestamp=true - --lower_case_table_names=1 - --max_allowed_packet=128M - --skip-symbolic-links + - --bind-address=0.0.0.0 + - --character-set-server=utf8mb4 + - --collation-server=utf8mb4_general_ci + - --lower-case-table-names=1 + - --max-allowed-packet=128M + # Severely limit maximum number of prepared statements the server must permit + # so that we hit potential resource exhaustion earlier in tests. + - --max-prepared-stmt-count=64 + - --skip-symbolic-links diff --git a/.ci/docker-compose-file/docker-compose-mysql-tls.yaml b/.ci/docker-compose-file/docker-compose-mysql-tls.yaml index 47d9ecd83..83fd4658c 100644 --- a/.ci/docker-compose-file/docker-compose-mysql-tls.yaml +++ b/.ci/docker-compose-file/docker-compose-mysql-tls.yaml @@ -23,9 +23,11 @@ services: - --port=3306 - --character-set-server=utf8mb4 - --collation-server=utf8mb4_general_ci - - --explicit_defaults_for_timestamp=true - - --lower_case_table_names=1 - - --max_allowed_packet=128M + - --lower-case-table-names=1 + - --max-allowed-packet=128M + # Severely limit maximum number of prepared statements the server must permit + # so that we hit potential resource exhaustion earlier in tests. 
+ - --max-prepared-stmt-count=64 - --ssl-ca=/etc/certs/ca-cert.pem - --ssl-cert=/etc/certs/server-cert.pem - --ssl-key=/etc/certs/server-key.pem diff --git a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml index 3f526978e..3dd30af52 100644 --- a/.ci/docker-compose-file/docker-compose-toxiproxy.yaml +++ b/.ci/docker-compose-file/docker-compose-toxiproxy.yaml @@ -18,6 +18,7 @@ services: - 15432:5432 - 15433:5433 - 16041:6041 + - 18000:8000 command: - "-host=0.0.0.0" - "-config=/config/toxiproxy.json" diff --git a/.ci/docker-compose-file/toxiproxy.json b/.ci/docker-compose-file/toxiproxy.json index e26134ec8..6188eab17 100644 --- a/.ci/docker-compose-file/toxiproxy.json +++ b/.ci/docker-compose-file/toxiproxy.json @@ -47,5 +47,11 @@ "listen": "0.0.0.0:6041", "upstream": "tdengine:6041", "enabled": true + }, + { + "name": "dynamo", + "listen": "0.0.0.0:8000", + "upstream": "dynamo:8000", + "enabled": true } ] diff --git a/.github/workflows/build_and_push_docker_images.yaml b/.github/workflows/build_and_push_docker_images.yaml index a5f1f315c..57dc2cb45 100644 --- a/.github/workflows/build_and_push_docker_images.yaml +++ b/.github/workflows/build_and_push_docker_images.yaml @@ -23,9 +23,9 @@ on: jobs: prepare: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 # prepare source with any OTP version, no need for a matrix - container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-24.3.4.2-2-ubuntu22.04" outputs: PROFILE: ${{ steps.get_profile.outputs.PROFILE }} @@ -105,7 +105,7 @@ jobs: path: source.zip docker: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: prepare strategy: @@ -121,7 +121,7 @@ jobs: # NOTE: 'otp' and 'elixir' are to configure emqx-builder image # only support latest otp and elixir, not a matrix builder: - - 5.0-28 # update to latest + - 5.0-29 # update to latest otp: - 24.3.4.2-2 # switch to 25 once ready to release 
5.1 elixir: diff --git a/.github/workflows/build_packages.yaml b/.github/workflows/build_packages.yaml index b23e2c604..73a2ece7a 100644 --- a/.github/workflows/build_packages.yaml +++ b/.github/workflows/build_packages.yaml @@ -22,8 +22,9 @@ on: jobs: prepare: - runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04 + runs-on: ubuntu-22.04 + if: (github.repository_owner == 'emqx' && github.event_name == 'schedule') || github.event_name != 'schedule' + container: ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-24.3.4.2-2-ubuntu22.04 outputs: BUILD_PROFILE: ${{ steps.get_profile.outputs.BUILD_PROFILE }} IS_EXACT_TAG: ${{ steps.get_profile.outputs.IS_EXACT_TAG }} @@ -153,6 +154,7 @@ jobs: - 24.3.4.2-2 os: - macos-11 + - macos-12 - macos-12-arm64 runs-on: ${{ matrix.os }} steps: @@ -211,28 +213,35 @@ jobs: - ubuntu18.04 - debian11 - debian10 + - raspbian10 + - raspbian9 + - el9 - el8 - el7 - amzn2 build_machine: - aws-arm64 - - ubuntu-20.04 + - ubuntu-22.04 builder: - - 5.0-28 + - 5.0-29 elixir: - 1.13.4 exclude: - arch: arm64 - build_machine: ubuntu-20.04 + build_machine: ubuntu-22.04 - arch: amd64 build_machine: aws-arm64 + - arch: amd64 + os: raspbian9 + - arch: amd64 + os: raspbian10 include: - profile: emqx otp: 25.1.2-2 arch: amd64 os: ubuntu22.04 build_machine: ubuntu-22.04 - builder: 5.0-28 + builder: 5.0-29 elixir: 1.13.4 release_with: elixir - profile: emqx @@ -240,7 +249,7 @@ jobs: arch: amd64 os: amzn2 build_machine: ubuntu-22.04 - builder: 5.0-28 + builder: 5.0-29 elixir: 1.13.4 release_with: elixir @@ -306,7 +315,7 @@ jobs: {"text": "Scheduled run of ${{ github.workflow }}@${{ matrix.os }} failed: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}"} publish_artifacts: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: [prepare, mac, linux] if: needs.prepare.outputs.IS_EXACT_TAG && github.event_name != 'schedule' strategy: @@ -369,15 +378,19 @@ jobs: push "debian/buster" 
"packages/$PROFILE/$PROFILE-$VERSION-debian10-arm64.deb" push "debian/bullseye" "packages/$PROFILE/$PROFILE-$VERSION-debian11-amd64.deb" push "debian/bullseye" "packages/$PROFILE/$PROFILE-$VERSION-debian11-arm64.deb" + push "raspbian/stretch" "packages/$PROFILE/$PROFILE-$VERSION-raspbian9-arm64.deb" + push "raspbian/buster" "packages/$PROFILE/$PROFILE-$VERSION-raspbian10-arm64.deb" push "ubuntu/bionic" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu18.04-amd64.deb" push "ubuntu/bionic" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu18.04-arm64.deb" push "ubuntu/focal" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu20.04-amd64.deb" push "ubuntu/focal" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu20.04-arm64.deb" push "ubuntu/jammy" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu22.04-amd64.deb" push "ubuntu/jammy" "packages/$PROFILE/$PROFILE-$VERSION-ubuntu22.04-arm64.deb" + push "el/6" "packages/$PROFILE/$PROFILE-$VERSION-amzn2-amd64.rpm" + push "el/6" "packages/$PROFILE/$PROFILE-$VERSION-amzn2-arm64.rpm" push "el/7" "packages/$PROFILE/$PROFILE-$VERSION-el7-amd64.rpm" push "el/7" "packages/$PROFILE/$PROFILE-$VERSION-el7-arm64.rpm" push "el/8" "packages/$PROFILE/$PROFILE-$VERSION-el8-amd64.rpm" push "el/8" "packages/$PROFILE/$PROFILE-$VERSION-el8-arm64.rpm" - push "el/6" "packages/$PROFILE/$PROFILE-$VERSION-amzn2-amd64.rpm" - push "el/6" "packages/$PROFILE/$PROFILE-$VERSION-amzn2-arm64.rpm" + push "el/9" "packages/$PROFILE/$PROFILE-$VERSION-el9-amd64.rpm" + push "el/9" "packages/$PROFILE/$PROFILE-$VERSION-el9-arm64.rpm" diff --git a/.github/workflows/build_slim_packages.yaml b/.github/workflows/build_slim_packages.yaml index 64704b39c..e18a1319e 100644 --- a/.github/workflows/build_slim_packages.yaml +++ b/.github/workflows/build_slim_packages.yaml @@ -30,15 +30,14 @@ jobs: fail-fast: false matrix: profile: - - ["emqx", "24.3.4.2-2", "el7"] - - ["emqx", "24.3.4.2-2", "ubuntu20.04"] - - ["emqx", "25.1.2-2", "ubuntu22.04"] - - ["emqx-enterprise", "24.3.4.2-2", "ubuntu20.04"] - - 
["emqx-enterprise", "25.1.2-2", "ubuntu22.04"] + - ["emqx", "24.3.4.2-2", "el7", "erlang"] + - ["emqx", "25.1.2-2", "ubuntu22.04", "elixir"] + - ["emqx-enterprise", "24.3.4.2-2", "amzn2", "erlang"] + - ["emqx-enterprise", "25.1.2-2", "ubuntu20.04", "erlang"] builder: - - 5.0-28 + - 5.0-29 elixir: - - 1.13.4 + - '1.13.4' container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.profile[1] }}-${{ matrix.profile[2] }}" @@ -55,18 +54,22 @@ jobs: run: | git config --global --add safe.directory "$GITHUB_WORKSPACE" - name: build and test tgz package + if: matrix.profile[3] == 'erlang' run: | make ${EMQX_NAME}-tgz ./scripts/pkg-tests.sh ${EMQX_NAME}-tgz - name: build and test deb/rpm packages + if: matrix.profile[3] == 'erlang' run: | make ${EMQX_NAME}-pkg ./scripts/pkg-tests.sh ${EMQX_NAME}-pkg - name: build and test tgz package (Elixir) + if: matrix.profile[3] == 'elixir' run: | make ${EMQX_NAME}-elixir-tgz ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-tgz - name: build and test deb/rpm packages (Elixir) + if: matrix.profile[3] == 'elixir' run: | make ${EMQX_NAME}-elixir-pkg ./scripts/pkg-tests.sh ${EMQX_NAME}-elixir-pkg @@ -79,7 +82,7 @@ jobs: name: "${{ matrix.profile[0] }}_schema_dump" path: | scripts/spellcheck - _build/${{ matrix.profile[0] }}/lib/emqx_dashboard/priv/www/static/schema.json + _build/docgen/${{ matrix.profile[0] }}/schema.json windows: runs-on: windows-2019 @@ -215,4 +218,4 @@ jobs: path: /tmp/ - name: Run spellcheck run: | - bash /tmp/scripts/spellcheck/spellcheck.sh /tmp/_build/${{ matrix.profile }}/lib/emqx_dashboard/priv/www/static/schema.json + bash /tmp/scripts/spellcheck/spellcheck.sh /tmp/_build/docgen/${{ matrix.profile }}/schema.json diff --git a/.github/workflows/check_deps_integrity.yaml b/.github/workflows/check_deps_integrity.yaml index ff41a4e86..f24e164d9 100644 --- a/.github/workflows/check_deps_integrity.yaml +++ b/.github/workflows/check_deps_integrity.yaml @@ -1,11 +1,12 @@ name: Check Rebar 
Dependencies -on: [pull_request, push] +on: + pull_request: jobs: check_deps_integrity: - runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04 + runs-on: ubuntu-latest + container: ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-25.1.2-2-ubuntu22.04 steps: - uses: actions/checkout@v3 diff --git a/.github/workflows/code_style_check.yaml b/.github/workflows/code_style_check.yaml index 393da4dbd..390ca8ffe 100644 --- a/.github/workflows/code_style_check.yaml +++ b/.github/workflows/code_style_check.yaml @@ -4,8 +4,8 @@ on: [pull_request] jobs: code_style_check: - runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04" + runs-on: ubuntu-22.04 + container: "ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-25.1.2-2-ubuntu22.04" steps: - uses: actions/checkout@v3 with: diff --git a/.github/workflows/elixir_apps_check.yaml b/.github/workflows/elixir_apps_check.yaml index 744618680..a123ad93b 100644 --- a/.github/workflows/elixir_apps_check.yaml +++ b/.github/workflows/elixir_apps_check.yaml @@ -2,13 +2,14 @@ name: Check Elixir Release Applications -on: [pull_request, push] +on: + pull_request: jobs: elixir_apps_check: runs-on: ubuntu-latest # just use the latest builder - container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04" + container: "ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-25.1.2-2-ubuntu22.04" strategy: fail-fast: false diff --git a/.github/workflows/elixir_deps_check.yaml b/.github/workflows/elixir_deps_check.yaml index 5f5450cab..348ed4931 100644 --- a/.github/workflows/elixir_deps_check.yaml +++ b/.github/workflows/elixir_deps_check.yaml @@ -2,12 +2,13 @@ name: Elixir Dependency Version Check -on: [pull_request, push] +on: + pull_request: jobs: elixir_deps_check: - runs-on: ubuntu-20.04 - container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04 + runs-on: ubuntu-latest + container: ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-25.1.2-2-ubuntu22.04 steps: - name: 
Checkout diff --git a/.github/workflows/elixir_release.yml b/.github/workflows/elixir_release.yml index 40bb83636..5517a2abc 100644 --- a/.github/workflows/elixir_release.yml +++ b/.github/workflows/elixir_release.yml @@ -17,7 +17,7 @@ jobs: profile: - emqx - emqx-enterprise - container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-25.1.2-2-ubuntu20.04 + container: ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-25.1.2-2-ubuntu22.04 steps: - name: Checkout uses: actions/checkout@v3 diff --git a/.github/workflows/run_emqx_app_tests.yaml b/.github/workflows/run_emqx_app_tests.yaml index cf6e1bdff..147708373 100644 --- a/.github/workflows/run_emqx_app_tests.yaml +++ b/.github/workflows/run_emqx_app_tests.yaml @@ -12,7 +12,7 @@ jobs: strategy: matrix: builder: - - 5.0-28 + - 5.0-29 otp: - 24.3.4.2-2 - 25.1.2-2 @@ -22,16 +22,16 @@ jobs: elixir: - 1.13.4 os: - - ubuntu20.04 + - ubuntu22.04 arch: - amd64 runs-on: - aws-amd64 - - ubuntu-20.04 + - ubuntu-22.04 use-self-hosted: - ${{ github.repository_owner == 'emqx' }} exclude: - - runs-on: ubuntu-20.04 + - runs-on: ubuntu-22.04 use-self-hosted: true - runs-on: aws-amd64 use-self-hosted: false diff --git a/.github/workflows/run_fvt_tests.yaml b/.github/workflows/run_fvt_tests.yaml index 4ef634d91..a95fcd805 100644 --- a/.github/workflows/run_fvt_tests.yaml +++ b/.github/workflows/run_fvt_tests.yaml @@ -7,16 +7,17 @@ concurrency: on: push: branches: - - '**' + - master + - 'ci/**' tags: - v* pull_request: jobs: prepare: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest # prepare source with any OTP version, no need for a matrix - container: ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-debian11 + container: ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-24.3.4.2-2-debian11 steps: - uses: actions/checkout@v3 @@ -33,7 +34,7 @@ jobs: path: source.zip docker_test: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: prepare strategy: @@ -49,7 +50,7 @@ jobs: os: - ["debian11", "debian:11-slim"] builder: - - 5.0-28 + - 5.0-29 otp: - 24.3.4.2-2 
elixir: @@ -107,7 +108,7 @@ jobs: docker exec node1.emqx.io node_dump helm_test: - runs-on: ubuntu-20.04 + runs-on: ubuntu-22.04 needs: prepare strategy: @@ -122,7 +123,7 @@ jobs: os: - ["debian11", "debian:11-slim"] builder: - - 5.0-28 + - 5.0-29 otp: - 24.3.4.2-2 elixir: diff --git a/.github/workflows/run_relup_tests.yaml b/.github/workflows/run_relup_tests.yaml index 4d03878de..ca3e0e0ce 100644 --- a/.github/workflows/run_relup_tests.yaml +++ b/.github/workflows/run_relup_tests.yaml @@ -14,8 +14,8 @@ concurrency: jobs: relup_test_plan: - runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04" + runs-on: ubuntu-22.04 + container: "ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-24.3.4.2-2-ubuntu22.04" outputs: CUR_EE_VSN: ${{ steps.find-versions.outputs.CUR_EE_VSN }} OLD_VERSIONS: ${{ steps.find-versions.outputs.OLD_VERSIONS }} diff --git a/.github/workflows/run_test_cases.yaml b/.github/workflows/run_test_cases.yaml index 79998f413..5006fe760 100644 --- a/.github/workflows/run_test_cases.yaml +++ b/.github/workflows/run_test_cases.yaml @@ -7,225 +7,226 @@ concurrency: on: push: branches: - - '**' + - master + - 'ci/**' tags: - v* - e* pull_request: jobs: - build-matrix: - runs-on: ubuntu-latest - outputs: - prepare: ${{ steps.matrix.outputs.prepare }} - host: ${{ steps.matrix.outputs.host }} - docker: ${{ steps.matrix.outputs.docker }} - runs-on: ${{ steps.runner.outputs.runs-on }} - steps: - - uses: actions/checkout@v3 - - name: Build matrix - id: matrix - run: | - APPS="$(./scripts/find-apps.sh --ci)" - MATRIX="$(echo "${APPS}" | jq -c ' - [ - (.[] | select(.profile == "emqx") | . + { - builder: "5.0-28", - otp: "25.1.2-2", - elixir: "1.13.4" - }), - (.[] | select(.profile == "emqx-enterprise") | . 
+ { - builder: "5.0-28", - otp: ["24.3.4.2-2", "25.1.2-2"][], - elixir: "1.13.4" - }) - ] - ')" - echo "${MATRIX}" | jq - MATRIX_PREPARE="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')" - MATRIX_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')" - MATRIX_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')" - echo "prepare=${MATRIX_PREPARE}" | tee -a $GITHUB_OUTPUT - echo "host=${MATRIX_HOST}" | tee -a $GITHUB_OUTPUT - echo "docker=${MATRIX_DOCKER}" | tee -a $GITHUB_OUTPUT - - name: Choose runner host - id: runner - run: | - RUNS_ON="ubuntu-20.04" - ${{ github.repository_owner == 'emqx' }} && RUNS_ON="aws-amd64" - echo "runs-on=${RUNS_ON}" | tee -a $GITHUB_OUTPUT + build-matrix: + runs-on: ubuntu-latest + outputs: + prepare: ${{ steps.matrix.outputs.prepare }} + host: ${{ steps.matrix.outputs.host }} + docker: ${{ steps.matrix.outputs.docker }} + runs-on: ${{ steps.runner.outputs.runs-on }} + steps: + - uses: actions/checkout@v3 + - name: Build matrix + id: matrix + run: | + APPS="$(./scripts/find-apps.sh --ci)" + MATRIX="$(echo "${APPS}" | jq -c ' + [ + (.[] | select(.profile == "emqx") | . + { + builder: "5.0-29", + otp: "25.1.2-2", + elixir: "1.13.4" + }), + (.[] | select(.profile == "emqx-enterprise") | . 
+ { + builder: "5.0-29", + otp: ["24.3.4.2-2", "25.1.2-2"][], + elixir: "1.13.4" + }) + ] + ')" + echo "${MATRIX}" | jq + MATRIX_PREPARE="$(echo "${MATRIX}" | jq -c 'map({profile, builder, otp, elixir}) | unique')" + MATRIX_HOST="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "host"))')" + MATRIX_DOCKER="$(echo "${MATRIX}" | jq -c 'map(select(.runner == "docker"))')" + echo "prepare=${MATRIX_PREPARE}" | tee -a $GITHUB_OUTPUT + echo "host=${MATRIX_HOST}" | tee -a $GITHUB_OUTPUT + echo "docker=${MATRIX_DOCKER}" | tee -a $GITHUB_OUTPUT + - name: Choose runner host + id: runner + run: | + RUNS_ON="ubuntu-22.04" + ${{ github.repository_owner == 'emqx' }} && RUNS_ON="aws-amd64" + echo "runs-on=${RUNS_ON}" | tee -a $GITHUB_OUTPUT - prepare: - runs-on: ${{ needs.build-matrix.outputs.runs-on }} - needs: [build-matrix] - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} - container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/checkout@v3 - with: - path: source - - name: get_all_deps - working-directory: source - env: - PROFILE: ${{ matrix.profile }} - #DIAGNOSTIC: 1 - run: | - make ensure-rebar3 - # fetch all deps and compile - make ${{ matrix.profile }} - make static_checks - make test-compile - cd .. 
- zip -ryq source.zip source/* source/.[^.]* - - uses: actions/upload-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: source.zip + prepare: + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + needs: [build-matrix] + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/checkout@v3 + with: + path: source + - name: get_all_deps + working-directory: source + env: + PROFILE: ${{ matrix.profile }} + #DIAGNOSTIC: 1 + run: | + make ensure-rebar3 + # fetch all deps and compile + make ${{ matrix.profile }} + make static_checks + make test-compile + cd .. + zip -ryq source.zip source/* source/.[^.]* + - uses: actions/upload-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: source.zip - eunit_and_proper: - needs: - - build-matrix - - prepare - runs-on: ${{ needs.build-matrix.outputs.runs-on }} - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} + eunit_and_proper: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.prepare) }} - defaults: - run: - shell: bash - container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" + defaults: + run: + shell: bash + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: . 
- - name: unzip source code - run: unzip -o -q source.zip - # produces eunit.coverdata - - name: eunit - env: - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - working-directory: source - run: make eunit + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . + - name: unzip source code + run: unzip -o -q source.zip + # produces eunit.coverdata + - name: eunit + env: + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + working-directory: source + run: make eunit - # produces proper.coverdata - - name: proper - env: - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - working-directory: source - run: make proper + # produces proper.coverdata + - name: proper + env: + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + working-directory: source + run: make proper - - uses: actions/upload-artifact@v3 - with: - name: coverdata - path: source/_build/test/cover + - uses: actions/upload-artifact@v3 + with: + name: coverdata + path: source/_build/test/cover - ct_docker: - needs: - - build-matrix - - prepare - runs-on: ${{ needs.build-matrix.outputs.runs-on }} - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.docker) }} + ct_docker: + needs: + - build-matrix + - prepare + runs-on: ${{ needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.docker) }} - defaults: - run: - shell: bash + defaults: + run: + shell: bash - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: . 
- - name: unzip source code - run: unzip -q source.zip - - name: run tests - working-directory: source - env: - DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - MONGO_TAG: "5" - MYSQL_TAG: "8" - PGSQL_TAG: "13" - REDIS_TAG: "7.0" - INFLUXDB_TAG: "2.5.0" - TDENGINE_TAG: "3.0.2.4" - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} - - uses: actions/upload-artifact@v3 - with: - name: coverdata - path: source/_build/test/cover - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} - path: source/_build/test/logs + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . + - name: unzip source code + run: unzip -q source.zip + - name: run tests + working-directory: source + env: + DOCKER_CT_RUNNER_IMAGE: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" + MONGO_TAG: "5" + MYSQL_TAG: "8" + PGSQL_TAG: "13" + REDIS_TAG: "7.0" + INFLUXDB_TAG: "2.5.0" + TDENGINE_TAG: "3.0.2.4" + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + run: ./scripts/ct/run.sh --ci --app ${{ matrix.app }} + - uses: actions/upload-artifact@v3 + with: + name: coverdata + path: source/_build/test/cover + - uses: actions/upload-artifact@v3 + if: failure() + with: + name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} + path: source/_build/test/logs - ct: - needs: - - build-matrix - - prepare - runs-on: ${{ needs.build-matrix.outputs.runs-on }} - strategy: - fail-fast: false - matrix: - include: ${{ fromJson(needs.build-matrix.outputs.host) }} + ct: + needs: + - build-matrix + - prepare + runs-on: ${{ 
needs.build-matrix.outputs.runs-on }} + strategy: + fail-fast: false + matrix: + include: ${{ fromJson(needs.build-matrix.outputs.host) }} - container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" - defaults: - run: - shell: bash + container: "ghcr.io/emqx/emqx-builder/${{ matrix.builder }}:${{ matrix.elixir }}-${{ matrix.otp }}-ubuntu20.04" + defaults: + run: + shell: bash - steps: - - uses: AutoModality/action-clean@v1 - - uses: actions/download-artifact@v3 - with: - name: source-${{ matrix.profile }}-${{ matrix.otp }} - path: . - - name: unzip source code - run: unzip -q source.zip + steps: + - uses: AutoModality/action-clean@v1 + - uses: actions/download-artifact@v3 + with: + name: source-${{ matrix.profile }}-${{ matrix.otp }} + path: . + - name: unzip source code + run: unzip -q source.zip - # produces $PROFILE-.coverdata - - name: run common test - working-directory: source - env: - PROFILE: ${{ matrix.profile }} - CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} - run: | - make "${{ matrix.app }}-ct" - - uses: actions/upload-artifact@v3 - with: - name: coverdata - path: source/_build/test/cover - if-no-files-found: warn # do not fail if no coverdata found - - uses: actions/upload-artifact@v3 - if: failure() - with: - name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} - path: source/_build/test/logs + # produces $PROFILE-.coverdata + - name: run common test + working-directory: source + env: + PROFILE: ${{ matrix.profile }} + CT_COVER_EXPORT_PREFIX: ${{ matrix.profile }}-${{ matrix.otp }} + run: | + make "${{ matrix.app }}-ct" + - uses: actions/upload-artifact@v3 + with: + name: coverdata + path: source/_build/test/cover + if-no-files-found: warn # do not fail if no coverdata found + - uses: actions/upload-artifact@v3 + if: failure() + with: + name: logs-${{ matrix.profile }}-${{ matrix.prefix }}-${{ matrix.otp }} + path: source/_build/test/logs - make_cover: - needs: 
- - eunit_and_proper - - ct - - ct_docker - runs-on: ubuntu-20.04 - container: "ghcr.io/emqx/emqx-builder/5.0-28:1.13.4-24.3.4.2-2-ubuntu20.04" - steps: + make_cover: + needs: + - eunit_and_proper + - ct + - ct_docker + runs-on: ubuntu-22.04 + container: "ghcr.io/emqx/emqx-builder/5.0-29:1.13.4-24.3.4.2-2-ubuntu22.04" + steps: - uses: AutoModality/action-clean@v1 - uses: actions/download-artifact@v3 with: @@ -258,15 +259,15 @@ jobs: if: failure() run: cat rebar3.crashdump - # do this in a separate job - upload_coverdata: - needs: make_cover - runs-on: ubuntu-20.04 - steps: - - name: Coveralls Finished - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - run: | - curl -v -k https://coveralls.io/webhook \ - --header "Content-Type: application/json" \ - --data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true + # do this in a separate job + upload_coverdata: + needs: make_cover + runs-on: ubuntu-20.04 + steps: + - name: Coveralls Finished + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + curl -v -k https://coveralls.io/webhook \ + --header "Content-Type: application/json" \ + --data "{\"repo_name\":\"$GITHUB_REPOSITORY\",\"repo_token\":\"$GITHUB_TOKEN\",\"payload\":{\"build_num\":$GITHUB_RUN_ID,\"status\":\"done\"}}" || true diff --git a/.github/workflows/shellcheck.yaml b/.github/workflows/shellcheck.yaml index 56a6645e1..558ecf3bf 100644 --- a/.github/workflows/shellcheck.yaml +++ b/.github/workflows/shellcheck.yaml @@ -1,10 +1,11 @@ name: Shellcheck -on: [pull_request, push] +on: + pull_request: jobs: shellcheck: - runs-on: ubuntu-20.04 + runs-on: ubuntu-latest steps: - name: Checkout source code uses: actions/checkout@v3 diff --git a/.github/workflows/stale.yaml b/.github/workflows/stale.yaml index 32abe1721..cf6229b13 100644 --- a/.github/workflows/stale.yaml +++ b/.github/workflows/stale.yaml @@ -11,6 +11,7 @@ on: jobs: stale: runs-on: ubuntu-latest + if: 
github.repository_owner == 'emqx' permissions: issues: write pull-requests: none diff --git a/apps/emqx/i18n/emqx_schema_i18n.conf b/apps/emqx/i18n/emqx_schema_i18n.conf index a3aebe796..b57698327 100644 --- a/apps/emqx/i18n/emqx_schema_i18n.conf +++ b/apps/emqx/i18n/emqx_schema_i18n.conf @@ -1070,14 +1070,20 @@ Supported configurations are the following: en: """Dispatch strategy for shared subscription. - `random`: dispatch the message to a random selected subscriber - `round_robin`: select the subscribers in a round-robin manner + - `round_robin_per_group`: select the subscribers in round-robin fashion within each shared subscriber group + - `local`: select random local subscriber otherwise select random cluster-wide - `sticky`: always use the last selected subscriber to dispatch, until the subscriber disconnects. - - `hash`: select the subscribers by the hash of `clientIds`""" + - `hash_clientid`: select the subscribers by hashing the `clientIds` + - `hash_topic`: select the subscribers by hashing the source topic""" zh: """共享订阅消息派发策略。 - `random`:随机挑选一个共享订阅者派发; - `round_robin`:使用 round-robin 策略派发; + - `round_robin_per_group`: 在共享组内循环选择下一个成员; + - `local`: 选择随机的本地成员,否则选择随机的集群范围内成员; - `sticky`:总是使用上次选中的订阅者派发,直到它断开连接; - - `hash`:使用发送者的 Client ID 进行 Hash 来选择订阅者。""" + - `hash_clientid`:使用发送者的 Client ID 进行 Hash 来选择订阅者; + - `hash_topic`: 使用源主题进行 Hash 来选择订阅者。""" } } diff --git a/apps/emqx/include/emqx_release.hrl b/apps/emqx/include/emqx_release.hrl index a79389ecb..cdf2eefa7 100644 --- a/apps/emqx/include/emqx_release.hrl +++ b/apps/emqx/include/emqx_release.hrl @@ -35,7 +35,7 @@ -define(EMQX_RELEASE_CE, "5.0.20"). %% Enterprise edition --define(EMQX_RELEASE_EE, "5.0.1"). +-define(EMQX_RELEASE_EE, "5.0.2-alpha.1"). %% the HTTP API version -define(EMQX_API_VERSION, "5.0"). 
diff --git a/apps/emqx/rebar.config b/apps/emqx/rebar.config index 0ecbbfc1a..2ce97c13d 100644 --- a/apps/emqx/rebar.config +++ b/apps/emqx/rebar.config @@ -27,7 +27,7 @@ {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}}, {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}}, {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}}, - {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.3"}}}, + {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.4"}}}, {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}}, {hocon, {git, "https://github.com/emqx/hocon.git", {tag, "0.36.0"}}}, {pbkdf2, {git, "https://github.com/emqx/erlang-pbkdf2.git", {tag, "2.0.4"}}}, diff --git a/apps/emqx/rebar3 b/apps/emqx/rebar3 deleted file mode 100755 index edb85b3c9..000000000 Binary files a/apps/emqx/rebar3 and /dev/null differ diff --git a/apps/emqx/src/emqx.app.src b/apps/emqx/src/emqx.app.src index e195107ed..d2831b74d 100644 --- a/apps/emqx/src/emqx.app.src +++ b/apps/emqx/src/emqx.app.src @@ -3,7 +3,7 @@ {id, "emqx"}, {description, "EMQX Core"}, % strict semver, bump manually! - {vsn, "5.0.19"}, + {vsn, "5.0.20"}, {modules, []}, {registered, []}, {applications, [ diff --git a/apps/emqx/src/emqx_channel.erl b/apps/emqx/src/emqx_channel.erl index e82adc786..9acad4d57 100644 --- a/apps/emqx/src/emqx_channel.erl +++ b/apps/emqx/src/emqx_channel.erl @@ -224,6 +224,8 @@ set_session(Session, Channel = #channel{conninfo = ConnInfo, clientinfo = Client Channel#channel{session = Session1}. -spec stats(channel()) -> emqx_types:stats(). +stats(#channel{session = undefined}) -> + emqx_pd:get_counters(?CHANNEL_METRICS); stats(#channel{session = Session}) -> lists:append(emqx_session:stats(Session), emqx_pd:get_counters(?CHANNEL_METRICS)). diff --git a/apps/emqx/src/emqx_cm.erl b/apps/emqx/src/emqx_cm.erl index 77bc44eeb..6de05dabe 100644 --- a/apps/emqx/src/emqx_cm.erl +++ b/apps/emqx/src/emqx_cm.erl @@ -19,6 +19,7 @@ -behaviour(gen_server). 
+-include("emqx.hrl"). -include("logger.hrl"). -include("types.hrl"). -include_lib("snabbkaffe/include/snabbkaffe.hrl"). @@ -67,7 +68,8 @@ %% Test/debug interface -export([ all_channels/0, - all_client_ids/0 + all_client_ids/0, + get_session_confs/2 ]). %% gen_server callbacks @@ -296,9 +298,9 @@ open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) -> register_channel(ClientId, Self, ConnInfo), {ok, #{ - session => Session1, + session => clean_session(Session1), present => true, - pendings => Pendings + pendings => clean_pendings(Pendings) }}; {living, ConnMod, ChanPid, Session} -> ok = emqx_session:resume(ClientInfo, Session), @@ -315,9 +317,9 @@ open_session(false, ClientInfo = #{clientid := ClientId}, ConnInfo) -> ), register_channel(ClientId, Self, ConnInfo), {ok, #{ - session => Session1, + session => clean_session(Session1), present => true, - pendings => Pendings + pendings => clean_pendings(Pendings) }}; {error, _} -> CreateSess() @@ -355,6 +357,7 @@ get_session_confs(#{zone := Zone, clientid := ClientId}, #{ max_inflight => MaxInflight, retry_interval => get_mqtt_conf(Zone, retry_interval), await_rel_timeout => get_mqtt_conf(Zone, await_rel_timeout), + max_awaiting_rel => get_mqtt_conf(Zone, max_awaiting_rel), mqueue => mqueue_confs(Zone), %% TODO: Add conf for allowing/disallowing persistent sessions. %% Note that the connection info is already enriched to have @@ -730,3 +733,14 @@ get_connected_client_count() -> undefined -> 0; Size -> Size end. + +clean_session(Session) -> + emqx_session:filter_queue(fun is_banned_msg/1, Session). + +clean_pendings(Pendings) -> + lists:filter(fun is_banned_msg/1, Pendings). + +is_banned_msg(#message{from = ClientId}) -> + [] =:= emqx_banned:look_up({clientid, ClientId}); +is_banned_msg({deliver, _Topic, Msg}) -> + is_banned_msg(Msg). 
diff --git a/apps/emqx/src/emqx_mqueue.erl b/apps/emqx/src/emqx_mqueue.erl index 494e2b33e..fbf29d754 100644 --- a/apps/emqx/src/emqx_mqueue.erl +++ b/apps/emqx/src/emqx_mqueue.erl @@ -67,7 +67,8 @@ out/1, stats/1, dropped/1, - to_list/1 + to_list/1, + filter/2 ]). -define(NO_PRIORITY_TABLE, disabled). @@ -158,6 +159,19 @@ max_len(#mqueue{max_len = MaxLen}) -> MaxLen. to_list(MQ) -> to_list(MQ, []). +-spec filter(fun((any()) -> boolean()), mqueue()) -> mqueue(). +filter(_Pred, #mqueue{len = 0} = MQ) -> + MQ; +filter(Pred, #mqueue{q = Q, len = Len, dropped = Droppend} = MQ) -> + Q2 = ?PQUEUE:filter(Pred, Q), + case ?PQUEUE:len(Q2) of + Len -> + MQ; + Len2 -> + Diff = Len - Len2, + MQ#mqueue{q = Q2, len = Len2, dropped = Droppend + Diff} + end. + to_list(MQ, Acc) -> case out(MQ) of {empty, _MQ} -> diff --git a/apps/emqx/src/emqx_schema.erl b/apps/emqx/src/emqx_schema.erl index a673fa898..6f935f1e5 100644 --- a/apps/emqx/src/emqx_schema.erl +++ b/apps/emqx/src/emqx_schema.erl @@ -2608,7 +2608,7 @@ non_empty_string(_) -> {error, invalid_string}. servers_sc(Meta0, ParseOpts) -> %% if this filed has a default value %% then it is not NOT required - %% NOTE: maps:is_key is not the solution beause #{default => undefined} is legit + %% NOTE: maps:is_key is not the solution because #{default => undefined} is legit HasDefault = (maps:get(default, Meta0, undefined) =/= undefined), Required = maps:get(required, Meta0, not HasDefault), Meta = #{ @@ -2661,17 +2661,18 @@ normalize_host_port_str(Str) -> %% NOTE: Validator is called after converter. 
servers_validator(Opts, Required) -> fun(Str0) -> - Str = str(Str0), - case Str =:= "" orelse Str =:= "undefined" of - true when Required -> - %% it's a required field - %% but value is set to an empty string (from environment override) - %% or when the filed is not set in config file + case str(Str0) of + "" -> + %% Empty string is not allowed even if the field is not required + %% we should remove field from config if it's empty + throw("cannot_be_empty"); + "undefined" when Required -> + %% when the filed is not set in config file %% NOTE: assuming nobody is going to name their server "undefined" throw("cannot_be_empty"); - true -> + "undefined" -> ok; - _ -> + Str -> %% it's valid as long as it can be parsed _ = parse_servers(Str, Opts), ok @@ -2816,20 +2817,17 @@ is_port_number(Port) -> end. parse_port(Port) -> - try - P = list_to_integer(string:strip(Port)), - true = (P > 0), - true = (P =< 65535), - P - catch - _:_ -> - throw("bad_port_number") + case string:to_integer(string:strip(Port)) of + {P, ""} when P < 0 -> throw("port_number_must_be_positive"); + {P, ""} when P > 65535 -> throw("port_number_too_large"); + {P, ""} -> P; + _ -> throw("bad_port_number") end. quic_feature_toggle(Desc) -> sc( %% true, false are for user facing - %% 0, 1 are for internal represtation + %% 0, 1 are for internal representation typerefl:alias("boolean", typerefl:union([true, false, 0, 1])), #{ desc => Desc, diff --git a/apps/emqx/src/emqx_session.erl b/apps/emqx/src/emqx_session.erl index 2e17190e2..a13dfe491 100644 --- a/apps/emqx/src/emqx_session.erl +++ b/apps/emqx/src/emqx_session.erl @@ -82,6 +82,7 @@ deliver/3, enqueue/3, dequeue/2, + filter_queue/2, ignore_local/4, retry/2, terminate/3 @@ -200,7 +201,7 @@ -spec init(options()) -> session(). 
init(Opts) -> - MaxInflight = maps:get(max_inflight, Opts, 1), + MaxInflight = maps:get(max_inflight, Opts), QueueOpts = maps:merge( #{ max_len => 1000, @@ -211,17 +212,17 @@ init(Opts) -> #session{ id = emqx_guid:gen(), clientid = maps:get(clientid, Opts, <<>>), - is_persistent = maps:get(is_persistent, Opts, false), - max_subscriptions = maps:get(max_subscriptions, Opts, infinity), + is_persistent = maps:get(is_persistent, Opts), + max_subscriptions = maps:get(max_subscriptions, Opts), subscriptions = #{}, - upgrade_qos = maps:get(upgrade_qos, Opts, false), + upgrade_qos = maps:get(upgrade_qos, Opts), inflight = emqx_inflight:new(MaxInflight), mqueue = emqx_mqueue:init(QueueOpts), next_pkt_id = 1, - retry_interval = maps:get(retry_interval, Opts, 30000), + retry_interval = maps:get(retry_interval, Opts), awaiting_rel = #{}, - max_awaiting_rel = maps:get(max_awaiting_rel, Opts, 100), - await_rel_timeout = maps:get(await_rel_timeout, Opts, 300000), + max_awaiting_rel = maps:get(max_awaiting_rel, Opts), + await_rel_timeout = maps:get(await_rel_timeout, Opts), created_at = erlang:system_time(millisecond) }. @@ -529,6 +530,9 @@ dequeue(ClientInfo, Cnt, Msgs, Q) -> end end. +filter_queue(Pred, #session{mqueue = Q} = Session) -> + Session#session{mqueue = emqx_mqueue:filter(Pred, Q)}. + acc_cnt(#message{qos = ?QOS_0}, Cnt) -> Cnt; acc_cnt(_Msg, Cnt) -> Cnt - 1. 
diff --git a/apps/emqx/src/emqx_sys.erl b/apps/emqx/src/emqx_sys.erl index 81b27d727..a5f14e32a 100644 --- a/apps/emqx/src/emqx_sys.erl +++ b/apps/emqx/src/emqx_sys.erl @@ -211,7 +211,7 @@ handle_info({timeout, TRef, heartbeat}, State = #state{heartbeat = TRef}) -> handle_info({timeout, TRef, tick}, State = #state{ticker = TRef, sysdescr = Descr}) -> publish_any(version, version()), publish_any(sysdescr, Descr), - publish_any(brokers, mria_mnesia:running_nodes()), + publish_any(brokers, mria:running_nodes()), publish_any(stats, emqx_stats:getstats()), publish_any(metrics, emqx_metrics:all()), {noreply, tick(State), hibernate}; diff --git a/apps/emqx/src/persistent_session/emqx_persistent_session.erl b/apps/emqx/src/persistent_session/emqx_persistent_session.erl index c1100cfdb..68f783283 100644 --- a/apps/emqx/src/persistent_session/emqx_persistent_session.erl +++ b/apps/emqx/src/persistent_session/emqx_persistent_session.erl @@ -303,7 +303,7 @@ resume(ClientInfo = #{clientid := ClientID}, ConnInfo, Session) -> %% 3. Notify writers that we are resuming. %% They will buffer new messages. ?tp(ps_notify_writers, #{sid => SessionID}), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), NodeMarkers = resume_begin(Nodes, SessionID), ?tp(ps_node_markers, #{sid => SessionID, markers => NodeMarkers}), diff --git a/apps/emqx/test/emqx_banned_SUITE.erl b/apps/emqx/test/emqx_banned_SUITE.erl index ed22a019a..605c1de6d 100644 --- a/apps/emqx/test/emqx_banned_SUITE.erl +++ b/apps/emqx/test/emqx_banned_SUITE.erl @@ -141,3 +141,73 @@ t_kick(_) -> snabbkaffe:stop(), emqx_banned:delete(Who), ?assertEqual(1, length(?of_kind(kick_session_due_to_banned, Trace))). 
+ +t_session_taken(_) -> + erlang:process_flag(trap_exit, true), + Topic = <<"t/banned">>, + ClientId2 = <<"t_session_taken">>, + MsgNum = 3, + Connect = fun() -> + {ok, C} = emqtt:start_link([ + {clientid, <<"client1">>}, + {proto_ver, v5}, + {clean_start, false}, + {properties, #{'Session-Expiry-Interval' => 120}} + ]), + {ok, _} = emqtt:connect(C), + {ok, _, [0]} = emqtt:subscribe(C, Topic, []), + C + end, + + Publish = fun() -> + lists:foreach( + fun(_) -> + Msg = emqx_message:make(ClientId2, Topic, <<"payload">>), + emqx_broker:safe_publish(Msg) + end, + lists:seq(1, MsgNum) + ) + end, + + C1 = Connect(), + ok = emqtt:disconnect(C1), + + Publish(), + + C2 = Connect(), + ?assertEqual(MsgNum, length(receive_messages(MsgNum + 1))), + ok = emqtt:disconnect(C2), + + Publish(), + + Now = erlang:system_time(second), + Who = {clientid, ClientId2}, + emqx_banned:create(#{ + who => Who, + by => <<"test">>, + reason => <<"test">>, + at => Now, + until => Now + 120 + }), + + C3 = Connect(), + ?assertEqual(0, length(receive_messages(MsgNum + 1))), + emqx_banned:delete(Who), + {ok, #{}, [0]} = emqtt:unsubscribe(C3, Topic), + ok = emqtt:disconnect(C3). + +receive_messages(Count) -> + receive_messages(Count, []). +receive_messages(0, Msgs) -> + Msgs; +receive_messages(Count, Msgs) -> + receive + {publish, Msg} -> + ct:log("Msg: ~p ~n", [Msg]), + receive_messages(Count - 1, [Msg | Msgs]); + Other -> + ct:log("Other Msg: ~p~n", [Other]), + receive_messages(Count, Msgs) + after 1200 -> + Msgs + end. diff --git a/apps/emqx/test/emqx_channel_SUITE.erl b/apps/emqx/test/emqx_channel_SUITE.erl index c6610c0e2..6dd389350 100644 --- a/apps/emqx/test/emqx_channel_SUITE.erl +++ b/apps/emqx/test/emqx_channel_SUITE.erl @@ -1236,11 +1236,17 @@ connpkt(Props) -> session() -> session(#{}). 
session(InitFields) when is_map(InitFields) -> + Conf = emqx_cm:get_session_confs( + #{zone => default, clientid => <<"fake-test">>}, #{ + receive_maximum => 0, expiry_interval => 0 + } + ), + Session = emqx_session:init(Conf), maps:fold( - fun(Field, Value, Session) -> - emqx_session:set_field(Field, Value, Session) + fun(Field, Value, SessionAcc) -> + emqx_session:set_field(Field, Value, SessionAcc) end, - emqx_session:init(#{max_inflight => 0}), + Session, InitFields ). diff --git a/apps/emqx/test/emqx_connection_SUITE.erl b/apps/emqx/test/emqx_connection_SUITE.erl index 23ddf4008..cc9e03168 100644 --- a/apps/emqx/test/emqx_connection_SUITE.erl +++ b/apps/emqx/test/emqx_connection_SUITE.erl @@ -673,7 +673,10 @@ channel(InitFields) -> peercert => undefined, mountpoint => undefined }, - Session = emqx_session:init(#{max_inflight => 0}), + Conf = emqx_cm:get_session_confs(ClientInfo, #{ + receive_maximum => 0, expiry_interval => 1000 + }), + Session = emqx_session:init(Conf), maps:fold( fun(Field, Value, Channel) -> emqx_channel:set_field(Field, Value, Channel) diff --git a/apps/emqx/test/emqx_schema_tests.erl b/apps/emqx/test/emqx_schema_tests.erl index e1ac1874f..a0d264662 100644 --- a/apps/emqx/test/emqx_schema_tests.erl +++ b/apps/emqx/test/emqx_schema_tests.erl @@ -455,10 +455,11 @@ servers_validator_test() -> NotRequired = emqx_schema:servers_validator(#{}, false), ?assertThrow("cannot_be_empty", Required("")), ?assertThrow("cannot_be_empty", Required(<<>>)), + ?assertThrow("cannot_be_empty", NotRequired("")), + ?assertThrow("cannot_be_empty", NotRequired(<<>>)), ?assertThrow("cannot_be_empty", Required(undefined)), - ?assertEqual(ok, NotRequired("")), - ?assertEqual(ok, NotRequired(<<>>)), ?assertEqual(ok, NotRequired(undefined)), + ?assertEqual(ok, NotRequired("undefined")), ok. 
converter_invalid_input_test() -> diff --git a/apps/emqx/test/emqx_session_SUITE.erl b/apps/emqx/test/emqx_session_SUITE.erl index ecc9794d1..21d8f0a2a 100644 --- a/apps/emqx/test/emqx_session_SUITE.erl +++ b/apps/emqx/test/emqx_session_SUITE.erl @@ -63,7 +63,12 @@ end_per_testcase(_TestCase, Config) -> %%-------------------------------------------------------------------- t_session_init(_) -> - Session = emqx_session:init(#{max_inflight => 64}), + Conf = emqx_cm:get_session_confs( + #{zone => default, clientid => <<"fake-test">>}, #{ + receive_maximum => 64, expiry_interval => 0 + } + ), + Session = emqx_session:init(Conf), ?assertEqual(#{}, emqx_session:info(subscriptions, Session)), ?assertEqual(0, emqx_session:info(subscriptions_cnt, Session)), ?assertEqual(infinity, emqx_session:info(subscriptions_max, Session)), @@ -459,11 +464,17 @@ mqueue(Opts) -> session() -> session(#{}). session(InitFields) when is_map(InitFields) -> + Conf = emqx_cm:get_session_confs( + #{zone => default, clientid => <<"fake-test">>}, #{ + receive_maximum => 0, expiry_interval => 0 + } + ), + Session = emqx_session:init(Conf), maps:fold( - fun(Field, Value, Session) -> - emqx_session:set_field(Field, Value, Session) + fun(Field, Value, SessionAcc) -> + emqx_session:set_field(Field, Value, SessionAcc) end, - emqx_session:init(#{max_inflight => 0}), + Session, InitFields ). 
diff --git a/apps/emqx/test/emqx_ws_connection_SUITE.erl b/apps/emqx/test/emqx_ws_connection_SUITE.erl index 787491c4b..de8b1c9af 100644 --- a/apps/emqx/test/emqx_ws_connection_SUITE.erl +++ b/apps/emqx/test/emqx_ws_connection_SUITE.erl @@ -612,7 +612,10 @@ channel(InitFields) -> peercert => undefined, mountpoint => undefined }, - Session = emqx_session:init(#{max_inflight => 0}), + Conf = emqx_cm:get_session_confs(ClientInfo, #{ + receive_maximum => 0, expiry_interval => 0 + }), + Session = emqx_session:init(Conf), maps:fold( fun(Field, Value, Channel) -> emqx_channel:set_field(Field, Value, Channel) diff --git a/apps/emqx/test/props/prop_emqx_sys.erl b/apps/emqx/test/props/prop_emqx_sys.erl index 5e6c56341..505d729a7 100644 --- a/apps/emqx/test/props/prop_emqx_sys.erl +++ b/apps/emqx/test/props/prop_emqx_sys.erl @@ -30,7 +30,7 @@ emqx_metrics, emqx_stats, emqx_broker, - mria_mnesia, + mria, emqx_hooks, emqx_config_handler ]). @@ -109,8 +109,8 @@ do_mock(emqx_broker) -> ); do_mock(emqx_stats) -> meck:expect(emqx_stats, getstats, fun() -> [0] end); -do_mock(mria_mnesia) -> - meck:expect(mria_mnesia, running_nodes, fun() -> [node()] end); +do_mock(mria) -> + meck:expect(mria, running_nodes, fun() -> [node()] end); do_mock(emqx_metrics) -> meck:expect(emqx_metrics, all, fun() -> [{hello, 3}] end); do_mock(emqx_hooks) -> diff --git a/apps/emqx_authn/src/emqx_authn.app.src b/apps/emqx_authn/src/emqx_authn.app.src index 7fbdf787a..6a3ffbdb4 100644 --- a/apps/emqx_authn/src/emqx_authn.app.src +++ b/apps/emqx_authn/src/emqx_authn.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authn, [ {description, "EMQX Authentication"}, - {vsn, "0.1.14"}, + {vsn, "0.1.15"}, {modules, []}, {registered, [emqx_authn_sup, emqx_authn_registry]}, {applications, [kernel, stdlib, emqx_resource, emqx_connector, ehttpc, epgsql, mysql, jose]}, diff --git a/apps/emqx_authn/src/emqx_authn_api.erl b/apps/emqx_authn/src/emqx_authn_api.erl index 7e1c613d3..0a7f67f5a 100644 --- 
a/apps/emqx_authn/src/emqx_authn_api.erl +++ b/apps/emqx_authn/src/emqx_authn_api.erl @@ -881,7 +881,7 @@ lookup_from_local_node(ChainName, AuthenticatorID) -> end. lookup_from_all_nodes(ChainName, AuthenticatorID) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), LookupResult = emqx_authn_proto_v1:lookup_from_all_nodes(Nodes, ChainName, AuthenticatorID), case is_ok(LookupResult) of {ok, ResList} -> diff --git a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl index 6ce59d4f9..84f2c9525 100644 --- a/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl +++ b/apps/emqx_authn/src/enhanced_authn/emqx_enhanced_authn_scram_mnesia.erl @@ -168,7 +168,7 @@ authenticate( }, State ) -> - case ensure_auth_method(AuthMethod, State) of + case ensure_auth_method(AuthMethod, AuthData, State) of true -> case AuthCache of #{next_step := client_final} -> @@ -304,11 +304,13 @@ run_fuzzy_filter( %% Internal functions %%------------------------------------------------------------------------------ -ensure_auth_method(<<"SCRAM-SHA-256">>, #{algorithm := sha256}) -> +ensure_auth_method(_AuthMethod, undefined, _State) -> + false; +ensure_auth_method(<<"SCRAM-SHA-256">>, _AuthData, #{algorithm := sha256}) -> true; -ensure_auth_method(<<"SCRAM-SHA-512">>, #{algorithm := sha512}) -> +ensure_auth_method(<<"SCRAM-SHA-512">>, _AuthData, #{algorithm := sha512}) -> true; -ensure_auth_method(_, _) -> +ensure_auth_method(_AuthMethod, _AuthData, _State) -> false. 
check_client_first_message(Bin, _Cache, #{iteration_count := IterationCount} = State) -> diff --git a/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl b/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl index b143903b5..f52e895cc 100644 --- a/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl +++ b/apps/emqx_authn/test/emqx_enhanced_authn_scram_mnesia_SUITE.erl @@ -20,6 +20,7 @@ -compile(nowarn_export_all). -include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). -include_lib("emqx/include/emqx_mqtt.hrl"). -include("emqx_authn.hrl"). @@ -37,9 +38,11 @@ all() -> init_per_suite(Config) -> _ = application:load(emqx_conf), ok = emqx_common_test_helpers:start_apps([emqx_authn]), - Config. + IdleTimeout = emqx_config:get([mqtt, idle_timeout]), + [{idle_timeout, IdleTimeout} | Config]. -end_per_suite(_Config) -> +end_per_suite(Config) -> + ok = emqx_config:put([mqtt, idle_timeout], ?config(idle_timeout, Config)), ok = emqx_common_test_helpers:stop_apps([emqx_authn]). init_per_testcase(_Case, Config) -> @@ -99,6 +102,8 @@ t_authenticate(_Config) -> init_auth(Username, Password, Algorithm), + ok = emqx_config:put([mqtt, idle_timeout], 500), + {ok, Pid} = emqx_authn_mqtt_test_client:start_link("127.0.0.1", 1883), ClientFirstMessage = esasl_scram:client_first_message(Username), @@ -115,6 +120,9 @@ t_authenticate(_Config) -> ok = emqx_authn_mqtt_test_client:send(Pid, ConnectPacket), + %% Intentional sleep to trigger idle timeout for the connection not yet authenticated + ok = ct:sleep(1000), + ?AUTH_PACKET( ?RC_CONTINUE_AUTHENTICATION, #{'Authentication-Data' := ServerFirstMessage} @@ -150,6 +158,28 @@ t_authenticate(_Config) -> ServerFinalMessage, ClientCache#{algorithm => Algorithm} ). 
+t_authenticate_bad_props(_Config) -> + Algorithm = sha512, + Username = <<"u">>, + Password = <<"p">>, + + init_auth(Username, Password, Algorithm), + + {ok, Pid} = emqx_authn_mqtt_test_client:start_link("127.0.0.1", 1883), + + ConnectPacket = ?CONNECT_PACKET( + #mqtt_packet_connect{ + proto_ver = ?MQTT_PROTO_V5, + properties = #{ + 'Authentication-Method' => <<"SCRAM-SHA-512">> + } + } + ), + + ok = emqx_authn_mqtt_test_client:send(Pid, ConnectPacket), + + ?CONNACK_PACKET(?RC_NOT_AUTHORIZED) = receive_packet(). + t_authenticate_bad_username(_Config) -> Algorithm = sha512, Username = <<"u">>, diff --git a/apps/emqx_authz/src/emqx_authz.app.src b/apps/emqx_authz/src/emqx_authz.app.src index f016db09a..943978519 100644 --- a/apps/emqx_authz/src/emqx_authz.app.src +++ b/apps/emqx_authz/src/emqx_authz.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_authz, [ {description, "An OTP application"}, - {vsn, "0.1.14"}, + {vsn, "0.1.15"}, {registered, []}, {mod, {emqx_authz_app, []}}, {applications, [ diff --git a/apps/emqx_authz/src/emqx_authz_api_sources.erl b/apps/emqx_authz/src/emqx_authz_api_sources.erl index c692154b1..58fa471fc 100644 --- a/apps/emqx_authz/src/emqx_authz_api_sources.erl +++ b/apps/emqx_authz/src/emqx_authz_api_sources.erl @@ -47,7 +47,7 @@ -export([ sources/2, source/2, - move_source/2, + source_move/2, aggregate_metrics/1 ]). @@ -164,7 +164,7 @@ schema("/authorization/sources/:type/status") -> }; schema("/authorization/sources/:type/move") -> #{ - 'operationId' => move_source, + 'operationId' => source_move, post => #{ description => ?DESC(authorization_sources_type_move_post), @@ -230,8 +230,6 @@ sources(get, _) -> get_raw_sources() ), {200, #{sources => Sources}}; -sources(post, #{body := #{<<"type">> := <<"file">>} = Body}) -> - create_authz_file(Body); sources(post, #{body := Body}) -> update_config(?CMD_PREPEND, Body). 
@@ -240,75 +238,99 @@ source(Method, #{bindings := #{type := Type} = Bindings} = Req) when -> source(Method, Req#{bindings => Bindings#{type => atom_to_binary(Type, utf8)}}); source(get, #{bindings := #{type := Type}}) -> - case get_raw_source(Type) of - [] -> - {404, #{code => <<"NOT_FOUND">>, message => <<"Not found: ", Type/binary>>}}; - [#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}] -> - case file:read_file(Path) of - {ok, Rules} -> - {200, #{ - type => file, - enable => Enable, - rules => Rules - }}; - {error, Reason} -> - {500, #{ - code => <<"INTERNAL_ERROR">>, - message => bin(Reason) - }} - end; - [Source] -> - {200, Source} - end; -source(put, #{bindings := #{type := <<"file">>}, body := #{<<"type">> := <<"file">>} = Body}) -> - update_authz_file(Body); -source(put, #{bindings := #{type := Type}, body := Body}) -> - update_config({?CMD_REPLACE, Type}, Body); + with_source( + Type, + fun + (#{<<"type">> := <<"file">>, <<"enable">> := Enable, <<"path">> := Path}) -> + case file:read_file(Path) of + {ok, Rules} -> + {200, #{ + type => file, + enable => Enable, + rules => Rules + }}; + {error, Reason} -> + {500, #{ + code => <<"INTERNAL_ERROR">>, + message => bin(Reason) + }} + end; + (Source) -> + {200, Source} + end + ); +source(put, #{bindings := #{type := Type}, body := #{<<"type">> := Type} = Body}) -> + with_source( + Type, + fun(_) -> + update_config({?CMD_REPLACE, Type}, Body) + end + ); +source(put, #{bindings := #{type := Type}, body := #{<<"type">> := _OtherType}}) -> + with_source( + Type, + fun(_) -> + {400, #{code => <<"BAD_REQUEST">>, message => <<"Type mismatch">>}} + end + ); source(delete, #{bindings := #{type := Type}}) -> - update_config({?CMD_DELETE, Type}, #{}). + with_source( + Type, + fun(_) -> + update_config({?CMD_DELETE, Type}, #{}) + end + ). source_status(get, #{bindings := #{type := Type}}) -> - lookup_from_all_nodes(Type). 
+ with_source( + atom_to_binary(Type, utf8), + fun(_) -> lookup_from_all_nodes(Type) end + ). -move_source(Method, #{bindings := #{type := Type} = Bindings} = Req) when +source_move(Method, #{bindings := #{type := Type} = Bindings} = Req) when is_atom(Type) -> - move_source(Method, Req#{bindings => Bindings#{type => atom_to_binary(Type, utf8)}}); -move_source(post, #{bindings := #{type := Type}, body := #{<<"position">> := Position}}) -> - case parse_position(Position) of - {ok, NPosition} -> - try emqx_authz:move(Type, NPosition) of - {ok, _} -> - {204}; - {error, {not_found_source, _Type}} -> - {404, #{ - code => <<"NOT_FOUND">>, - message => <<"source ", Type/binary, " not found">> - }}; - {error, {emqx_conf_schema, _}} -> - {400, #{ - code => <<"BAD_REQUEST">>, - message => <<"BAD_SCHEMA">> - }}; + source_move(Method, Req#{bindings => Bindings#{type => atom_to_binary(Type, utf8)}}); +source_move(post, #{bindings := #{type := Type}, body := #{<<"position">> := Position}}) -> + with_source( + Type, + fun(_Source) -> + case parse_position(Position) of + {ok, NPosition} -> + try emqx_authz:move(Type, NPosition) of + {ok, _} -> + {204}; + {error, {not_found_source, _Type}} -> + {404, #{ + code => <<"NOT_FOUND">>, + message => <<"source ", Type/binary, " not found">> + }}; + {error, {emqx_conf_schema, _}} -> + {400, #{ + code => <<"BAD_REQUEST">>, + message => <<"BAD_SCHEMA">> + }}; + {error, Reason} -> + {400, #{ + code => <<"BAD_REQUEST">>, + message => bin(Reason) + }} + catch + error:{unknown_authz_source_type, Unknown} -> + NUnknown = bin(Unknown), + {400, #{ + code => <<"BAD_REQUEST">>, + message => <<"Unknown authz Source Type: ", NUnknown/binary>> + }} + end; {error, Reason} -> {400, #{ code => <<"BAD_REQUEST">>, message => bin(Reason) }} - catch - error:{unknown_authz_source_type, Unknown} -> - NUnknown = bin(Unknown), - {400, #{ - code => <<"BAD_REQUEST">>, - message => <<"Unknown authz Source Type: ", NUnknown/binary>> - }} - end; - {error, Reason} -> - 
{400, #{ - code => <<"BAD_REQUEST">>, - message => bin(Reason) - }} - end. + end + end + ). %%-------------------------------------------------------------------- %% Internal functions @@ -334,7 +356,7 @@ lookup_from_local_node(Type) -> end. lookup_from_all_nodes(Type) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case is_ok(emqx_authz_proto_v1:lookup_from_all_nodes(Nodes, Type)) of {ok, ResList} -> {StatusMap, MetricsMap, ResourceMetricsMap, ErrorMap} = make_result_map(ResList), @@ -484,6 +506,15 @@ get_raw_source(Type) -> get_raw_sources() ). +-spec with_source(binary(), fun((map()) -> term())) -> term(). +with_source(Type, ContF) -> + case get_raw_source(Type) of + [] -> + {404, #{code => <<"NOT_FOUND">>, message => <<"Not found: ", Type/binary>>}}; + [Source] -> + ContF(Source) + end. + update_config(Cmd, Sources) -> case emqx_authz:update(Cmd, Sources) of {ok, _} -> @@ -628,13 +659,3 @@ status_metrics_example() -> } } }. - -create_authz_file(Body) -> - do_update_authz_file(?CMD_PREPEND, Body). - -update_authz_file(Body) -> - do_update_authz_file({?CMD_REPLACE, <<"file">>}, Body). - -do_update_authz_file(Cmd, Body) -> - %% API update will placed in `authz` subdirectory inside EMQX's `data_dir` - update_config(Cmd, Body). 
diff --git a/apps/emqx_authz/src/emqx_authz_schema.erl b/apps/emqx_authz/src/emqx_authz_schema.erl index e68ab3a50..b15d4abd4 100644 --- a/apps/emqx_authz/src/emqx_authz_schema.erl +++ b/apps/emqx_authz/src/emqx_authz_schema.erl @@ -240,7 +240,7 @@ http_common_fields() -> mongo_common_fields() -> [ {collection, - ?HOCON(atom(), #{ + ?HOCON(binary(), #{ required => true, desc => ?DESC(collection) })}, diff --git a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl index 76b025716..411399d64 100644 --- a/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl +++ b/apps/emqx_authz/test/emqx_authz_api_sources_SUITE.erl @@ -21,7 +21,6 @@ -import(emqx_mgmt_api_test_util, [request/3, uri/1]). -include_lib("eunit/include/eunit.hrl"). --include_lib("common_test/include/ct.hrl"). -include_lib("emqx/include/emqx_placeholder.hrl"). -define(MONGO_SINGLE_HOST, "mongo"). @@ -183,7 +182,7 @@ t_api(_) -> {ok, 404, ErrResult} = request(get, uri(["authorization", "sources", "http"]), []), ?assertMatch( #{<<"code">> := <<"NOT_FOUND">>, <<"message">> := <<"Not found: http">>}, - jsx:decode(ErrResult) + emqx_json:decode(ErrResult, [return_maps]) ), [ @@ -215,7 +214,9 @@ t_api(_) -> ?SOURCE1#{<<"enable">> := false} ), {ok, 200, Result3} = request(get, uri(["authorization", "sources", "http"]), []), - ?assertMatch(#{<<"type">> := <<"http">>, <<"enable">> := false}, jsx:decode(Result3)), + ?assertMatch( + #{<<"type">> := <<"http">>, <<"enable">> := false}, emqx_json:decode(Result3, [return_maps]) + ), Keyfile = emqx_common_test_helpers:app_path( emqx, @@ -252,7 +253,7 @@ t_api(_) -> <<"total">> := 0, <<"nomatch">> := 0 } - } = jiffy:decode(Status4, [return_maps]), + } = emqx_json:decode(Status4, [return_maps]), ?assertMatch( #{ <<"type">> := <<"mongodb">>, @@ -264,7 +265,7 @@ t_api(_) -> <<"verify">> := <<"verify_none">> } }, - jsx:decode(Result4) + emqx_json:decode(Result4, [return_maps]) ), {ok, Cacert} = 
file:read_file(Cacertfile), @@ -296,7 +297,7 @@ t_api(_) -> <<"verify">> := <<"verify_none">> } }, - jsx:decode(Result5) + emqx_json:decode(Result5, [return_maps]) ), {ok, 200, Status5_1} = request(get, uri(["authorization", "sources", "mongodb", "status"]), []), @@ -307,7 +308,7 @@ t_api(_) -> <<"total">> := 0, <<"nomatch">> := 0 } - } = jiffy:decode(Status5_1, [return_maps]), + } = emqx_json:decode(Status5_1, [return_maps]), #{ ssl := #{ @@ -332,6 +333,7 @@ t_api(_) -> uri(["authorization", "sources", "postgresql"]), ?SOURCE4#{<<"server">> := <<"fake">>} ), + {ok, 204, _} = request( put, uri(["authorization", "sources", "redis"]), @@ -343,6 +345,19 @@ t_api(_) -> } ), + {ok, 400, TypeMismatch} = request( + put, + uri(["authorization", "sources", "file"]), + #{<<"type">> => <<"built_in_database">>, <<"enable">> => false} + ), + ?assertMatch( + #{ + <<"code">> := <<"BAD_REQUEST">>, + <<"message">> := <<"Type mismatch", _/binary>> + }, + emqx_json:decode(TypeMismatch, [return_maps]) + ), + lists:foreach( fun(#{<<"type">> := Type}) -> {ok, 204, _} = request( @@ -357,6 +372,43 @@ t_api(_) -> ?assertEqual([], get_sources(Result6)), ?assertEqual([], emqx:get_config([authorization, sources])), + lists:foreach( + fun(#{<<"type">> := Type}) -> + {ok, 404, _} = request( + get, + uri(["authorization", "sources", binary_to_list(Type), "status"]), + [] + ), + {ok, 404, _} = request( + post, + uri(["authorization", "sources", binary_to_list(Type), "move"]), + #{<<"position">> => <<"front">>} + ), + {ok, 404, _} = request( + get, + uri(["authorization", "sources", binary_to_list(Type)]), + [] + ), + {ok, 404, _} = request( + delete, + uri(["authorization", "sources", binary_to_list(Type)]), + [] + ) + end, + Sources + ), + + {ok, 404, _TypeMismatch2} = request( + put, + uri(["authorization", "sources", "file"]), + #{<<"type">> => <<"built_in_database">>, <<"enable">> => false} + ), + {ok, 404, _} = request( + put, + uri(["authorization", "sources", "built_in_database"]), + 
#{<<"type">> => <<"built_in_database">>, <<"enable">> => false} + ), + {ok, 204, _} = request(post, uri(["authorization", "sources"]), ?SOURCE6), {ok, Client} = emqtt:start_link( @@ -368,7 +420,6 @@ t_api(_) -> ] ), emqtt:connect(Client), - timer:sleep(50), emqtt:publish( Client, @@ -378,17 +429,24 @@ t_api(_) -> [{qos, 1}] ), - {ok, 200, Status5} = request(get, uri(["authorization", "sources", "file", "status"]), []), - #{ - <<"metrics">> := #{ - <<"allow">> := 1, - <<"deny">> := 0, - <<"total">> := 1, - <<"nomatch">> := 0 - } - } = jiffy:decode(Status5, [return_maps]), + snabbkaffe:retry( + 10, + 3, + fun() -> + {ok, 200, Status5} = request( + get, uri(["authorization", "sources", "file", "status"]), [] + ), + #{ + <<"metrics">> := #{ + <<"allow">> := 1, + <<"deny">> := 0, + <<"total">> := 1, + <<"nomatch">> := 0 + } + } = emqx_json:decode(Status5, [return_maps]) + end + ), - timer:sleep(50), emqtt:publish( Client, <<"t2">>, @@ -397,17 +455,24 @@ t_api(_) -> [{qos, 1}] ), - {ok, 200, Status6} = request(get, uri(["authorization", "sources", "file", "status"]), []), - #{ - <<"metrics">> := #{ - <<"allow">> := 2, - <<"deny">> := 0, - <<"total">> := 2, - <<"nomatch">> := 0 - } - } = jiffy:decode(Status6, [return_maps]), + snabbkaffe:retry( + 10, + 3, + fun() -> + {ok, 200, Status6} = request( + get, uri(["authorization", "sources", "file", "status"]), [] + ), + #{ + <<"metrics">> := #{ + <<"allow">> := 2, + <<"deny">> := 0, + <<"total">> := 2, + <<"nomatch">> := 0 + } + } = emqx_json:decode(Status6, [return_maps]) + end + ), - timer:sleep(50), emqtt:publish( Client, <<"t3">>, @@ -416,20 +481,26 @@ t_api(_) -> [{qos, 1}] ), - timer:sleep(50), - {ok, 200, Status7} = request(get, uri(["authorization", "sources", "file", "status"]), []), - #{ - <<"metrics">> := #{ - <<"allow">> := 3, - <<"deny">> := 0, - <<"total">> := 3, - <<"nomatch">> := 0 - } - } = jiffy:decode(Status7, [return_maps]), - + snabbkaffe:retry( + 10, + 3, + fun() -> + {ok, 200, Status7} = request( + get, 
uri(["authorization", "sources", "file", "status"]), [] + ), + #{ + <<"metrics">> := #{ + <<"allow">> := 3, + <<"deny">> := 0, + <<"total">> := 3, + <<"nomatch">> := 0 + } + } = emqx_json:decode(Status7, [return_maps]) + end + ), ok. -t_move_source(_) -> +t_source_move(_) -> {ok, _} = emqx_authz:update(replace, [?SOURCE1, ?SOURCE2, ?SOURCE3, ?SOURCE4, ?SOURCE5]), ?assertMatch( [ @@ -550,7 +621,7 @@ t_aggregate_metrics(_) -> ). get_sources(Result) -> - maps:get(<<"sources">>, jsx:decode(Result), []). + maps:get(<<"sources">>, emqx_json:decode(Result, [return_maps])). data_dir() -> emqx:data_dir(). diff --git a/apps/emqx_bridge/src/emqx_bridge.app.src b/apps/emqx_bridge/src/emqx_bridge.app.src index 0ec246320..37ec1266a 100644 --- a/apps/emqx_bridge/src/emqx_bridge.app.src +++ b/apps/emqx_bridge/src/emqx_bridge.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_bridge, [ {description, "EMQX bridges"}, - {vsn, "0.1.12"}, + {vsn, "0.1.13"}, {registered, []}, {mod, {emqx_bridge_app, []}}, {applications, [ diff --git a/apps/emqx_bridge/src/emqx_bridge.erl b/apps/emqx_bridge/src/emqx_bridge.erl index dc0a96690..ddf24d380 100644 --- a/apps/emqx_bridge/src/emqx_bridge.erl +++ b/apps/emqx_bridge/src/emqx_bridge.erl @@ -57,7 +57,13 @@ T == influxdb_api_v2; T == redis_single; T == redis_sentinel; - T == redis_cluster + T == redis_cluster; + T == clickhouse; + T == pgsql; + T == timescale; + T == matrix; + T == tdengine; + T == dynamo ). load() -> diff --git a/apps/emqx_bridge/src/emqx_bridge_api.erl b/apps/emqx_bridge/src/emqx_bridge_api.erl index 3c7c30660..30671b2bb 100644 --- a/apps/emqx_bridge/src/emqx_bridge_api.erl +++ b/apps/emqx_bridge/src/emqx_bridge_api.erl @@ -46,17 +46,28 @@ -export([lookup_from_local_node/2]). +-define(BAD_REQUEST(Reason), {400, error_msg('BAD_REQUEST', Reason)}). + +-define(BRIDGE_NOT_ENABLED, + ?BAD_REQUEST(<<"Forbidden operation, bridge not enabled">>) +). + +-define(NOT_FOUND(Reason), {404, error_msg('NOT_FOUND', Reason)}). 
+ +-define(BRIDGE_NOT_FOUND(BridgeType, BridgeName), + ?NOT_FOUND( + <<"Bridge lookup failed: bridge named '", BridgeName/binary, "' of type ", + (atom_to_binary(BridgeType))/binary, " does not exist.">> + ) +). + -define(TRY_PARSE_ID(ID, EXPR), try emqx_bridge_resource:parse_bridge_id(Id) of {BridgeType, BridgeName} -> EXPR catch throw:{invalid_bridge_id, Reason} -> - {400, - error_msg( - 'INVALID_ID', - <<"Invalid bride ID, ", Reason/binary>> - )} + ?NOT_FOUND(<<"Invalid bridge ID, ", Reason/binary>>) end ). @@ -93,11 +104,11 @@ get_response_body_schema() -> param_path_operation_cluster() -> {operation, mk( - enum([stop, restart]), + enum([start, stop, restart]), #{ in => path, required => true, - example => <<"restart">>, + example => <<"start">>, desc => ?DESC("desc_param_path_operation_cluster") } )}. @@ -105,11 +116,11 @@ param_path_operation_cluster() -> param_path_operation_on_node() -> {operation, mk( - enum([stop, restart]), + enum([start, stop, restart]), #{ in => path, required => true, - example => <<"stop">>, + example => <<"start">>, desc => ?DESC("desc_param_path_operation_on_node") } )}. 
@@ -338,7 +349,7 @@ schema("/bridges/:id") -> responses => #{ 200 => get_response_body_schema(), 404 => error_schema('NOT_FOUND', "Bridge not found"), - 400 => error_schema(['BAD_REQUEST', 'INVALID_ID'], "Update bridge failed") + 400 => error_schema('BAD_REQUEST', "Update bridge failed") } }, delete => #{ @@ -348,9 +359,11 @@ schema("/bridges/:id") -> parameters => [param_path_id()], responses => #{ 204 => <<"Bridge deleted">>, - 400 => error_schema(['INVALID_ID'], "Update bridge failed"), + 400 => error_schema( + 'BAD_REQUEST', + "Cannot delete bridge while active rules are defined for this bridge" + ), 404 => error_schema('NOT_FOUND', "Bridge not found"), - 403 => error_schema('FORBIDDEN_REQUEST', "Forbidden operation"), 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } } @@ -379,7 +392,7 @@ schema("/bridges/:id/metrics/reset") -> parameters => [param_path_id()], responses => #{ 204 => <<"Reset success">>, - 400 => error_schema(['BAD_REQUEST'], "RPC Call Failed") + 404 => error_schema('NOT_FOUND', "Bridge not found") } } }; @@ -395,7 +408,7 @@ schema("/bridges/:id/enable/:enable") -> responses => #{ 204 => <<"Success">>, - 400 => error_schema('INVALID_ID', "Bad bridge ID"), + 404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"), 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } } @@ -413,7 +426,10 @@ schema("/bridges/:id/:operation") -> ], responses => #{ 204 => <<"Operation success">>, - 400 => error_schema('INVALID_ID', "Bad bridge ID"), + 400 => error_schema( + 'BAD_REQUEST', "Problem with configuration of external service" + ), + 404 => error_schema('NOT_FOUND', "Bridge not found or invalid operation"), 501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"), 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } @@ -433,8 +449,11 @@ schema("/nodes/:node/bridges/:id/:operation") -> ], responses => #{ 204 => <<"Operation success">>, - 400 => error_schema('INVALID_ID', "Bad bridge ID"), - 
403 => error_schema('FORBIDDEN_REQUEST', "forbidden operation"), + 400 => error_schema( + 'BAD_REQUEST', + "Problem with configuration of external service or bridge not enabled" + ), + 404 => error_schema('NOT_FOUND', "Bridge or node not found or invalid operation"), 501 => error_schema('NOT_IMPLEMENTED', "Not Implemented"), 503 => error_schema('SERVICE_UNAVAILABLE', "Service unavailable") } @@ -459,21 +478,19 @@ schema("/bridges_probe") -> }. '/bridges'(post, #{body := #{<<"type">> := BridgeType, <<"name">> := BridgeName} = Conf0}) -> - Conf = filter_out_request_body(Conf0), case emqx_bridge:lookup(BridgeType, BridgeName) of {ok, _} -> {400, error_msg('ALREADY_EXISTS', <<"bridge already exists">>)}; {error, not_found} -> - case ensure_bridge_created(BridgeType, BridgeName, Conf) of - ok -> lookup_from_all_nodes(BridgeType, BridgeName, 201); - {error, Error} -> {400, Error} - end + Conf = filter_out_request_body(Conf0), + {ok, _} = emqx_bridge:create(BridgeType, BridgeName, Conf), + lookup_from_all_nodes(BridgeType, BridgeName, 201) end; '/bridges'(get, _Params) -> {200, zip_bridges([ [format_resp(Data, Node) || Data <- emqx_bridge_proto_v1:list_bridges(Node)] - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ])}. 
'/bridges/:id'(get, #{bindings := #{id := Id}}) -> @@ -486,43 +503,38 @@ schema("/bridges_probe") -> {ok, _} -> RawConf = emqx:get_raw_config([bridges, BridgeType, BridgeName], #{}), Conf = deobfuscate(Conf1, RawConf), - case ensure_bridge_created(BridgeType, BridgeName, Conf) of - ok -> - lookup_from_all_nodes(BridgeType, BridgeName, 200); - {error, Error} -> - {400, Error} - end; + {ok, _} = emqx_bridge:create(BridgeType, BridgeName, Conf), + lookup_from_all_nodes(BridgeType, BridgeName, 200); {error, not_found} -> - {404, error_msg('NOT_FOUND', <<"bridge not found">>)} + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) end ); '/bridges/:id'(delete, #{bindings := #{id := Id}, query_string := Qs}) -> - AlsoDeleteActs = - case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of - <<"true">> -> true; - true -> true; - _ -> false - end, ?TRY_PARSE_ID( Id, case emqx_bridge:lookup(BridgeType, BridgeName) of {ok, _} -> + AlsoDeleteActs = + case maps:get(<<"also_delete_dep_actions">>, Qs, <<"false">>) of + <<"true">> -> true; + true -> true; + _ -> false + end, case emqx_bridge:check_deps_and_remove(BridgeType, BridgeName, AlsoDeleteActs) of {ok, _} -> 204; {error, {rules_deps_on_this_bridge, RuleIds}} -> - {403, - error_msg( - 'FORBIDDEN_REQUEST', - {<<"There're some rules dependent on this bridge">>, RuleIds} - )}; + ?BAD_REQUEST( + {<<"Cannot delete bridge while active rules are defined for this bridge">>, + RuleIds} + ); {error, timeout} -> {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; {error, Reason} -> {500, error_msg('INTERNAL_ERROR', Reason)} end; {error, not_found} -> - {404, error_msg('NOT_FOUND', <<"Bridge not found">>)} + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) end ). 
@@ -532,13 +544,11 @@ schema("/bridges_probe") -> '/bridges/:id/metrics/reset'(put, #{bindings := #{id := Id}}) -> ?TRY_PARSE_ID( Id, - case - emqx_bridge_resource:reset_metrics( + begin + ok = emqx_bridge_resource:reset_metrics( emqx_bridge_resource:resource_id(BridgeType, BridgeName) - ) - of - ok -> {204}; - Reason -> {400, error_msg('BAD_REQUEST', Reason)} + ), + {204} end ). @@ -549,9 +559,9 @@ schema("/bridges_probe") -> Params1 = maybe_deobfuscate_bridge_probe(Params), case emqx_bridge_resource:create_dry_run(ConnType, maps:remove(<<"type">>, Params1)) of ok -> - {204}; - {error, Error} -> - {400, error_msg('TEST_FAILED', Error)} + 204; + {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> + {400, error_msg('TEST_FAILED', to_hr_reason(Reason))} end; BadRequest -> BadRequest @@ -578,14 +588,14 @@ lookup_from_all_nodes_metrics(BridgeType, BridgeName, SuccCode) -> do_lookup_from_all_nodes(BridgeType, BridgeName, SuccCode, FormatFun). do_lookup_from_all_nodes(BridgeType, BridgeName, SuccCode, FormatFun) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case is_ok(emqx_bridge_proto_v1:lookup_from_all_nodes(Nodes, BridgeType, BridgeName)) of {ok, [{ok, _} | _] = Results} -> {SuccCode, FormatFun([R || {ok, R} <- Results])}; {ok, [{error, not_found} | _]} -> - {404, error_msg('NOT_FOUND', <<"not_found">>)}; - {error, ErrL} -> - {500, error_msg('INTERNAL_ERROR', ErrL)} + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName); + {error, Reason} -> + {500, error_msg('INTERNAL_ERROR', Reason)} end. 
lookup_from_local_node(BridgeType, BridgeName) -> @@ -599,13 +609,13 @@ lookup_from_local_node(BridgeType, BridgeName) -> Id, case enable_func(Enable) of invalid -> - {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; + ?NOT_FOUND(<<"Invalid operation">>); OperFunc -> case emqx_bridge:disable_enable(OperFunc, BridgeType, BridgeName) of {ok, _} -> - {204}; + 204; {error, {pre_config_update, _, bridge_not_found}} -> - {404, error_msg('NOT_FOUND', <<"bridge not found">>)}; + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName); {error, {_, _, timeout}} -> {503, error_msg('SERVICE_UNAVAILABLE', <<"request timeout">>)}; {error, timeout} -> @@ -624,10 +634,18 @@ lookup_from_local_node(BridgeType, BridgeName) -> Id, case operation_to_all_func(Op) of invalid -> - {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; + ?NOT_FOUND(<<"Invalid operation: ", Op/binary>>); OperFunc -> - Nodes = mria_mnesia:running_nodes(), - call_operation(all, OperFunc, [Nodes, BridgeType, BridgeName]) + try is_enabled_bridge(BridgeType, BridgeName) of + false -> + ?BRIDGE_NOT_ENABLED; + true -> + Nodes = mria:running_nodes(), + call_operation(all, OperFunc, [Nodes, BridgeType, BridgeName]) + catch + throw:not_found -> + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) + end end ). 
@@ -639,16 +657,11 @@ lookup_from_local_node(BridgeType, BridgeName) -> Id, case node_operation_func(Op) of invalid -> - {400, error_msg('BAD_REQUEST', <<"invalid operation">>)}; + ?NOT_FOUND(<<"Invalid operation: ", Op/binary>>); OperFunc -> - ConfMap = emqx:get_config([bridges, BridgeType, BridgeName]), - case maps:get(enable, ConfMap, false) of + try is_enabled_bridge(BridgeType, BridgeName) of false -> - {403, - error_msg( - 'FORBIDDEN_REQUEST', - <<"forbidden operation: bridge disabled">> - )}; + ?BRIDGE_NOT_ENABLED; true -> case emqx_misc:safe_to_existing_atom(Node, utf8) of {ok, TargetNode} -> @@ -656,12 +669,24 @@ lookup_from_local_node(BridgeType, BridgeName) -> TargetNode, BridgeType, BridgeName ]); {error, _} -> - {400, error_msg('INVALID_NODE', <<"invalid node">>)} + ?NOT_FOUND(<<"Invalid node name: ", Node/binary>>) end + catch + throw:not_found -> + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName) end end ). +is_enabled_bridge(BridgeType, BridgeName) -> + try emqx:get_config([bridges, BridgeType, BridgeName]) of + ConfMap -> + maps:get(enable, ConfMap, false) + catch + error:{config_not_found, _} -> + throw(not_found) + end. + node_operation_func(<<"restart">>) -> restart_bridge_to_node; node_operation_func(<<"start">>) -> start_bridge_to_node; node_operation_func(<<"stop">>) -> stop_bridge_to_node; @@ -676,12 +701,6 @@ enable_func(<<"true">>) -> enable; enable_func(<<"false">>) -> disable; enable_func(_) -> invalid. -ensure_bridge_created(BridgeType, BridgeName, Conf) -> - case emqx_bridge:create(BridgeType, BridgeName, Conf) of - {ok, _} -> ok; - {error, Reason} -> {error, error_msg('BAD_REQUEST', Reason)} - end. - zip_bridges([BridgesFirstNode | _] = BridgesAllNodes) -> lists:foldl( fun(#{type := Type, name := Name}, Acc) -> @@ -892,7 +911,7 @@ is_ok(ResL) -> ) of [] -> {ok, [Res || {ok, Res} <- ResL]}; - ErrL -> {error, ErrL} + ErrL -> hd(ErrL) end. 
filter_out_request_body(Conf) -> @@ -918,12 +937,10 @@ bin(S) when is_atom(S) -> bin(S) when is_binary(S) -> S. -call_operation(NodeOrAll, OperFunc, Args) -> +call_operation(NodeOrAll, OperFunc, Args = [_Nodes, BridgeType, BridgeName]) -> case is_ok(do_bpapi_call(NodeOrAll, OperFunc, Args)) of - ok -> - {204}; - {ok, _} -> - {204}; + Ok when Ok =:= ok; is_tuple(Ok), element(1, Ok) =:= ok -> + 204; {error, not_implemented} -> %% Should only happen if we call `start` on a node that is %% still on an older bpapi version that doesn't support it. @@ -941,8 +958,12 @@ call_operation(NodeOrAll, OperFunc, Args) -> ) ) )}; - {error, Reason} -> - {500, error_msg('INTERNAL_ERROR', Reason)} + {error, not_found} -> + ?BRIDGE_NOT_FOUND(BridgeType, BridgeName); + {error, {node_not_found, Node}} -> + ?NOT_FOUND(<<"Node not found: ", (atom_to_binary(Node))/binary>>); + {error, Reason} when not is_tuple(Reason); element(1, Reason) =/= 'exit' -> + ?BAD_REQUEST(to_hr_reason(Reason)) end. maybe_try_restart(all, start_bridges_to_all_nodes, Args) -> @@ -950,14 +971,19 @@ maybe_try_restart(all, start_bridges_to_all_nodes, Args) -> maybe_try_restart(Node, start_bridge_to_node, Args) -> call_operation(Node, restart_bridge_to_node, Args); maybe_try_restart(_, _, _) -> - {501}. + 501. do_bpapi_call(all, Call, Args) -> maybe_unwrap( do_bpapi_call_vsn(emqx_bpapi:supported_version(emqx_bridge), Call, Args) ); do_bpapi_call(Node, Call, Args) -> - do_bpapi_call_vsn(emqx_bpapi:supported_version(Node, emqx_bridge), Call, Args). + case lists:member(Node, mria:running_nodes()) of + true -> + do_bpapi_call_vsn(emqx_bpapi:supported_version(Node, emqx_bridge), Call, Args); + false -> + {error, {node_not_found, Node}} + end. do_bpapi_call_vsn(SupportedVersion, Call, Args) -> case lists:member(SupportedVersion, supported_versions(Call)) of @@ -976,6 +1002,19 @@ supported_versions(start_bridge_to_node) -> [2]; supported_versions(start_bridges_to_all_nodes) -> [2]; supported_versions(_Call) -> [1, 2]. 
+to_hr_reason(nxdomain) -> + <<"Host not found">>; +to_hr_reason(econnrefused) -> + <<"Connection refused">>; +to_hr_reason({unauthorized_client, _}) -> + <<"Unauthorized client">>; +to_hr_reason({not_authorized, _}) -> + <<"Not authorized">>; +to_hr_reason({malformed_username_or_password, _}) -> + <<"Malformed username or password">>; +to_hr_reason(Reason) -> + Reason. + redact(Term) -> emqx_misc:redact(Term). diff --git a/apps/emqx_bridge/src/emqx_bridge_resource.erl b/apps/emqx_bridge/src/emqx_bridge_resource.erl index d2ce7a9d5..53fc7df4c 100644 --- a/apps/emqx_bridge/src/emqx_bridge_resource.erl +++ b/apps/emqx_bridge/src/emqx_bridge_resource.erl @@ -79,7 +79,7 @@ parse_bridge_id(BridgeId) -> {to_type_atom(Type), validate_name(Name)}; _ -> invalid_bridge_id( - <<"should be of forst {type}:{name}, but got ", BridgeId/binary>> + <<"should be of pattern {type}:{name}, but got ", BridgeId/binary>> ) end. diff --git a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl index d242111dc..8b388a771 100644 --- a/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_api_SUITE.erl @@ -73,7 +73,7 @@ init_per_suite(Config) -> _ = application:stop(emqx_resource), _ = application:stop(emqx_connector), ok = emqx_mgmt_api_test_util:init_suite( - [emqx_rule_engine, emqx_bridge] + [emqx_rule_engine, emqx_bridge, emqx_authn] ), ok = emqx_common_test_helpers:load_config( emqx_rule_engine_schema, @@ -83,7 +83,8 @@ init_per_suite(Config) -> Config. end_per_suite(_Config) -> - emqx_mgmt_api_test_util:end_suite([emqx_rule_engine, emqx_bridge]), + emqx_mgmt_api_test_util:end_suite([emqx_rule_engine, emqx_bridge, emqx_authn]), + mria:clear_table(emqx_authn_mnesia), ok. 
init_per_testcase(t_broken_bpapi_vsn, Config) -> @@ -195,6 +196,9 @@ t_http_crud_apis(Config) -> %% assert we there's no bridges at first {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + {ok, 404, _} = request(get, uri(["bridges", "foo"]), []), + {ok, 404, _} = request(get, uri(["bridges", "webhook:foo"]), []), + %% then we add a webhook bridge, using POST %% POST /bridges/ will create a bridge URL1 = ?URL(Port, "path1"), @@ -213,7 +217,7 @@ t_http_crud_apis(Config) -> <<"status">> := _, <<"node_status">> := [_ | _], <<"url">> := URL1 - } = jsx:decode(Bridge), + } = emqx_json:decode(Bridge, [return_maps]), BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% send an message to emqx and the message should be forwarded to the HTTP server @@ -250,7 +254,7 @@ t_http_crud_apis(Config) -> <<"node_status">> := [_ | _], <<"url">> := URL2 }, - jsx:decode(Bridge2) + emqx_json:decode(Bridge2, [return_maps]) ), %% list all bridges again, assert Bridge2 is in it @@ -268,7 +272,7 @@ t_http_crud_apis(Config) -> <<"url">> := URL2 } ], - jsx:decode(Bridge2Str) + emqx_json:decode(Bridge2Str, [return_maps]) ), %% get the bridge by id @@ -282,7 +286,7 @@ t_http_crud_apis(Config) -> <<"node_status">> := [_ | _], <<"url">> := URL2 }, - jsx:decode(Bridge3Str) + emqx_json:decode(Bridge3Str, [return_maps]) ), %% send an message to emqx again, check the path has been changed @@ -311,19 +315,30 @@ t_http_crud_apis(Config) -> ), ?assertMatch( #{ - <<"code">> := _, - <<"message">> := <<"bridge not found">> + <<"code">> := <<"NOT_FOUND">>, + <<"message">> := _ }, - jsx:decode(ErrMsg2) + emqx_json:decode(ErrMsg2, [return_maps]) ), + + %% try delete bad bridge id + {ok, 404, BadId} = request(delete, uri(["bridges", "foo"]), []), + ?assertMatch( + #{ + <<"code">> := <<"NOT_FOUND">>, + <<"message">> := <<"Invalid bridge ID", _/binary>> + }, + emqx_json:decode(BadId, [return_maps]) + ), + %% Deleting a non-existing bridge should result in an error {ok, 404, ErrMsg3} = 
request(delete, uri(["bridges", BridgeID]), []), ?assertMatch( #{ - <<"code">> := _, - <<"message">> := <<"Bridge not found">> + <<"code">> := <<"NOT_FOUND">>, + <<"message">> := _ }, - jsx:decode(ErrMsg3) + emqx_json:decode(ErrMsg3, [return_maps]) ), ok. @@ -401,14 +416,17 @@ t_check_dependent_actions_on_delete(Config) -> <<"sql">> => <<"SELECT * from \"t\"">> } ), - #{<<"id">> := RuleId} = jsx:decode(Rule), - %% delete the bridge should fail because there is a rule depenents on it - {ok, 403, _} = request(delete, uri(["bridges", BridgeID]), []), + #{<<"id">> := RuleId} = emqx_json:decode(Rule, [return_maps]), + %% deleting the bridge should fail because there is a rule that depends on it + {ok, 400, _} = request( + delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions=false", [] + ), %% delete the rule first {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), %% then delete the bridge is OK {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + ok. 
t_cascade_delete_actions(Config) -> @@ -436,18 +454,39 @@ t_cascade_delete_actions(Config) -> <<"sql">> => <<"SELECT * from \"t\"">> } ), - #{<<"id">> := RuleId} = jsx:decode(Rule), + #{<<"id">> := RuleId} = emqx_json:decode(Rule, [return_maps]), %% delete the bridge will also delete the actions from the rules - {ok, 204, _} = request(delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions", []), + {ok, 204, _} = request( + delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions=true", [] + ), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), {ok, 200, Rule1} = request(get, uri(["rules", RuleId]), []), ?assertMatch( #{ <<"actions">> := [] }, - jsx:decode(Rule1) + emqx_json:decode(Rule1, [return_maps]) ), {ok, 204, <<>>} = request(delete, uri(["rules", RuleId]), []), + + {ok, 201, _} = request( + post, + uri(["bridges"]), + ?HTTP_BRIDGE(URL1, ?BRIDGE_TYPE, Name) + ), + {ok, 201, _} = request( + post, + uri(["rules"]), + #{ + <<"name">> => <<"t_http_crud_apis">>, + <<"enable">> => true, + <<"actions">> => [BridgeID], + <<"sql">> => <<"SELECT * from \"t\"">> + } + ), + + {ok, 204, _} = request(delete, uri(["bridges", BridgeID]) ++ "?also_delete_dep_actions", []), + {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), ok. t_broken_bpapi_vsn(Config) -> @@ -485,6 +524,18 @@ t_old_bpapi_vsn(Config) -> ok. t_start_stop_bridges_node(Config) -> + {ok, 404, _} = + request( + post, + uri(["nodes", "thisbetterbenotanatomyet", "bridges", "webhook:foo", start]), + <<"">> + ), + {ok, 404, _} = + request( + post, + uri(["nodes", "undefined", "bridges", "webhook:foo", start]), + <<"">> + ), do_start_stop_bridges(node, Config). 
t_start_stop_bridges_cluster(Config) -> @@ -510,34 +561,42 @@ do_start_stop_bridges(Type, Config) -> <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], <<"url">> := URL1 - } = jsx:decode(Bridge), + } = emqx_json:decode(Bridge, [return_maps]), BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% stop it {ok, 204, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), + ?assertMatch(#{<<"status">> := <<"stopped">>}, emqx_json:decode(Bridge2, [return_maps])), %% start again {ok, 204, <<>>} = request(post, operation_path(Type, start, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), + ?assertMatch(#{<<"status">> := <<"connected">>}, emqx_json:decode(Bridge3, [return_maps])), %% start a started bridge {ok, 204, <<>>} = request(post, operation_path(Type, start, BridgeID), <<"">>), {ok, 200, Bridge3_1} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3_1)), + ?assertMatch(#{<<"status">> := <<"connected">>}, emqx_json:decode(Bridge3_1, [return_maps])), %% restart an already started bridge {ok, 204, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), + ?assertMatch(#{<<"status">> := <<"connected">>}, emqx_json:decode(Bridge3, [return_maps])), %% stop it again {ok, 204, <<>>} = request(post, operation_path(Type, stop, BridgeID), <<"">>), %% restart a stopped bridge {ok, 204, <<>>} = request(post, operation_path(Type, restart, BridgeID), <<"">>), {ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, 
jsx:decode(Bridge4)), + ?assertMatch(#{<<"status">> := <<"connected">>}, emqx_json:decode(Bridge4, [return_maps])), + + {ok, 404, _} = request(post, operation_path(Type, invalidop, BridgeID), <<"">>), + %% delete the bridge {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []), + %% Fail parse-id check + {ok, 404, _} = request(post, operation_path(Type, start, <<"wreckbook_fugazi">>), <<"">>), + %% Looks ok but doesn't exist + {ok, 404, _} = request(post, operation_path(Type, start, <<"webhook:cptn_hook">>), <<"">>), + %% Create broken bridge {ListenPort, Sock} = listen_on_random_port(), %% Connecting to this endpoint should always timeout @@ -555,7 +614,7 @@ do_start_stop_bridges(Type, Config) -> <<"server">> := BadServer, <<"status">> := <<"connecting">>, <<"node_status">> := [_ | _] - } = jsx:decode(BadBridge1), + } = emqx_json:decode(BadBridge1, [return_maps]), BadBridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE_MQTT, BadName), ?assertMatch( {ok, SC, _} when SC == 500 orelse SC == 503, @@ -584,33 +643,39 @@ t_enable_disable_bridges(Config) -> <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], <<"url">> := URL1 - } = jsx:decode(Bridge), + } = emqx_json:decode(Bridge, [return_maps]), BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), %% disable it {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), <<"">>), {ok, 200, Bridge2} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"stopped">>}, jsx:decode(Bridge2)), + ?assertMatch(#{<<"status">> := <<"stopped">>}, emqx_json:decode(Bridge2, [return_maps])), %% enable again {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), + ?assertMatch(#{<<"status">> := <<"connected">>}, emqx_json:decode(Bridge3, [return_maps])), %% enable 
an already started bridge {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), <<"">>), {ok, 200, Bridge3} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge3)), + ?assertMatch(#{<<"status">> := <<"connected">>}, emqx_json:decode(Bridge3, [return_maps])), %% disable it again {ok, 204, <<>>} = request(put, enable_path(false, BridgeID), <<"">>), - {ok, 403, Res} = request(post, operation_path(node, restart, BridgeID), <<"">>), + %% bad param + {ok, 404, _} = request(put, enable_path(foo, BridgeID), <<"">>), + {ok, 404, _} = request(put, enable_path(true, "foo"), <<"">>), + {ok, 404, _} = request(put, enable_path(true, "webhook:foo"), <<"">>), + + {ok, 400, Res} = request(post, operation_path(node, start, BridgeID), <<"">>), ?assertEqual( - <<"{\"code\":\"FORBIDDEN_REQUEST\",\"message\":\"forbidden operation: bridge disabled\"}">>, + <<"{\"code\":\"BAD_REQUEST\",\"message\":\"Forbidden operation, bridge not enabled\"}">>, Res ), + {ok, 400, Res} = request(post, operation_path(cluster, start, BridgeID), <<"">>), %% enable a stopped bridge {ok, 204, <<>>} = request(put, enable_path(true, BridgeID), <<"">>), {ok, 200, Bridge4} = request(get, uri(["bridges", BridgeID]), []), - ?assertMatch(#{<<"status">> := <<"connected">>}, jsx:decode(Bridge4)), + ?assertMatch(#{<<"status">> := <<"connected">>}, emqx_json:decode(Bridge4, [return_maps])), %% delete the bridge {ok, 204, <<>>} = request(delete, uri(["bridges", BridgeID]), []), {ok, 200, <<"[]">>} = request(get, uri(["bridges"]), []). 
@@ -635,7 +700,7 @@ t_reset_bridges(Config) -> <<"status">> := <<"connected">>, <<"node_status">> := [_ | _], <<"url">> := URL1 - } = jsx:decode(Bridge), + } = emqx_json:decode(Bridge, [return_maps]), BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), {ok, 204, <<>>} = request(put, uri(["bridges", BridgeID, "metrics/reset"]), []), @@ -703,7 +768,7 @@ t_bridges_probe(Config) -> <<"code">> := <<"TEST_FAILED">>, <<"message">> := _ }, - jsx:decode(NxDomain) + emqx_json:decode(NxDomain, [return_maps]) ), {ok, 204, _} = request( @@ -720,9 +785,81 @@ t_bridges_probe(Config) -> ?assertMatch( #{ <<"code">> := <<"TEST_FAILED">>, - <<"message">> := <<"econnrefused">> + <<"message">> := <<"Connection refused">> }, - jsx:decode(ConnRefused) + emqx_json:decode(ConnRefused, [return_maps]) + ), + + {ok, 400, HostNotFound} = request( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"nohost:2883">>) + ), + ?assertMatch( + #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Host not found">> + }, + emqx_json:decode(HostNotFound, [return_maps]) + ), + + AuthnConfig = #{ + <<"mechanism">> => <<"password_based">>, + <<"backend">> => <<"built_in_database">>, + <<"user_id_type">> => <<"username">> + }, + Chain = 'mqtt:global', + emqx:update_config( + [authentication], + {create_authenticator, Chain, AuthnConfig} + ), + User = #{user_id => <<"u">>, password => <<"p">>}, + AuthenticatorID = <<"password_based:built_in_database">>, + {ok, _} = emqx_authentication:add_user( + Chain, + AuthenticatorID, + User + ), + + {ok, 400, Unauthorized} = request( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:1883">>)#{<<"proto_ver">> => <<"v4">>} + ), + ?assertMatch( + #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Unauthorized client">> + }, + emqx_json:decode(Unauthorized, [return_maps]) + ), + + {ok, 400, Malformed} = request( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:1883">>)#{ + <<"proto_ver">> => <<"v4">>, <<"password">> => 
<<"mySecret">>, <<"username">> => <<"u">> + } + ), + ?assertMatch( + #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Malformed username or password">> + }, + emqx_json:decode(Malformed, [return_maps]) + ), + + {ok, 400, NotAuthorized} = request( + post, + uri(["bridges_probe"]), + ?MQTT_BRIDGE(<<"127.0.0.1:1883">>) + ), + ?assertMatch( + #{ + <<"code">> := <<"TEST_FAILED">>, + <<"message">> := <<"Not authorized">> + }, + emqx_json:decode(NotAuthorized, [return_maps]) ), {ok, 400, BadReq} = request( @@ -730,7 +867,7 @@ t_bridges_probe(Config) -> uri(["bridges_probe"]), ?BRIDGE(<<"bad_bridge">>, <<"unknown_type">>) ), - ?assertMatch(#{<<"code">> := <<"BAD_REQUEST">>}, jsx:decode(BadReq)), + ?assertMatch(#{<<"code">> := <<"BAD_REQUEST">>}, emqx_json:decode(BadReq, [return_maps])), ok. t_metrics(Config) -> @@ -756,7 +893,7 @@ t_metrics(Config) -> <<"status">> := _, <<"node_status">> := [_ | _], <<"url">> := URL1 - } = jsx:decode(Bridge), + } = emqx_json:decode(Bridge, [return_maps]), BridgeID = emqx_bridge_resource:bridge_id(?BRIDGE_TYPE, Name), @@ -767,12 +904,12 @@ t_metrics(Config) -> <<"metrics">> := #{<<"success">> := 0}, <<"node_metrics">> := [_ | _] }, - jsx:decode(Bridge1Str) + emqx_json:decode(Bridge1Str, [return_maps]) ), %% check that the bridge doesn't contain metrics anymore {ok, 200, Bridge2Str} = request(get, uri(["bridges", BridgeID]), []), - Decoded = jsx:decode(Bridge2Str), + Decoded = emqx_json:decode(Bridge2Str, [return_maps]), ?assertNot(maps:is_key(<<"metrics">>, Decoded)), ?assertNot(maps:is_key(<<"node_metrics">>, Decoded)), @@ -802,7 +939,7 @@ t_metrics(Config) -> <<"metrics">> := #{<<"success">> := _}, <<"node_metrics">> := [_ | _] }, - jsx:decode(Bridge3Str) + emqx_json:decode(Bridge3Str, [return_maps]) ), %% check for non-empty metrics when listing all bridges @@ -814,7 +951,7 @@ t_metrics(Config) -> <<"node_metrics">> := [_ | _] } ], - jsx:decode(BridgesStr) + emqx_json:decode(BridgesStr, [return_maps]) ), ok. 
diff --git a/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl index 1bb477dad..e2c9382db 100644 --- a/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl +++ b/apps/emqx_bridge/test/emqx_bridge_mqtt_SUITE.erl @@ -359,9 +359,13 @@ t_mqtt_conn_bridge_egress(_) -> ?assertMatch(<>, Msg#message.from), %% verify the metrics of the bridge - ?assertMetrics( - #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, - BridgeIDEgress + ?retry( + _Interval = 200, + _Attempts = 5, + ?assertMetrics( + #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, + BridgeIDEgress + ) ), %% delete the bridge @@ -402,9 +406,13 @@ t_mqtt_conn_bridge_egress_no_payload_template(_) -> ?assertMatch(#{<<"payload">> := Payload}, jsx:decode(Msg#message.payload)), %% verify the metrics of the bridge - ?assertMetrics( - #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, - BridgeIDEgress + ?retry( + _Interval = 200, + _Attempts = 5, + ?assertMetrics( + #{<<"matched">> := 1, <<"success">> := 1, <<"failed">> := 0}, + BridgeIDEgress + ) ), %% delete the bridge diff --git a/apps/emqx_conf/src/emqx_cluster_rpc.erl b/apps/emqx_conf/src/emqx_cluster_rpc.erl index c285e09b8..b2f06f35a 100644 --- a/apps/emqx_conf/src/emqx_cluster_rpc.erl +++ b/apps/emqx_conf/src/emqx_cluster_rpc.erl @@ -512,7 +512,7 @@ do_alarm(Fun, Res, #{tnx_id := Id} = Meta) -> wait_for_all_nodes_commit(TnxId, Delay, Remain) -> Lagging = lagging_nodes(TnxId), - Stopped = Lagging -- mria_mnesia:running_nodes(), + Stopped = Lagging -- mria:running_nodes(), case Lagging -- Stopped of [] when Stopped =:= [] -> ok; @@ -537,7 +537,7 @@ wait_for_nodes_commit(RequiredSyncs, TnxId, Delay, Remain) -> [] -> ok; Lagging -> - Stopped = Lagging -- mria_mnesia:running_nodes(), + Stopped = Lagging -- mria:running_nodes(), case Stopped of [] -> {peers_lagging, Lagging}; _ -> {stopped_nodes, Stopped} diff --git a/apps/emqx_conf/src/emqx_conf.app.src 
b/apps/emqx_conf/src/emqx_conf.app.src index 1d04dc362..fbbffba1f 100644 --- a/apps/emqx_conf/src/emqx_conf.app.src +++ b/apps/emqx_conf/src/emqx_conf.app.src @@ -1,6 +1,6 @@ {application, emqx_conf, [ {description, "EMQX configuration management"}, - {vsn, "0.1.13"}, + {vsn, "0.1.14"}, {registered, []}, {mod, {emqx_conf_app, []}}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_conf/src/emqx_conf.erl b/apps/emqx_conf/src/emqx_conf.erl index 00648db31..3da9f0457 100644 --- a/apps/emqx_conf/src/emqx_conf.erl +++ b/apps/emqx_conf/src/emqx_conf.erl @@ -146,8 +146,7 @@ dump_schema(Dir, SchemaModule, I18nFile) -> fun(Lang) -> gen_config_md(Dir, I18nFile, SchemaModule, Lang), gen_api_schema_json(Dir, I18nFile, Lang), - ExampleDir = filename:join(filename:dirname(filename:dirname(I18nFile)), "etc"), - gen_example_conf(ExampleDir, I18nFile, SchemaModule, Lang) + gen_example_conf(Dir, I18nFile, SchemaModule, Lang) end, [en, zh] ), diff --git a/apps/emqx_conf/src/emqx_conf_cli.erl b/apps/emqx_conf/src/emqx_conf_cli.erl index 5c2fd9e18..8e109a1e6 100644 --- a/apps/emqx_conf/src/emqx_conf_cli.erl +++ b/apps/emqx_conf/src/emqx_conf_cli.erl @@ -33,7 +33,7 @@ admins(["status"]) -> status(); admins(["skip"]) -> status(), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), lists:foreach(fun emqx_cluster_rpc:skip_failed_commit/1, Nodes), status(); admins(["skip", Node0]) -> @@ -46,13 +46,13 @@ admins(["tnxid", TnxId0]) -> emqx_ctl:print("~p~n", [emqx_cluster_rpc:query(TnxId)]); admins(["fast_forward"]) -> status(), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), TnxId = emqx_cluster_rpc:latest_tnx_id(), lists:foreach(fun(N) -> emqx_cluster_rpc:fast_forward_to_commit(N, TnxId) end, Nodes), status(); admins(["fast_forward", ToTnxId]) -> status(), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), TnxId = list_to_integer(ToTnxId), lists:foreach(fun(N) -> emqx_cluster_rpc:fast_forward_to_commit(N, TnxId) 
end, Nodes), status(); diff --git a/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl b/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl index 4c449f580..f7d3c76fd 100644 --- a/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl +++ b/apps/emqx_conf/test/emqx_cluster_rpc_SUITE.erl @@ -48,14 +48,14 @@ init_per_suite(Config) -> meck:new(emqx_alarm, [non_strict, passthrough, no_link]), meck:expect(emqx_alarm, activate, 3, ok), meck:expect(emqx_alarm, deactivate, 3, ok), - meck:new(mria_mnesia, [non_strict, passthrough, no_link]), - meck:expect(mria_mnesia, running_nodes, 0, [?NODE1, {node(), ?NODE2}, {node(), ?NODE3}]), + meck:new(mria, [non_strict, passthrough, no_link]), + meck:expect(mria, running_nodes, 0, [?NODE1, {node(), ?NODE2}, {node(), ?NODE3}]), Config. end_per_suite(_Config) -> ekka:stop(), mria:stop(), - meck:unload(mria_mnesia), + meck:unload(mria), mria_mnesia:delete_schema(), meck:unload(emqx_alarm), ok. diff --git a/apps/emqx_connector/i18n/emqx_connector_mongo.conf b/apps/emqx_connector/i18n/emqx_connector_mongo.conf index 1f00083a4..6a2511ec8 100644 --- a/apps/emqx_connector/i18n/emqx_connector_mongo.conf +++ b/apps/emqx_connector/i18n/emqx_connector_mongo.conf @@ -177,8 +177,8 @@ The MongoDB default port 27017 is used if `[:Port]` is not specified.""" heartbeat_period { desc { - en: "Controls when the driver checks the state of the MongoDB deployment. Specify the interval between checks, counted from the end of the previous check until the beginning of the next one." - zh: "控制驱动程序何时检查MongoDB部署的状态。指定检查的间隔时间,从上一次检查结束到下一次检查开始计算。" + en: "Controls when the driver checks the state of the MongoDB deployment. Specify the interval between checks, counted from the end of the previous check until the beginning of the next one. If the number of connections is increased (which will happen, for example, if you increase the pool size), you may need to increase this period as well to avoid creating too many log entries in the MongoDB log file." 
+ zh: "控制驱动程序何时检查MongoDB部署的状态。指定检查的间隔时间,从上一次检查结束到下一次检查开始计算。如果连接数增加(例如,如果你增加池子的大小,就会发生这种情况),你可能也需要增加这个周期,以避免在MongoDB日志文件中创建太多的日志条目。" } label { en: "Heartbeat period" diff --git a/apps/emqx_connector/include/emqx_connector.hrl b/apps/emqx_connector/include/emqx_connector.hrl index 82c946cfc..cdb6ddd92 100644 --- a/apps/emqx_connector/include/emqx_connector.hrl +++ b/apps/emqx_connector/include/emqx_connector.hrl @@ -23,6 +23,7 @@ -define(MONGO_DEFAULT_PORT, 27017). -define(REDIS_DEFAULT_PORT, 6379). -define(PGSQL_DEFAULT_PORT, 5432). +-define(CLICKHOUSE_DEFAULT_PORT, 8123). -define(AUTO_RECONNECT_INTERVAL, 2). diff --git a/apps/emqx_connector/src/emqx_connector.app.src b/apps/emqx_connector/src/emqx_connector.app.src index f0d51a9ce..4f7ff0f25 100644 --- a/apps/emqx_connector/src/emqx_connector.app.src +++ b/apps/emqx_connector/src/emqx_connector.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_connector, [ {description, "EMQX Data Integration Connectors"}, - {vsn, "0.1.16"}, + {vsn, "0.1.17"}, {registered, []}, {mod, {emqx_connector_app, []}}, {applications, [ diff --git a/apps/emqx_connector/src/emqx_connector_http.erl b/apps/emqx_connector/src/emqx_connector_http.erl index 09fa988d3..401fc8812 100644 --- a/apps/emqx_connector/src/emqx_connector_http.erl +++ b/apps/emqx_connector/src/emqx_connector_http.erl @@ -328,15 +328,17 @@ on_query( {ok, StatusCode, Headers} -> ?SLOG(error, #{ msg => "http connector do request, received error response", - request => redact(NRequest), + note => "the body will be redacted due to security reasons", + request => redact_request(NRequest), connector => InstId, status_code => StatusCode }), {error, #{status_code => StatusCode, headers => Headers}}; {ok, StatusCode, Headers, Body} -> ?SLOG(error, #{ - msg => "http connector do request, received error response", - request => redact(NRequest), + msg => "http connector do request, received error response.", + note => "the body will be redacted due to security reasons", 
+ request => redact_request(NRequest), connector => InstId, status_code => StatusCode }), @@ -603,6 +605,15 @@ is_sensitive_key(_) -> redact(Data) -> emqx_misc:redact(Data, fun is_sensitive_key/1). +%% because the body may contain some sensitive data +%% and at the same time the redact function will not scan the binary data +%% and we also can't know the body format and where the sensitive data will be +%% so the easy way to keep data security is redacted the whole body +redact_request({Path, Headers}) -> + {Path, redact(Headers)}; +redact_request({Path, Headers, _Body}) -> + {Path, redact(Headers), <<"******">>}. + -ifdef(TEST). -include_lib("eunit/include/eunit.hrl"). diff --git a/apps/emqx_connector/src/emqx_connector_mongo.erl b/apps/emqx_connector/src/emqx_connector_mongo.erl index 1b0bcf94d..8804ebaf2 100644 --- a/apps/emqx_connector/src/emqx_connector_mongo.erl +++ b/apps/emqx_connector/src/emqx_connector_mongo.erl @@ -106,7 +106,7 @@ fields(topology) -> {socket_timeout_ms, duration("socket_timeout")}, {server_selection_timeout_ms, duration("server_selection_timeout")}, {wait_queue_timeout_ms, duration("wait_queue_timeout")}, - {heartbeat_frequency_ms, duration("heartbeat_period")}, + {heartbeat_frequency_ms, fun heartbeat_frequency_ms/1}, {min_heartbeat_frequency_ms, duration("min_heartbeat_period")} ]. @@ -407,6 +407,12 @@ duration(Desc) -> desc => ?DESC(Desc) }. +heartbeat_frequency_ms(type) -> emqx_schema:duration_ms(); +heartbeat_frequency_ms(desc) -> ?DESC("heartbeat_period"); +heartbeat_frequency_ms(default) -> 200000; +heartbeat_frequency_ms(validator) -> [?MIN(1)]; +heartbeat_frequency_ms(_) -> undefined. 
+ max_overflow(type) -> non_neg_integer(); max_overflow(desc) -> ?DESC("max_overflow"); max_overflow(default) -> 0; diff --git a/apps/emqx_connector/src/emqx_connector_mysql.erl b/apps/emqx_connector/src/emqx_connector_mysql.erl index e06d6a9d7..68ec59894 100644 --- a/apps/emqx_connector/src/emqx_connector_mysql.erl +++ b/apps/emqx_connector/src/emqx_connector_mysql.erl @@ -391,14 +391,18 @@ proc_sql_params(TypeOrKey, SQLOrData, Params, #{params_tokens := ParamsTokens}) end. on_batch_insert(InstId, BatchReqs, InsertPart, Tokens, State) -> - SQL = emqx_plugin_libs_rule:proc_batch_sql(BatchReqs, InsertPart, Tokens), - on_sql_query(InstId, query, SQL, [], default_timeout, State). + ValuesPart = lists:join($,, [ + emqx_placeholder:proc_param_str(Tokens, Msg, fun emqx_placeholder:quote_mysql/1) + || {_, Msg} <- BatchReqs + ]), + Query = [InsertPart, <<" values ">> | ValuesPart], + on_sql_query(InstId, query, Query, no_params, default_timeout, State). on_sql_query( InstId, SQLFunc, SQLOrKey, - Data, + Params, Timeout, #{poolname := PoolName} = State ) -> @@ -409,9 +413,9 @@ on_sql_query( {ok, Conn} -> ?tp( mysql_connector_send_query, - #{sql_func => SQLFunc, sql_or_key => SQLOrKey, data => Data} + #{sql_func => SQLFunc, sql_or_key => SQLOrKey, data => Params} ), - do_sql_query(SQLFunc, Conn, SQLOrKey, Data, Timeout, LogMeta); + do_sql_query(SQLFunc, Conn, SQLOrKey, Params, Timeout, LogMeta); {error, disconnected} -> ?SLOG( error, @@ -423,8 +427,8 @@ on_sql_query( {error, {recoverable_error, disconnected}} end. 
-do_sql_query(SQLFunc, Conn, SQLOrKey, Data, Timeout, LogMeta) -> - try mysql:SQLFunc(Conn, SQLOrKey, Data, Timeout) of +do_sql_query(SQLFunc, Conn, SQLOrKey, Params, Timeout, LogMeta) -> + try mysql:SQLFunc(Conn, SQLOrKey, Params, no_filtermap_fun, Timeout) of {error, disconnected} -> ?SLOG( error, @@ -466,7 +470,7 @@ do_sql_query(SQLFunc, Conn, SQLOrKey, Data, Timeout, LogMeta) -> error:badarg -> ?SLOG( error, - LogMeta#{msg => "mysql_connector_invalid_params", params => Data} + LogMeta#{msg => "mysql_connector_invalid_params", params => Params} ), - {error, {unrecoverable_error, {invalid_params, Data}}} + {error, {unrecoverable_error, {invalid_params, Params}}} end. diff --git a/apps/emqx_dashboard/src/emqx_dashboard.app.src b/apps/emqx_dashboard/src/emqx_dashboard.app.src index 44260cbe1..3970d76e4 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard.app.src +++ b/apps/emqx_dashboard/src/emqx_dashboard.app.src @@ -2,7 +2,7 @@ {application, emqx_dashboard, [ {description, "EMQX Web Dashboard"}, % strict semver, bump manually! - {vsn, "5.0.14"}, + {vsn, "5.0.15"}, {modules, []}, {registered, [emqx_dashboard_sup]}, {applications, [kernel, stdlib, mnesia, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl b/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl index 23ac4f35e..019feff51 100644 --- a/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl +++ b/apps/emqx_dashboard/src/emqx_dashboard_monitor.erl @@ -126,7 +126,7 @@ current_rate() -> (_Node, Error) -> Error end, - case lists:foldl(Fun, #{}, mria_mnesia:cluster_nodes(running)) of + case lists:foldl(Fun, #{}, mria:cluster_nodes(running)) of {badrpc, Reason} -> {badrpc, Reason}; Rate -> @@ -205,7 +205,7 @@ do_call(Request) -> gen_server:call(?MODULE, Request, 5000). 
do_sample(all, Time) -> - do_sample(mria_mnesia:cluster_nodes(running), Time, #{}); + do_sample(mria:cluster_nodes(running), Time, #{}); do_sample(Node, Time) when Node == node() -> MS = match_spec(Time), internal_format(ets:select(?TAB, MS)); diff --git a/apps/emqx_exhook/src/emqx_exhook.app.src b/apps/emqx_exhook/src/emqx_exhook.app.src index 04e0a57db..8ca15a907 100644 --- a/apps/emqx_exhook/src/emqx_exhook.app.src +++ b/apps/emqx_exhook/src/emqx_exhook.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_exhook, [ {description, "EMQX Extension for Hook"}, - {vsn, "5.0.10"}, + {vsn, "5.0.11"}, {modules, []}, {registered, []}, {mod, {emqx_exhook_app, []}}, diff --git a/apps/emqx_exhook/src/emqx_exhook_api.erl b/apps/emqx_exhook/src/emqx_exhook_api.erl index bcfc68269..aa5d1897f 100644 --- a/apps/emqx_exhook/src/emqx_exhook_api.erl +++ b/apps/emqx_exhook/src/emqx_exhook_api.erl @@ -471,7 +471,7 @@ fill_server_hooks_info([], _Name, _Default, MetricsL) -> -spec call_cluster(fun(([node()]) -> emqx_rpc:erpc_multicall(A))) -> [{node(), A | {error, _Err}}]. call_cluster(Fun) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), Ret = Fun(Nodes), lists:zip(Nodes, lists:map(fun emqx_rpc:unwrap_erpc/1, Ret)). 
diff --git a/apps/emqx_gateway/src/emqx_gateway.app.src b/apps/emqx_gateway/src/emqx_gateway.app.src index 787af7429..59eed7f3f 100644 --- a/apps/emqx_gateway/src/emqx_gateway.app.src +++ b/apps/emqx_gateway/src/emqx_gateway.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_gateway, [ {description, "The Gateway management application"}, - {vsn, "0.1.12"}, + {vsn, "0.1.13"}, {registered, []}, {mod, {emqx_gateway_app, []}}, {applications, [kernel, stdlib, grpc, emqx, emqx_authn, emqx_ctl]}, diff --git a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl index 1b4f2e0ac..43c8156d6 100644 --- a/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl +++ b/apps/emqx_gateway/src/emqx_gateway_api_listeners.erl @@ -283,7 +283,7 @@ get_cluster_listeners_info(GwName) -> ). listeners_cluster_status(Listeners) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_gateway_api_listeners_proto_v1:listeners_cluster_status(Nodes, Listeners) of {Results, []} -> Results; diff --git a/apps/emqx_gateway/src/emqx_gateway_cm.erl b/apps/emqx_gateway/src/emqx_gateway_cm.erl index 4719b1da8..599493d97 100644 --- a/apps/emqx_gateway/src/emqx_gateway_cm.erl +++ b/apps/emqx_gateway/src/emqx_gateway_cm.erl @@ -214,7 +214,7 @@ get_chan_info(GwName, ClientId, ChanPid) -> -spec lookup_by_clientid(gateway_name(), emqx_types:clientid()) -> [pid()]. lookup_by_clientid(GwName, ClientId) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_gateway_cm_proto_v1:lookup_by_clientid( Nodes, GwName, ClientId diff --git a/apps/emqx_gateway/src/emqx_gateway_http.erl b/apps/emqx_gateway/src/emqx_gateway_http.erl index d80e3433f..a0155a126 100644 --- a/apps/emqx_gateway/src/emqx_gateway_http.erl +++ b/apps/emqx_gateway/src/emqx_gateway_http.erl @@ -148,7 +148,7 @@ gateway_status(GwName) -> end. 
cluster_gateway_status(GwName) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_gateway_http_proto_v1:get_cluster_status(Nodes, GwName) of {Results, []} -> Results; diff --git a/apps/emqx_gateway/src/mqttsn/emqx_sn_channel.erl b/apps/emqx_gateway/src/mqttsn/emqx_sn_channel.erl index 29dce90ee..23d07113c 100644 --- a/apps/emqx_gateway/src/mqttsn/emqx_sn_channel.erl +++ b/apps/emqx_gateway/src/mqttsn/emqx_sn_channel.erl @@ -389,7 +389,12 @@ process_connect( clientinfo = ClientInfo } ) -> - SessFun = fun(_, _) -> emqx_session:init(#{max_inflight => 1}) end, + SessFun = fun(ClientInfoT, _) -> + Conf = emqx_cm:get_session_confs( + ClientInfoT, #{receive_maximum => 1, expiry_interval => 0} + ), + emqx_session:init(Conf) + end, case emqx_gateway_ctx:open_session( Ctx, diff --git a/apps/emqx_machine/src/emqx_machine.app.src b/apps/emqx_machine/src/emqx_machine.app.src index 7c62b0685..0bee30e35 100644 --- a/apps/emqx_machine/src/emqx_machine.app.src +++ b/apps/emqx_machine/src/emqx_machine.app.src @@ -3,7 +3,7 @@ {id, "emqx_machine"}, {description, "The EMQX Machine"}, % strict semver, bump manually! - {vsn, "0.2.0"}, + {vsn, "0.2.1"}, {modules, []}, {registered, []}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/apps/emqx_machine/src/emqx_machine.erl b/apps/emqx_machine/src/emqx_machine.erl index 9dc3fdc54..243c4bb8c 100644 --- a/apps/emqx_machine/src/emqx_machine.erl +++ b/apps/emqx_machine/src/emqx_machine.erl @@ -19,6 +19,7 @@ -export([ start/0, graceful_shutdown/0, + brutal_shutdown/0, is_ready/0, node_status/0, @@ -47,6 +48,10 @@ start() -> graceful_shutdown() -> emqx_machine_terminator:graceful_wait(). +%% only used when failed to boot +brutal_shutdown() -> + init:stop(). 
+ set_backtrace_depth() -> {ok, Depth} = application:get_env(emqx_machine, backtrace_depth), _ = erlang:system_flag(backtrace_depth, Depth), diff --git a/apps/emqx_machine/src/emqx_machine_boot.erl b/apps/emqx_machine/src/emqx_machine_boot.erl index 4b3e5ea7d..82b3d602f 100644 --- a/apps/emqx_machine/src/emqx_machine_boot.erl +++ b/apps/emqx_machine/src/emqx_machine_boot.erl @@ -21,6 +21,7 @@ -export([stop_apps/0, ensure_apps_started/0]). -export([sorted_reboot_apps/0]). -export([start_autocluster/0]). +-export([stop_port_apps/0]). -dialyzer({no_match, [basic_reboot_apps/0]}). @@ -61,6 +62,20 @@ stop_apps() -> _ = emqx_alarm_handler:unload(), lists:foreach(fun stop_one_app/1, lists:reverse(sorted_reboot_apps())). +%% Those port apps are terminated after the main apps +%% Don't need to stop when reboot. +stop_port_apps() -> + Loaded = application:loaded_applications(), + lists:foreach( + fun(App) -> + case lists:keymember(App, 1, Loaded) of + true -> stop_one_app(App); + false -> ok + end + end, + [os_mon, jq] + ). 
+ stop_one_app(App) -> ?SLOG(debug, #{msg => "stopping_app", app => App}), try diff --git a/apps/emqx_machine/src/emqx_machine_terminator.erl b/apps/emqx_machine/src/emqx_machine_terminator.erl index 314b8c705..7120cc19b 100644 --- a/apps/emqx_machine/src/emqx_machine_terminator.erl +++ b/apps/emqx_machine/src/emqx_machine_terminator.erl @@ -87,7 +87,8 @@ handle_cast(_Cast, State) -> handle_call(?DO_IT, _From, State) -> try - emqx_machine_boot:stop_apps() + emqx_machine_boot:stop_apps(), + emqx_machine_boot:stop_port_apps() catch C:E:St -> Apps = [element(1, A) || A <- application:which_applications()], diff --git a/apps/emqx_management/src/emqx_management.app.src b/apps/emqx_management/src/emqx_management.app.src index 08de7b670..966358f47 100644 --- a/apps/emqx_management/src/emqx_management.app.src +++ b/apps/emqx_management/src/emqx_management.app.src @@ -2,7 +2,7 @@ {application, emqx_management, [ {description, "EMQX Management API and CLI"}, % strict semver, bump manually! - {vsn, "5.0.15"}, + {vsn, "5.0.16"}, {modules, []}, {registered, [emqx_management_sup]}, {applications, [kernel, stdlib, emqx_plugins, minirest, emqx, emqx_ctl]}, diff --git a/apps/emqx_management/src/emqx_mgmt.erl b/apps/emqx_management/src/emqx_mgmt.erl index dfe1ff310..5ba12646f 100644 --- a/apps/emqx_management/src/emqx_mgmt.erl +++ b/apps/emqx_management/src/emqx_mgmt.erl @@ -112,8 +112,8 @@ %%-------------------------------------------------------------------- list_nodes() -> - Running = mria_mnesia:cluster_nodes(running), - Stopped = mria_mnesia:cluster_nodes(stopped), + Running = mria:cluster_nodes(running), + Stopped = mria:cluster_nodes(stopped), DownNodes = lists:map(fun stopped_node_info/1, Stopped), [{Node, Info} || #{node := Node} = Info <- node_info(Running)] ++ DownNodes. 
@@ -199,7 +199,7 @@ vm_stats() -> %%-------------------------------------------------------------------- list_brokers() -> - Running = mria_mnesia:running_nodes(), + Running = mria:running_nodes(), [{Node, Broker} || #{node := Node} = Broker <- broker_info(Running)]. lookup_broker(Node) -> @@ -223,7 +223,7 @@ broker_info(Nodes) -> %%-------------------------------------------------------------------- get_metrics() -> - nodes_info_count([get_metrics(Node) || Node <- mria_mnesia:running_nodes()]). + nodes_info_count([get_metrics(Node) || Node <- mria:running_nodes()]). get_metrics(Node) -> unwrap_rpc(emqx_proto_v1:get_metrics(Node)). @@ -243,7 +243,7 @@ get_stats() -> Stats = get_stats(Node), delete_keys(Stats, GlobalStatsKeys) end - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ]), GlobalStats = maps:with(GlobalStatsKeys, maps:from_list(get_stats(node()))), maps:merge(CountStats, GlobalStats). @@ -275,12 +275,12 @@ nodes_info_count(PropList) -> lookup_client({clientid, ClientId}, FormatFun) -> lists:append([ lookup_client(Node, {clientid, ClientId}, FormatFun) - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ]); lookup_client({username, Username}, FormatFun) -> lists:append([ lookup_client(Node, {username, Username}, FormatFun) - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ]). lookup_client(Node, Key, FormatFun) -> @@ -307,7 +307,7 @@ kickout_client(ClientId) -> [] -> {error, not_found}; _ -> - Results = [kickout_client(Node, ClientId) || Node <- mria_mnesia:running_nodes()], + Results = [kickout_client(Node, ClientId) || Node <- mria:running_nodes()], check_results(Results) end. 
@@ -322,7 +322,7 @@ list_client_subscriptions(ClientId) -> [] -> {error, not_found}; _ -> - Results = [client_subscriptions(Node, ClientId) || Node <- mria_mnesia:running_nodes()], + Results = [client_subscriptions(Node, ClientId) || Node <- mria:running_nodes()], Filter = fun ({error, _}) -> @@ -340,18 +340,18 @@ client_subscriptions(Node, ClientId) -> {Node, unwrap_rpc(emqx_broker_proto_v1:list_client_subscriptions(Node, ClientId))}. clean_authz_cache(ClientId) -> - Results = [clean_authz_cache(Node, ClientId) || Node <- mria_mnesia:running_nodes()], + Results = [clean_authz_cache(Node, ClientId) || Node <- mria:running_nodes()], check_results(Results). clean_authz_cache(Node, ClientId) -> unwrap_rpc(emqx_proto_v1:clean_authz_cache(Node, ClientId)). clean_authz_cache_all() -> - Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria_mnesia:running_nodes()], + Results = [{Node, clean_authz_cache_all(Node)} || Node <- mria:running_nodes()], wrap_results(Results). clean_pem_cache_all() -> - Results = [{Node, clean_pem_cache_all(Node)} || Node <- mria_mnesia:running_nodes()], + Results = [{Node, clean_pem_cache_all(Node)} || Node <- mria:running_nodes()], wrap_results(Results). wrap_results(Results) -> @@ -379,7 +379,7 @@ set_keepalive(_ClientId, _Interval) -> %% @private call_client(ClientId, Req) -> - Results = [call_client(Node, ClientId, Req) || Node <- mria_mnesia:running_nodes()], + Results = [call_client(Node, ClientId, Req) || Node <- mria:running_nodes()], Expected = lists:filter( fun ({error, _}) -> false; @@ -428,7 +428,7 @@ list_subscriptions(Node) -> list_subscriptions_via_topic(Topic, FormatFun) -> lists:append([ list_subscriptions_via_topic(Node, Topic, FormatFun) - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ]). 
list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> @@ -442,7 +442,7 @@ list_subscriptions_via_topic(Node, Topic, _FormatFun = {M, F}) -> %%-------------------------------------------------------------------- subscribe(ClientId, TopicTables) -> - subscribe(mria_mnesia:running_nodes(), ClientId, TopicTables). + subscribe(mria:running_nodes(), ClientId, TopicTables). subscribe([Node | Nodes], ClientId, TopicTables) -> case unwrap_rpc(emqx_management_proto_v3:subscribe(Node, ClientId, TopicTables)) of @@ -467,7 +467,7 @@ publish(Msg) -> -spec unsubscribe(emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe(ClientId, Topic) -> - unsubscribe(mria_mnesia:running_nodes(), ClientId, Topic). + unsubscribe(mria:running_nodes(), ClientId, Topic). -spec unsubscribe([node()], emqx_types:clientid(), emqx_types:topic()) -> {unsubscribe, _} | {error, channel_not_found}. @@ -490,7 +490,7 @@ do_unsubscribe(ClientId, Topic) -> -spec unsubscribe_batch(emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe, _} | {error, channel_not_found}. unsubscribe_batch(ClientId, Topics) -> - unsubscribe_batch(mria_mnesia:running_nodes(), ClientId, Topics). + unsubscribe_batch(mria:running_nodes(), ClientId, Topics). -spec unsubscribe_batch([node()], emqx_types:clientid(), [emqx_types:topic()]) -> {unsubscribe_batch, _} | {error, channel_not_found}. @@ -515,7 +515,7 @@ do_unsubscribe_batch(ClientId, Topics) -> %%-------------------------------------------------------------------- get_alarms(Type) -> - [{Node, get_alarms(Node, Type)} || Node <- mria_mnesia:running_nodes()]. + [{Node, get_alarms(Node, Type)} || Node <- mria:running_nodes()]. get_alarms(Node, Type) -> add_duration_field(unwrap_rpc(emqx_proto_v1:get_alarms(Node, Type))). @@ -524,7 +524,7 @@ deactivate(Node, Name) -> unwrap_rpc(emqx_proto_v1:deactivate_alarm(Node, Name)). 
delete_all_deactivated_alarms() -> - [delete_all_deactivated_alarms(Node) || Node <- mria_mnesia:running_nodes()]. + [delete_all_deactivated_alarms(Node) || Node <- mria:running_nodes()]. delete_all_deactivated_alarms(Node) -> unwrap_rpc(emqx_proto_v1:delete_all_deactivated_alarms(Node)). diff --git a/apps/emqx_management/src/emqx_mgmt_api.erl b/apps/emqx_management/src/emqx_mgmt_api.erl index a0a40533d..c77752f7d 100644 --- a/apps/emqx_management/src/emqx_mgmt_api.erl +++ b/apps/emqx_management/src/emqx_mgmt_api.erl @@ -163,7 +163,7 @@ cluster_query(Tab, QString, QSchema, MsFun, FmtFun) -> {error, page_limit_invalid}; Meta -> {_CodCnt, NQString} = parse_qstring(QString, QSchema), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), ResultAcc = init_query_result(), QueryState = init_query_state(Tab, NQString, MsFun, Meta), NResultAcc = do_cluster_query( diff --git a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl index 37e94200e..68bb6c81d 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_cluster.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_cluster.erl @@ -101,7 +101,7 @@ cluster_info(get, _) -> ClusterName = application:get_env(ekka, cluster_name, emqxcl), Info = #{ name => ClusterName, - nodes => mria_mnesia:running_nodes(), + nodes => mria:running_nodes(), self => node() }, {200, Info}. 
diff --git a/apps/emqx_management/src/emqx_mgmt_api_configs.erl b/apps/emqx_management/src/emqx_mgmt_api_configs.erl index d9cdf6477..de93a1071 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_configs.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_configs.erl @@ -279,7 +279,7 @@ configs(get, Params, _Req) -> QS = maps:get(query_string, Params, #{}), Node = maps:get(<<"node">>, QS, node()), case - lists:member(Node, mria_mnesia:running_nodes()) andalso + lists:member(Node, mria:running_nodes()) andalso emqx_management_proto_v2:get_full_config(Node) of false -> diff --git a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl index 7bf68ee4d..c126cfe19 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_listeners.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_listeners.erl @@ -483,7 +483,7 @@ err_msg_str(Reason) -> io_lib:format("~p", [Reason]). list_listeners() -> - [list_listeners(Node) || Node <- mria_mnesia:running_nodes()]. + [list_listeners(Node) || Node <- mria:running_nodes()]. list_listeners(Node) -> wrap_rpc(emqx_management_proto_v2:list_listeners(Node)). diff --git a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl index 72b616fae..1c5c8f62a 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_metrics.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_metrics.erl @@ -59,7 +59,7 @@ metrics(get, #{query_string := Qs}) -> maps:from_list( emqx_mgmt:get_metrics(Node) ++ [{node, Node}] ) - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ], {200, Data} end. 
diff --git a/apps/emqx_management/src/emqx_mgmt_api_stats.erl b/apps/emqx_management/src/emqx_mgmt_api_stats.erl index 19bb3e737..1d3c0e21b 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_stats.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_stats.erl @@ -129,7 +129,7 @@ list(get, #{query_string := Qs}) -> _ -> Data = [ maps:from_list(emqx_mgmt:get_stats(Node) ++ [{node, Node}]) - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ], {200, Data} end. diff --git a/apps/emqx_management/src/emqx_mgmt_api_trace.erl b/apps/emqx_management/src/emqx_mgmt_api_trace.erl index 38ce9dcf2..b93839b0b 100644 --- a/apps/emqx_management/src/emqx_mgmt_api_trace.erl +++ b/apps/emqx_management/src/emqx_mgmt_api_trace.erl @@ -376,7 +376,7 @@ trace(get, _Params) -> fun(#{start_at := A}, #{start_at := B}) -> A > B end, emqx_trace:format(List0) ), - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), TraceSize = wrap_rpc(emqx_mgmt_trace_proto_v2:get_trace_size(Nodes)), AllFileSize = lists:foldl(fun(F, Acc) -> maps:merge(Acc, F) end, #{}, TraceSize), Now = erlang:system_time(second), @@ -445,7 +445,7 @@ format_trace(Trace0) -> LogSize = lists:foldl( fun(Node, Acc) -> Acc#{Node => 0} end, #{}, - mria_mnesia:running_nodes() + mria:running_nodes() ), Trace2 = maps:without([enable, filter], Trace1), Trace2#{ @@ -541,13 +541,13 @@ group_trace_file(ZipDir, TraceLog, TraceFiles) -> ). collect_trace_file(undefined, TraceLog) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file(Nodes, TraceLog)); collect_trace_file(Node, TraceLog) -> wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file([Node], TraceLog)). collect_trace_file_detail(TraceLog) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), wrap_rpc(emqx_mgmt_trace_proto_v2:trace_file_detail(Nodes, TraceLog)). 
wrap_rpc({GoodRes, BadNodes}) -> @@ -677,7 +677,7 @@ parse_node(Query, Default) -> {ok, Default}; {ok, NodeBin} -> Node = binary_to_existing_atom(NodeBin), - true = lists:member(Node, mria_mnesia:running_nodes()), + true = lists:member(Node, mria:running_nodes()), {ok, Node} end catch diff --git a/apps/emqx_management/test/emqx_mgmt_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_SUITE.erl index 4619905cb..71b51b67c 100644 --- a/apps/emqx_management/test/emqx_mgmt_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_SUITE.erl @@ -36,16 +36,16 @@ end_per_suite(_) -> emqx_mgmt_api_test_util:end_suite([emqx_management, emqx_conf]). init_per_testcase(TestCase, Config) -> - meck:expect(mria_mnesia, running_nodes, 0, [node()]), + meck:expect(mria, running_nodes, 0, [node()]), emqx_common_test_helpers:init_per_testcase(?MODULE, TestCase, Config). end_per_testcase(TestCase, Config) -> - meck:unload(mria_mnesia), + meck:unload(mria), emqx_common_test_helpers:end_per_testcase(?MODULE, TestCase, Config). t_list_nodes(init, Config) -> meck:expect( - mria_mnesia, + mria, cluster_nodes, fun (running) -> [node()]; @@ -125,7 +125,7 @@ t_lookup_client(_Config) -> emqx_mgmt:lookup_client({username, <<"user1">>}, ?FORMATFUN) ), ?assertEqual([], emqx_mgmt:lookup_client({clientid, <<"notfound">>}, ?FORMATFUN)), - meck:expect(mria_mnesia, running_nodes, 0, [node(), 'fake@nonode']), + meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']), ?assertMatch( [_ | {error, nodedown}], emqx_mgmt:lookup_client({clientid, <<"client1">>}, ?FORMATFUN) ). 
@@ -188,7 +188,7 @@ t_clean_cache(_Config) -> {error, _}, emqx_mgmt:clean_pem_cache_all() ), - meck:expect(mria_mnesia, running_nodes, 0, [node(), 'fake@nonode']), + meck:expect(mria, running_nodes, 0, [node(), 'fake@nonode']), ?assertMatch( {error, [{'fake@nonode', {error, _}}]}, emqx_mgmt:clean_authz_cache_all() diff --git a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl index 4d0262e6a..bacec718d 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_SUITE.erl @@ -179,14 +179,14 @@ t_bad_rpc(_) -> ClientLs1 = [start_emqtt_client(node(), I, 1883) || I <- lists:seq(1, 10)], Path = emqx_mgmt_api_test_util:api_path(["clients?limit=2&page=2"]), try - meck:expect(mria_mnesia, running_nodes, 0, ['fake@nohost']), + meck:expect(mria, running_nodes, 0, ['fake@nohost']), {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path), %% good cop, bad cop - meck:expect(mria_mnesia, running_nodes, 0, [node(), 'fake@nohost']), + meck:expect(mria, running_nodes, 0, [node(), 'fake@nohost']), {error, {_, 500, _}} = emqx_mgmt_api_test_util:request_api(get, Path) after _ = lists:foreach(fun(C) -> emqtt:disconnect(C) end, ClientLs1), - meck:unload(mria_mnesia), + meck:unload(mria), emqx_mgmt_api_test_util:end_suite() end. 
diff --git a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl index d26f4480b..2d24bce99 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_configs_SUITE.erl @@ -212,7 +212,7 @@ t_dashboard(_Config) -> t_configs_node({'init', Config}) -> Node = node(), - meck:expect(mria_mnesia, running_nodes, fun() -> [Node, bad_node, other_node] end), + meck:expect(mria, running_nodes, fun() -> [Node, bad_node, other_node] end), meck:expect( emqx_management_proto_v2, get_full_config, @@ -224,7 +224,7 @@ t_configs_node({'init', Config}) -> ), Config; t_configs_node({'end', _}) -> - meck:unload([mria_mnesia, emqx_management_proto_v2]); + meck:unload([mria, emqx_management_proto_v2]); t_configs_node(_) -> Node = atom_to_list(node()), diff --git a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl index 0e212d52f..3238588e2 100644 --- a/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl +++ b/apps/emqx_management/test/emqx_mgmt_api_listeners_SUITE.erl @@ -168,8 +168,8 @@ t_api_listeners_list_not_ready(Config) when is_list(Config) -> L3 = get_tcp_listeners(Node2), Comment = #{ - node1 => rpc:call(Node1, mria_mnesia, running_nodes, []), - node2 => rpc:call(Node2, mria_mnesia, running_nodes, []) + node1 => rpc:call(Node1, mria, running_nodes, []), + node2 => rpc:call(Node2, mria, running_nodes, []) }, ?assert(length(L1) > length(L2), Comment), diff --git a/apps/emqx_modules/src/emqx_modules.app.src b/apps/emqx_modules/src/emqx_modules.app.src index 60d36d673..4a9cb6723 100644 --- a/apps/emqx_modules/src/emqx_modules.app.src +++ b/apps/emqx_modules/src/emqx_modules.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_modules, [ {description, "EMQX Modules"}, - {vsn, "5.0.10"}, + {vsn, "5.0.11"}, {modules, []}, {applications, [kernel, stdlib, emqx, emqx_ctl]}, 
{mod, {emqx_modules_app, []}}, diff --git a/apps/emqx_modules/src/emqx_telemetry.erl b/apps/emqx_modules/src/emqx_telemetry.erl index 16fef8d34..6d5c772f0 100644 --- a/apps/emqx_modules/src/emqx_telemetry.erl +++ b/apps/emqx_modules/src/emqx_telemetry.erl @@ -266,7 +266,7 @@ uptime() -> element(1, erlang:statistics(wall_clock)). nodes_uuid() -> - Nodes = lists:delete(node(), mria_mnesia:running_nodes()), + Nodes = lists:delete(node(), mria:running_nodes()), lists:foldl( fun(Node, Acc) -> case emqx_telemetry_proto_v1:get_node_uuid(Node) of diff --git a/apps/emqx_modules/src/emqx_topic_metrics_api.erl b/apps/emqx_modules/src/emqx_topic_metrics_api.erl index ef3c2be69..50b586228 100644 --- a/apps/emqx_modules/src/emqx_topic_metrics_api.erl +++ b/apps/emqx_modules/src/emqx_topic_metrics_api.erl @@ -321,7 +321,7 @@ operate_topic_metrics(delete, #{bindings := #{topic := Topic}}) -> %%-------------------------------------------------------------------- cluster_accumulation_metrics() -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_topic_metrics_proto_v1:metrics(Nodes) of {SuccResList, []} -> {ok, accumulate_nodes_metrics(SuccResList)}; @@ -330,7 +330,7 @@ cluster_accumulation_metrics() -> end. cluster_accumulation_metrics(Topic) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_topic_metrics_proto_v1:metrics(Nodes, Topic) of {SuccResList, []} -> case @@ -422,12 +422,12 @@ do_accumulation_metrics(MetricsIn, {MetricsAcc, _}) -> ). reset() -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), _ = emqx_topic_metrics_proto_v1:reset(Nodes), ok. 
reset(Topic) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_topic_metrics_proto_v1:reset(Nodes, Topic) of {SuccResList, []} -> case diff --git a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl index 8c6a43e47..cee255e77 100644 --- a/apps/emqx_modules/test/emqx_telemetry_SUITE.erl +++ b/apps/emqx_modules/test/emqx_telemetry_SUITE.erl @@ -856,7 +856,7 @@ stop_slave(Node) -> %emqx_cluster_rpc:fast_forward_to_commit(Node, 100), rpc:call(Node, ?MODULE, leave_cluster, []), ok = slave:stop(Node), - ?assertEqual([node()], mria_mnesia:running_nodes()), + ?assertEqual([node()], mria:running_nodes()), ?assertEqual([], nodes()), _ = application:stop(mria), ok = application:start(mria). diff --git a/apps/emqx_plugin_libs/src/emqx_placeholder.erl b/apps/emqx_plugin_libs/src/emqx_placeholder.erl index 3e98fa149..1f93c1d3e 100644 --- a/apps/emqx_plugin_libs/src/emqx_placeholder.erl +++ b/apps/emqx_plugin_libs/src/emqx_placeholder.erl @@ -30,6 +30,7 @@ proc_sql/2, proc_sql_param_str/2, proc_cql_param_str/2, + proc_param_str/3, preproc_tmpl_deep/1, preproc_tmpl_deep/2, proc_tmpl_deep/2, @@ -39,6 +40,12 @@ sql_data/1 ]). +-export([ + quote_sql/1, + quote_cql/1, + quote_mysql/1 +]). + -include_lib("emqx/include/emqx_placeholder.hrl"). -define(EX_PLACE_HOLDER, "(\\$\\{[a-zA-Z0-9\\._]+\\})"). @@ -83,6 +90,8 @@ | {tmpl, tmpl_token()} | {value, term()}. +-dialyzer({no_improper_lists, [quote_mysql/1, escape_mysql/4, escape_prepend/4]}). + %%------------------------------------------------------------------------------ %% APIs %%------------------------------------------------------------------------------ @@ -162,12 +171,22 @@ proc_sql(Tokens, Data) -> -spec proc_sql_param_str(tmpl_token(), map()) -> binary(). 
proc_sql_param_str(Tokens, Data) -> + % NOTE + % This is a bit misleading: currently, escaping logic in `quote_sql/1` likely + % won't work with pgsql since it does not support C-style escapes by default. + % https://www.postgresql.org/docs/14/sql-syntax-lexical.html#SQL-SYNTAX-CONSTANTS proc_param_str(Tokens, Data, fun quote_sql/1). -spec proc_cql_param_str(tmpl_token(), map()) -> binary(). proc_cql_param_str(Tokens, Data) -> proc_param_str(Tokens, Data, fun quote_cql/1). +-spec proc_param_str(tmpl_token(), map(), fun((_Value) -> iodata())) -> binary(). +proc_param_str(Tokens, Data, Quote) -> + iolist_to_binary( + proc_tmpl(Tokens, Data, #{return => rawlist, var_trans => Quote}) + ). + -spec preproc_tmpl_deep(term()) -> deep_template(). preproc_tmpl_deep(Data) -> preproc_tmpl_deep(Data, #{process_keys => true}). @@ -226,15 +245,29 @@ sql_data(Map) when is_map(Map) -> emqx_json:encode(Map). -spec bin(term()) -> binary(). bin(Val) -> emqx_plugin_libs_rule:bin(Val). +-spec quote_sql(_Value) -> iolist(). +quote_sql(Str) -> + quote_escape(Str, fun escape_sql/1). + +-spec quote_cql(_Value) -> iolist(). +quote_cql(Str) -> + quote_escape(Str, fun escape_cql/1). + +-spec quote_mysql(_Value) -> iolist(). +quote_mysql(Str) when is_binary(Str) -> + try + escape_mysql(Str) + catch + throw:invalid_utf8 -> + [<<"0x">> | binary:encode_hex(Str)] + end; +quote_mysql(Str) -> + quote_escape(Str, fun escape_mysql/1). + %%------------------------------------------------------------------------------ %% Internal functions %%------------------------------------------------------------------------------ -proc_param_str(Tokens, Data, Quote) -> - iolist_to_binary( - proc_tmpl(Tokens, Data, #{return => rawlist, var_trans => Quote}) - ). - get_phld_var(Phld, Data) -> emqx_rule_maps:nested_get(Phld, Data). @@ -312,21 +345,56 @@ unwrap(<<"\"${", Val/binary>>, _StripDoubleQuote = true) -> unwrap(<<"${", Val/binary>>, _StripDoubleQuote) -> binary:part(Val, {0, byte_size(Val) - 1}). 
-quote_sql(Str) -> - quote(Str, <<"\\\\'">>). - -quote_cql(Str) -> - quote(Str, <<"''">>). - -quote(Str, ReplaceWith) when - is_list(Str); - is_binary(Str); - is_atom(Str); - is_map(Str) --> - [$', escape_apo(bin(Str), ReplaceWith), $']; -quote(Val, _) -> +-spec quote_escape(_Value, fun((binary()) -> iodata())) -> iodata(). +quote_escape(Str, EscapeFun) when is_binary(Str) -> + EscapeFun(Str); +quote_escape(Str, EscapeFun) when is_list(Str) -> + case unicode:characters_to_binary(Str) of + Bin when is_binary(Bin) -> + EscapeFun(Bin); + Otherwise -> + error(Otherwise) + end; +quote_escape(Str, EscapeFun) when is_atom(Str) orelse is_map(Str) -> + EscapeFun(bin(Str)); +quote_escape(Val, _EscapeFun) -> bin(Val). -escape_apo(Str, ReplaceWith) -> - re:replace(Str, <<"'">>, ReplaceWith, [{return, binary}, global]). +-spec escape_sql(binary()) -> iolist(). +escape_sql(S) -> + ES = binary:replace(S, [<<"\\">>, <<"'">>], <<"\\">>, [global, {insert_replaced, 1}]), + [$', ES, $']. + +-spec escape_cql(binary()) -> iolist(). +escape_cql(S) -> + ES = binary:replace(S, <<"'">>, <<"'">>, [global, {insert_replaced, 1}]), + [$', ES, $']. + +-spec escape_mysql(binary()) -> iolist(). +escape_mysql(S0) -> + % https://dev.mysql.com/doc/refman/8.0/en/string-literals.html + [$', escape_mysql(S0, 0, 0, S0), $']. + +%% NOTE +%% This thing looks more complicated than needed because it's optimized for as few +%% intermediate memory (re)allocations as possible. 
+escape_mysql(<<$', Rest/binary>>, I, Run, Src) -> + escape_prepend(I, Run, Src, [<<"\\'">> | escape_mysql(Rest, I + Run + 1, 0, Src)]); +escape_mysql(<<$\\, Rest/binary>>, I, Run, Src) -> + escape_prepend(I, Run, Src, [<<"\\\\">> | escape_mysql(Rest, I + Run + 1, 0, Src)]); +escape_mysql(<<0, Rest/binary>>, I, Run, Src) -> + escape_prepend(I, Run, Src, [<<"\\0">> | escape_mysql(Rest, I + Run + 1, 0, Src)]); +escape_mysql(<<_/utf8, Rest/binary>> = S, I, Run, Src) -> + CWidth = byte_size(S) - byte_size(Rest), + escape_mysql(Rest, I, Run + CWidth, Src); +escape_mysql(<<>>, 0, _, Src) -> + Src; +escape_mysql(<<>>, I, Run, Src) -> + binary:part(Src, I, Run); +escape_mysql(_, _I, _Run, _Src) -> + throw(invalid_utf8). + +escape_prepend(_RunI, 0, _Src, Tail) -> + Tail; +escape_prepend(I, Run, Src, Tail) -> + [binary:part(Src, I, Run) | Tail]. diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src index 7acf7433b..605fdb346 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_plugin_libs, [ {description, "EMQX Plugin utility libs"}, - {vsn, "4.3.6"}, + {vsn, "4.3.7"}, {modules, []}, {applications, [kernel, stdlib]}, {env, []} diff --git a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl index a60c94a7b..d1a821895 100644 --- a/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl +++ b/apps/emqx_plugin_libs/src/emqx_plugin_libs_rule.erl @@ -68,11 +68,6 @@ -compile({no_auto_import, [float/1]}). --define(EX_PLACE_HOLDER, "(\\$\\{[a-zA-Z0-9\\._]+\\})"). - -%% Space and CRLF --define(EX_WITHE_CHARS, "\\s"). - -type uri_string() :: iodata(). -type tmpl_token() :: list({var, binary()} | {str, binary()}). @@ -172,8 +167,8 @@ detect_sql_type(SQL) -> ) -> InsertSQL :: binary(). 
proc_batch_sql(BatchReqs, InsertPart, Tokens) -> ValuesPart = erlang:iolist_to_binary( - lists:join(", ", [ - emqx_plugin_libs_rule:proc_sql_param_str(Tokens, Msg) + lists:join($,, [ + proc_sql_param_str(Tokens, Msg) || {_, Msg} <- BatchReqs ]) ), diff --git a/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl b/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl index 6baaaefc6..fc431e80c 100644 --- a/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl +++ b/apps/emqx_plugin_libs/test/emqx_placeholder_SUITE.erl @@ -105,19 +105,27 @@ t_preproc_sql3(_) -> emqx_placeholder:proc_sql_param_str(ParamsTokens, Selected) ). -t_preproc_sql4(_) -> +t_preproc_mysql1(_) -> %% with apostrophes %% https://github.com/emqx/emqx/issues/4135 Selected = #{ a => <<"1''2">>, b => 1, c => 1.0, - d => #{d1 => <<"someone's phone">>} + d => #{d1 => <<"someone's phone">>}, + e => <<$\\, 0, "💩"/utf8>>, + f => <<"non-utf8", 16#DCC900:24>>, + g => "utf8's cool 🐸" }, - ParamsTokens = emqx_placeholder:preproc_tmpl(<<"a:${a},b:${b},c:${c},d:${d}">>), + ParamsTokens = emqx_placeholder:preproc_tmpl( + <<"a:${a},b:${b},c:${c},d:${d},e:${e},f:${f},g:${g}">> + ), ?assertEqual( - <<"a:'1\\'\\'2',b:1,c:1.0,d:'{\"d1\":\"someone\\'s phone\"}'">>, - emqx_placeholder:proc_sql_param_str(ParamsTokens, Selected) + << + "a:'1\\'\\'2',b:1,c:1.0,d:'{\"d1\":\"someone\\'s phone\"}'," + "e:'\\\\\\0💩',f:0x6E6F6E2D75746638DCC900,g:'utf8\\'s cool 🐸'"/utf8 + >>, + emqx_placeholder:proc_param_str(ParamsTokens, Selected, fun emqx_placeholder:quote_mysql/1) ). t_preproc_sql5(_) -> diff --git a/apps/emqx_prometheus/src/emqx_prometheus.app.src b/apps/emqx_prometheus/src/emqx_prometheus.app.src index 6970ba777..07ae38d75 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.app.src +++ b/apps/emqx_prometheus/src/emqx_prometheus.app.src @@ -2,7 +2,7 @@ {application, emqx_prometheus, [ {description, "Prometheus for EMQX"}, % strict semver, bump manually! 
- {vsn, "5.0.6"}, + {vsn, "5.0.7"}, {modules, []}, {registered, [emqx_prometheus_sup]}, {applications, [kernel, stdlib, prometheus, emqx, emqx_management]}, diff --git a/apps/emqx_prometheus/src/emqx_prometheus.erl b/apps/emqx_prometheus/src/emqx_prometheus.erl index 62e6f1d9a..60d52f58b 100644 --- a/apps/emqx_prometheus/src/emqx_prometheus.erl +++ b/apps/emqx_prometheus/src/emqx_prometheus.erl @@ -599,7 +599,8 @@ emqx_cluster() -> ]. emqx_cluster_data() -> - #{running_nodes := Running, stopped_nodes := Stopped} = mria_mnesia:cluster_info(), + Running = mria:cluster_nodes(running), + Stopped = mria:cluster_nodes(stopped), [ {nodes_running, length(Running)}, {nodes_stopped, length(Stopped)} diff --git a/apps/emqx_resource/src/emqx_resource.app.src b/apps/emqx_resource/src/emqx_resource.app.src index 0cc013099..7be1bcb1c 100644 --- a/apps/emqx_resource/src/emqx_resource.app.src +++ b/apps/emqx_resource/src/emqx_resource.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_resource, [ {description, "Manager for all external resources"}, - {vsn, "0.1.9"}, + {vsn, "0.1.10"}, {registered, []}, {mod, {emqx_resource_app, []}}, {applications, [ diff --git a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl index d5a50f351..711833963 100644 --- a/apps/emqx_resource/src/emqx_resource_buffer_worker.erl +++ b/apps/emqx_resource/src/emqx_resource_buffer_worker.erl @@ -920,6 +920,15 @@ do_call_query(_QM, _Id, _Index, _Ref, _Query, _QueryOpts, _Data) -> %% return `{error, {recoverable_error, Reason}}` EXPR catch + %% For convenience and to make the code in the callbacks cleaner an + %% error exception with the two following formats are translated to the + %% corresponding return values. The receiver of the return values + %% recognizes these special return formats and use them to decided if a + %% request should be retried. 
+ error:{unrecoverable_error, Msg} -> + {error, {unrecoverable_error, Msg}}; + error:{recoverable_error, Msg} -> + {error, {recoverable_error, Msg}}; ERR:REASON:STACKTRACE -> ?RESOURCE_ERROR(exception, #{ name => NAME, diff --git a/apps/emqx_retainer/src/emqx_retainer.app.src b/apps/emqx_retainer/src/emqx_retainer.app.src index 8bdae6d7f..11013cdd3 100644 --- a/apps/emqx_retainer/src/emqx_retainer.app.src +++ b/apps/emqx_retainer/src/emqx_retainer.app.src @@ -2,7 +2,7 @@ {application, emqx_retainer, [ {description, "EMQX Retainer"}, % strict semver, bump manually! - {vsn, "5.0.10"}, + {vsn, "5.0.11"}, {modules, []}, {registered, [emqx_retainer_sup]}, {applications, [kernel, stdlib, emqx, emqx_ctl]}, diff --git a/apps/emqx_retainer/src/emqx_retainer_mnesia.erl b/apps/emqx_retainer/src/emqx_retainer_mnesia.erl index cadb9110f..2137d49f2 100644 --- a/apps/emqx_retainer/src/emqx_retainer_mnesia.erl +++ b/apps/emqx_retainer/src/emqx_retainer_mnesia.erl @@ -626,7 +626,7 @@ do_reindex_batch(QC, Done) -> {Status, Done + length(Topics)}. wait_dispatch_complete(Timeout) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), {Results, []} = emqx_retainer_proto_v2:wait_dispatch_complete(Nodes, Timeout), lists:all( fun(Result) -> Result =:= ok end, @@ -649,7 +649,7 @@ active_indices() -> {dirty_indices(read), dirty_indices(write)}. are_indices_updated(Indices) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), case emqx_retainer_proto_v2:active_mnesia_indices(Nodes) of {Results, []} -> lists:all( diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src index 4c924b824..1681297ec 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine.app.src +++ b/apps/emqx_rule_engine/src/emqx_rule_engine.app.src @@ -2,7 +2,7 @@ {application, emqx_rule_engine, [ {description, "EMQX Rule Engine"}, % strict semver, bump manually! 
- {vsn, "5.0.10"}, + {vsn, "5.0.11"}, {modules, []}, {registered, [emqx_rule_engine_sup, emqx_rule_engine]}, {applications, [kernel, stdlib, rulesql, getopt, emqx_ctl]}, diff --git a/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl b/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl index 62e1553d2..30de3e8e8 100644 --- a/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl +++ b/apps/emqx_rule_engine/src/emqx_rule_engine_api.erl @@ -529,7 +529,7 @@ get_rule_metrics(Id) -> end, [ Format(Node, emqx_plugin_libs_proto_v1:get_metrics(Node, rule_metrics, Id)) - || Node <- mria_mnesia:running_nodes() + || Node <- mria:running_nodes() ]. aggregate_metrics(AllMetrics) -> diff --git a/apps/emqx_slow_subs/src/emqx_slow_subs.app.src b/apps/emqx_slow_subs/src/emqx_slow_subs.app.src index 170a4bb02..7d3fc341d 100644 --- a/apps/emqx_slow_subs/src/emqx_slow_subs.app.src +++ b/apps/emqx_slow_subs/src/emqx_slow_subs.app.src @@ -1,7 +1,7 @@ {application, emqx_slow_subs, [ {description, "EMQX Slow Subscribers Statistics"}, % strict semver, bump manually! - {vsn, "1.0.3"}, + {vsn, "1.0.4"}, {modules, []}, {registered, [emqx_slow_subs_sup]}, {applications, [kernel, stdlib, emqx]}, diff --git a/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl b/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl index 8ebdd50c3..311bcf62e 100644 --- a/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl +++ b/apps/emqx_slow_subs/src/emqx_slow_subs_api.erl @@ -147,5 +147,5 @@ settings(put, #{body := Body}) -> end. rpc_call(Fun) -> - Nodes = mria_mnesia:running_nodes(), + Nodes = mria:running_nodes(), Fun(Nodes). 
diff --git a/apps/emqx_statsd/src/emqx_statsd.app.src b/apps/emqx_statsd/src/emqx_statsd.app.src index 67825162e..9d40c6857 100644 --- a/apps/emqx_statsd/src/emqx_statsd.app.src +++ b/apps/emqx_statsd/src/emqx_statsd.app.src @@ -1,7 +1,7 @@ %% -*- mode: erlang -*- {application, emqx_statsd, [ {description, "EMQX Statsd"}, - {vsn, "5.0.6"}, + {vsn, "5.0.7"}, {registered, []}, {mod, {emqx_statsd_app, []}}, {applications, [ diff --git a/apps/emqx_statsd/src/emqx_statsd.erl b/apps/emqx_statsd/src/emqx_statsd.erl index 75c15fa9e..c2e1819ac 100644 --- a/apps/emqx_statsd/src/emqx_statsd.erl +++ b/apps/emqx_statsd/src/emqx_statsd.erl @@ -53,9 +53,9 @@ -define(SAMPLE_TIMEOUT, sample_timeout). %% Remove after 5.1.x -start() -> check_multicall_result(emqx_statsd_proto_v1:start(mria_mnesia:running_nodes())). -stop() -> check_multicall_result(emqx_statsd_proto_v1:stop(mria_mnesia:running_nodes())). -restart() -> check_multicall_result(emqx_statsd_proto_v1:restart(mria_mnesia:running_nodes())). +start() -> check_multicall_result(emqx_statsd_proto_v1:start(mria:running_nodes())). +stop() -> check_multicall_result(emqx_statsd_proto_v1:stop(mria:running_nodes())). +restart() -> check_multicall_result(emqx_statsd_proto_v1:restart(mria:running_nodes())). do_start() -> emqx_statsd_sup:ensure_child_started(?APP). diff --git a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl index a203ef7d5..bcc710050 100644 --- a/apps/emqx_statsd/test/emqx_statsd_SUITE.erl +++ b/apps/emqx_statsd/test/emqx_statsd_SUITE.erl @@ -33,6 +33,26 @@ "tags {\"t1\" = \"good\", test = 100}\n" "}\n" >>). +-define(BAD_CONF, << + "\n" + "statsd {\n" + "enable = true\n" + "flush_time_interval = 4s\n" + "sample_time_interval = 4s\n" + "server = \"\"\n" + "tags {\"t1\" = \"good\", test = 100}\n" + "}\n" +>>). 
+ +-define(DEFAULT_CONF, << + "\n" + "statsd {\n" + "enable = true\n" + "flush_time_interval = 4s\n" + "sample_time_interval = 4s\n" + "tags {\"t1\" = \"good\", test = 100}\n" + "}\n" +>>). init_per_suite(Config) -> emqx_common_test_helpers:start_apps( @@ -55,6 +75,33 @@ set_special_configs(_) -> all() -> emqx_common_test_helpers:all(?MODULE). +t_server_validator(_) -> + Server0 = emqx_conf:get_raw([statsd, server]), + ?assertThrow( + #{ + kind := validation_error, + path := "statsd.server", + reason := "cannot_be_empty", + value := "" + }, + emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BAD_CONF, #{ + raw_with_default => true + }) + ), + %% default + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?DEFAULT_CONF, #{ + raw_with_default => true + }), + undefined = emqx_conf:get_raw([statsd, server], undefined), + ?assertMatch("127.0.0.1:8125", emqx_conf:get([statsd, server])), + %% recover + ok = emqx_common_test_helpers:load_config(emqx_statsd_schema, ?BASE_CONF, #{ + raw_with_default => true + }), + Server2 = emqx_conf:get_raw([statsd, server]), + ?assertMatch(Server0, Server2), + ok. + t_statsd(_) -> {ok, Socket} = gen_udp:open(8126, [{active, true}]), receive @@ -137,7 +184,16 @@ t_config_update(_) -> ?assertNotEqual(OldPid, NewPid) after {ok, _} = emqx_statsd_config:update(OldRawConf) - end. + end, + %% bad server url + BadRawConf = OldRawConf#{<<"server">> := <<"">>}, + {error, #{ + kind := validation_error, + path := "statsd.server", + reason := "cannot_be_empty", + value := "" + }} = emqx_statsd_config:update(BadRawConf), + ok. request(Method) -> request(Method, []). diff --git a/bin/emqx b/bin/emqx index b2bdded86..14f94f359 100755 --- a/bin/emqx +++ b/bin/emqx @@ -451,6 +451,20 @@ call_hocon() { || die "call_hocon_failed: $*" $? 
} +find_emqx_process() { + ## Find the running node from 'ps -ef' + ## * The grep args like '[e]mqx' but not 'emqx' is to avoid greping the grep command itself + ## * The running 'remsh' and 'nodetool' processes must be excluded + if [ -n "${EMQX_NODE__NAME:-}" ]; then + # if node name is provided, filter by node name + # shellcheck disable=SC2009 + ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -E "\s\-s?name\s${EMQX_NODE__NAME}" | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true + else + # shellcheck disable=SC2009 + ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true + fi +} + ## Resolve boot configs in a batch ## This is because starting the Erlang beam with all modules loaded ## and parsing HOCON config + environment variables is a non-trivial task @@ -468,17 +482,7 @@ fi # Turn off debug as the ps output can be quite noisy set +x -## Find the running node from 'ps -ef' -## * The grep args like '[e]mqx' but not 'emqx' is to avoid greping the grep command itself -## * The running 'remsh' and 'nodetool' processes must be excluded -if [ -n "${EMQX_NODE__NAME:-}" ]; then - # if node name is provided, filter by node name - # shellcheck disable=SC2009 - PS_LINE="$(ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -E "\s\-s?name\s${EMQX_NODE__NAME}" | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true)" -else - # shellcheck disable=SC2009 - PS_LINE="$(ps -ef | $GREP '[e]mqx' | $GREP -v -E '(remsh|nodetool)' | $GREP -oE "\-[r]oot ${RUNNER_ROOT_DIR}.*" || true)" -fi +PS_LINE="$(find_emqx_process)" logdebug "PS_LINE=$PS_LINE" RUNNING_NODES_COUNT="$(echo -e "$PS_LINE" | sed '/^\s*$/d' | wc -l)" [ "$RUNNING_NODES_COUNT" -gt 1 ] && logdebug "More than one running node found: count=$RUNNING_NODES_COUNT" @@ -927,6 +931,7 @@ case "$NAME" in NAME_TYPE='-sname' esac SHORT_NAME="$(echo "$NAME" | awk -F'@' '{print $1}')" +HOST_NAME="$(echo "$NAME" | awk -F'@' '{print $2}')" if ! 
(echo "$SHORT_NAME" | grep -q '^[0-9A-Za-z_\-]\+$'); then logerr "Invalid node name, should be of format '^[0-9A-Za-z_-]+$'." exit 1 @@ -969,6 +974,59 @@ if [[ "$IS_BOOT_COMMAND" == 'yes' && "$(get_boot_config 'node.db_backend')" == " fi fi +diagnose_boot_failure_and_die() { + local ps_line + ps_line="$(find_emqx_process)" + if [ -z "$ps_line" ]; then + echo "Find more information in the latest log file: ${RUNNER_LOG_DIR}/erlang.log.*" + exit 1 + fi + if ! relx_nodetool "ping" > /dev/null; then + logerr "$NAME seems to be running, but not responding to pings." + echo "Make sure '$HOST_NAME' is a resolvable and reachable hostname." + pipe_shutdown + exit 2 + fi + if ! relx_nodetool 'eval' 'true = emqx:is_running()' > /dev/null; then + logerr "$NAME node is started, but failed to complete the boot sequence in time." + echo "Please collect the logs in ${RUNNER_LOG_DIR} and report a bug to EMQX team at https://github.com/emqx/emqx/issues/new/choose" + pipe_shutdown + exit 3 + fi +} + +## Only works when started in daemon mode +pipe_shutdown() { + if [ -d "$PIPE_DIR" ]; then + echo "Shutting down $NAME from to_erl pipe." + ## can not evaluate init:stop() or erlang:halt() because the shell is restricted + echo 'emqx_machine:brutal_shutdown().' | "$BINDIR/to_erl" "$PIPE_DIR" + fi +} + +## Call nodetool to stop EMQX +nodetool_shutdown() { + # Wait for the node to completely stop... + PID="$(relx_get_pid)" + if ! relx_nodetool "stop"; then + die "Graceful shutdown failed PID=[$PID]" + fi + WAIT_TIME="${EMQX_WAIT_FOR_STOP:-120}" + if ! 
wait_for "$WAIT_TIME" 'is_down' "$PID"; then + msg="dangling after ${WAIT_TIME} seconds" + # also log to syslog + logger -t "${REL_NAME}[${PID}]" "STOP: $msg" + # log to user console + set +x + logerr "Stop failed, $msg" + echo "ERROR: $PID is still around" + ps -p "$PID" + exit 1 + fi + echo "ok" + logger -t "${REL_NAME}[${PID}]" "STOP: OK" +} + cd "$RUNNER_ROOT_DIR" case "${COMMAND}" in @@ -1014,33 +1072,15 @@ case "${COMMAND}" in echo "$EMQX_DESCRIPTION $REL_VSN is started successfully!" exit 0 else - echo "$EMQX_DESCRIPTION $REL_VSN failed to start in ${WAIT_TIME} seconds." - echo "Please find more information in erlang.log.N" - echo "Or run 'env DEBUG=1 $0 console' to have logs printed to console." - exit 1 + logerr "${EMQX_DESCRIPTION} ${REL_VSN} using node name '${NAME}' failed ${WAIT_TIME} probes." + diagnose_boot_failure_and_die fi ;; stop) - # Wait for the node to completely stop... - PID="$(relx_get_pid)" - if ! relx_nodetool "stop"; then - die "Graceful shutdown failed PID=[$PID]" + if ! nodetool_shutdown; then + pipe_shutdown fi - WAIT_TIME="${EMQX_WAIT_FOR_STOP:-120}" - if ! 
wait_for "$WAIT_TIME" 'is_down' "$PID"; then - msg="dangling after ${WAIT_TIME} seconds" - # also log to syslog - logger -t "${REL_NAME}[${PID}]" "STOP: $msg" - # log to user console - set +x - logerr "Stop failed, $msg" - echo "ERROR: $PID is still around" - ps -p "$PID" - exit 1 - fi - echo "ok" - logger -t "${REL_NAME}[${PID}]" "STOP: OK" ;; pid) @@ -1063,9 +1103,6 @@ case "${COMMAND}" in ;; attach) - assert_node_alive - - shift exec "$BINDIR/to_erl" "$PIPE_DIR" ;; diff --git a/build b/build index de00aba6c..76298f1ab 100755 --- a/build +++ b/build @@ -92,7 +92,7 @@ log() { } make_docs() { - local libs_dir1 libs_dir2 libs_dir3 + local libs_dir1 libs_dir2 libs_dir3 docdir dashboard_www_static libs_dir1="$("$FIND" "_build/$PROFILE/lib/" -maxdepth 2 -name ebin -type d)" if [ -d "_build/default/lib/" ]; then libs_dir2="$("$FIND" "_build/default/lib/" -maxdepth 2 -name ebin -type d)" @@ -112,12 +112,16 @@ make_docs() { SCHEMA_MODULE='emqx_conf_schema' ;; esac + docdir="_build/docgen/$PROFILE" + dashboard_www_static='apps/emqx_dashboard/priv/www/static/' + mkdir -p "$docdir" "$dashboard_www_static" # shellcheck disable=SC2086 erl -noshell -pa $libs_dir1 $libs_dir2 $libs_dir3 -eval \ - "Dir = filename:join([apps, emqx_dashboard, priv, www, static]), \ - I18nFile = filename:join([apps, emqx_dashboard, priv, 'i18n.conf']), \ - ok = emqx_conf:dump_schema(Dir, $SCHEMA_MODULE, I18nFile), \ + "I18nFile = filename:join([apps, emqx_dashboard, priv, 'i18n.conf']), \ + ok = emqx_conf:dump_schema('$docdir', $SCHEMA_MODULE, I18nFile), \ halt(0)." 
+ cp "$docdir"/bridge-api-*.json "$dashboard_www_static" + cp "$docdir"/hot-config-schema-*.json "$dashboard_www_static" } assert_no_compile_time_only_deps() { diff --git a/changes/ce/feat-10022.en.md b/changes/ce/feat-10022.en.md new file mode 100644 index 000000000..61d027aa2 --- /dev/null +++ b/changes/ce/feat-10022.en.md @@ -0,0 +1 @@ +Start releasing Rocky Linux 9 (compatible with Enterprise Linux 9) and MacOS 12 packages diff --git a/changes/ce/feat-10022.zh.md b/changes/ce/feat-10022.zh.md new file mode 100644 index 000000000..970704f55 --- /dev/null +++ b/changes/ce/feat-10022.zh.md @@ -0,0 +1 @@ +开始发布Rocky Linux 9(与Enterprise Linux 9兼容)和MacOS 12软件包。 diff --git a/changes/ce/feat-9986.en.md b/changes/ce/feat-9986.en.md new file mode 100644 index 000000000..ee7a6be71 --- /dev/null +++ b/changes/ce/feat-9986.en.md @@ -0,0 +1 @@ +For helm charts, add MQTT ingress bridge; and removed stale `mgmt` references. diff --git a/changes/ce/feat-9986.zh.md b/changes/ce/feat-9986.zh.md new file mode 100644 index 000000000..a7f418587 --- /dev/null +++ b/changes/ce/feat-9986.zh.md @@ -0,0 +1 @@ +在 helm chart 中新增了 MQTT 桥接 ingress 的配置参数;并删除了旧版本遗留的 `mgmt` 配置。 diff --git a/changes/ce/fix-10013.en.md b/changes/ce/fix-10013.en.md new file mode 100644 index 000000000..ed7fa21eb --- /dev/null +++ b/changes/ce/fix-10013.en.md @@ -0,0 +1 @@ +Fix return type structure for error case in API schema for `/gateways/:name/clients`. diff --git a/changes/ce/fix-10013.zh.md b/changes/ce/fix-10013.zh.md new file mode 100644 index 000000000..171b79538 --- /dev/null +++ b/changes/ce/fix-10013.zh.md @@ -0,0 +1 @@ +修复 API `/gateways/:name/clients` 返回值的类型结构错误。 diff --git a/changes/ce/fix-10014.en.md b/changes/ce/fix-10014.en.md new file mode 100644 index 000000000..d52452bf9 --- /dev/null +++ b/changes/ce/fix-10014.en.md @@ -0,0 +1 @@ +In dashboard API for `/monitor(_current)/nodes/:node` return `404` instead of `400` if node does not exist. 
diff --git a/changes/ce/fix-10014.zh.md b/changes/ce/fix-10014.zh.md new file mode 100644 index 000000000..5e6a1660f --- /dev/null +++ b/changes/ce/fix-10014.zh.md @@ -0,0 +1 @@ +如果 API 查询的节点不存在,将会返回 404 而不再是 400。 diff --git a/changes/ce/fix-10027.en.md b/changes/ce/fix-10027.en.md new file mode 100644 index 000000000..531da1c50 --- /dev/null +++ b/changes/ce/fix-10027.en.md @@ -0,0 +1,2 @@ +Allow setting node name from `EMQX_NODE__NAME` when running in docker. +Prior to this fix, only `EMQX_NODE_NAME` is allowed. diff --git a/changes/ce/fix-10027.zh.md b/changes/ce/fix-10027.zh.md new file mode 100644 index 000000000..ee7055d6c --- /dev/null +++ b/changes/ce/fix-10027.zh.md @@ -0,0 +1,2 @@ +在 docker 中启动时,允许使用 `EMQX_NODE__NAME` 环境变量来配置节点名。 +在此修复前,只能使用 `EMQX_NODE_NAME`。 diff --git a/changes/ce/fix-10050.en.md b/changes/ce/fix-10050.en.md new file mode 100644 index 000000000..c225c380d --- /dev/null +++ b/changes/ce/fix-10050.en.md @@ -0,0 +1 @@ +Ensure Bridge API returns `404` status code consistently for resources that don't exist. diff --git a/changes/ce/fix-10050.zh.md b/changes/ce/fix-10050.zh.md new file mode 100644 index 000000000..d7faf9434 --- /dev/null +++ b/changes/ce/fix-10050.zh.md @@ -0,0 +1 @@ +确保 Bridge API 对不存在的资源一致返回 `404` 状态代码。 diff --git a/changes/ce/fix-10052.en.md b/changes/ce/fix-10052.en.md new file mode 100644 index 000000000..f83c4d40c --- /dev/null +++ b/changes/ce/fix-10052.en.md @@ -0,0 +1,12 @@ +Improve daemon mode startup failure logs. + +Before this change, it was difficult for users to understand the reason why the EMQX 'start' command failed to boot the node. +The only information they received was that the node did not start within the expected time frame, +and they were instructed to boot the node with 'console' command in the hope of obtaining some logs. +However, the node might actually be running, which could cause 'console' mode to fail for a different reason.
+ +With this new change, when daemon mode fails to boot, a diagnosis is issued. Here are the possible scenarios: + +* If the node cannot be found from `ps -ef`, the user is instructed to find information in log files `erlang.log.*`. +* If the node is found to be running but not responding to pings, the user is advised to check if the host name is resolvable and reachable. +* If the node is responding to pings, but the EMQX app is not running, it is likely a bug. In this case, the user is advised to report a Github issue. diff --git a/changes/ce/fix-10052.zh.md b/changes/ce/fix-10052.zh.md new file mode 100644 index 000000000..1c2eff342 --- /dev/null +++ b/changes/ce/fix-10052.zh.md @@ -0,0 +1,11 @@ +优化 EMQX daemon 模式启动失败的日志。 + +在进行此更改之前,当 EMQX 用 `start` 命令启动失败时,用户很难理解出错的原因。 +所知道的仅仅是节点未能在预期时间内启动,然后被指示以 `console` 模式引导节点以获取一些日志。 +然而,节点实际上可能正在运行,这可能会导致 `console` 模式因不同的原因而失败。 + +此次修复后,启动脚本会发出诊断: + +* 如果无法从 `ps -ef` 中找到节点,则指示用户在 `erlang.log.*` 中查找信息。 +* 如果发现节点正在运行但不响应 ping,则建议用户检查节点主机名是否有效并可达。 +* 如果节点响应 ping 但 EMQX 应用程序未运行,则很可能是一个错误。在这种情况下,建议用户报告一个 Github issue。 diff --git a/changes/ce/fix-10055.en.md b/changes/ce/fix-10055.en.md new file mode 100644 index 000000000..4ffaae195 --- /dev/null +++ b/changes/ce/fix-10055.en.md @@ -0,0 +1 @@ +Fix issue where `mqtt.max_awaiting_rel` change did not take effect. diff --git a/changes/ce/fix-10055.zh.md b/changes/ce/fix-10055.zh.md new file mode 100644 index 000000000..4da371c51 --- /dev/null +++ b/changes/ce/fix-10055.zh.md @@ -0,0 +1 @@ +修复 `mqtt.max_awaiting_rel` 更新不生效问题。 diff --git a/changes/ce/fix-10056.en.md b/changes/ce/fix-10056.en.md new file mode 100644 index 000000000..ab9b980e8 --- /dev/null +++ b/changes/ce/fix-10056.en.md @@ -0,0 +1 @@ +`/bridges` API: return `400` instead of `403` in case of inconsistency in the application logic either because bridge is about to be deleted, but active rules still depend on it, or an operation (start|stop|restart) is called, but the bridge is not enabled.
diff --git a/changes/ce/fix-10056.zh.md b/changes/ce/fix-10056.zh.md new file mode 100644 index 000000000..4d3317165 --- /dev/null +++ b/changes/ce/fix-10056.zh.md @@ -0,0 +1 @@ +`/bridges` API:在应用逻辑不一致的情况下,返回 `400` 而不是 `403`,因为桥接即将被删除,但活动规则仍然依赖于它,或者调用了一个操作(启动|停止|重新启动),但桥接没有被启用。 diff --git a/changes/ce/fix-10066.en.md b/changes/ce/fix-10066.en.md new file mode 100644 index 000000000..2d23ad5b9 --- /dev/null +++ b/changes/ce/fix-10066.en.md @@ -0,0 +1 @@ +Return human readable error message for `/bridges_probe` and `[/node/:node]/bridges/:id/:operation` API calls and set HTTP status code to `400` instead of `500`. diff --git a/changes/ce/fix-10066.zh.md b/changes/ce/fix-10066.zh.md new file mode 100644 index 000000000..c72f21ff1 --- /dev/null +++ b/changes/ce/fix-10066.zh.md @@ -0,0 +1 @@ +为 `/bridges_probe` 和 `[/node/:node]/bridges/:id/:operation` 的 API 调用返回人类可读的错误信息,并将 HTTP 状态代码设置为 `400` 而不是 `500`。 diff --git a/changes/ce/fix-10074.en.md b/changes/ce/fix-10074.en.md new file mode 100644 index 000000000..49c52b948 --- /dev/null +++ b/changes/ce/fix-10074.en.md @@ -0,0 +1 @@ +Check if type in `PUT /authorization/sources/:type` matches `type` given in body of request. diff --git a/changes/ce/fix-10074.zh.md b/changes/ce/fix-10074.zh.md new file mode 100644 index 000000000..930840cdf --- /dev/null +++ b/changes/ce/fix-10074.zh.md @@ -0,0 +1 @@ +检查 `PUT /authorization/sources/:type` 中的类型是否与请求正文中的 `type` 相符。 diff --git a/changes/ce/fix-10079.en.md b/changes/ce/fix-10079.en.md new file mode 100644 index 000000000..440351753 --- /dev/null +++ b/changes/ce/fix-10079.en.md @@ -0,0 +1 @@ +Fix description of `shared_subscription_strategy`.
diff --git a/changes/ce/fix-10079.zh.md b/changes/ce/fix-10079.zh.md new file mode 100644 index 000000000..ca2ab9173 --- /dev/null +++ b/changes/ce/fix-10079.zh.md @@ -0,0 +1,2 @@ +修正对 `shared_subscription_strategy` 的描述。 + diff --git a/changes/ce/fix-10085.en.md b/changes/ce/fix-10085.en.md new file mode 100644 index 000000000..e539a04b4 --- /dev/null +++ b/changes/ce/fix-10085.en.md @@ -0,0 +1 @@ +Consistently return `404` for all requests on non existent source in `/authorization/sources/:source[/*]`. diff --git a/changes/ce/fix-10085.zh.md b/changes/ce/fix-10085.zh.md new file mode 100644 index 000000000..059680efa --- /dev/null +++ b/changes/ce/fix-10085.zh.md @@ -0,0 +1 @@ +如果向 `/authorization/sources/:source[/*]` 请求的 `source` 不存在,将一致地返回 `404`。 diff --git a/changes/ce/fix-10098.en.md b/changes/ce/fix-10098.en.md new file mode 100644 index 000000000..61058da0a --- /dev/null +++ b/changes/ce/fix-10098.en.md @@ -0,0 +1 @@ +A crash with an error in the log file that happened when the MongoDB authorization module queried the database has been fixed. diff --git a/changes/ce/fix-10098.zh.md b/changes/ce/fix-10098.zh.md new file mode 100644 index 000000000..6b6d86159 --- /dev/null +++ b/changes/ce/fix-10098.zh.md @@ -0,0 +1 @@ +当MongoDB授权模块查询数据库时,在日志文件中发生的崩溃与错误已经被修复。 diff --git a/changes/ce/fix-10100.en.md b/changes/ce/fix-10100.en.md new file mode 100644 index 000000000..002fb6f08 --- /dev/null +++ b/changes/ce/fix-10100.en.md @@ -0,0 +1 @@ +Fix channel crash for slow clients with enhanced authentication. 
diff --git a/changes/ce/fix-10100.zh.md b/changes/ce/fix-10100.zh.md new file mode 100644 index 000000000..6adb5e7e1 --- /dev/null +++ b/changes/ce/fix-10100.zh.md @@ -0,0 +1 @@ +修复响应较慢的客户端在使用增强认证时可能出现崩溃的问题。 diff --git a/changes/ce/fix-10107.en.md b/changes/ce/fix-10107.en.md new file mode 100644 index 000000000..1bcbbad60 --- /dev/null +++ b/changes/ce/fix-10107.en.md @@ -0,0 +1,9 @@ +For operations on `bridges API` if `bridge-id` is unknown we now return `404` +instead of `400`. Also a bug was fixed that caused a crash if that was a node +operation. Additionally we now also check if the given bridge is enabled when +doing the cluster operation `start` . Affected endpoints: + * [cluster] `/bridges/:id/:operation`, + * [node] `/nodes/:node/bridges/:id/:operation`, where `operation` is one of +`[start|stop|restart]`. +Moreover, for a node operation, EMQX checks if node name is in our cluster and +return `404` instead of `501`. diff --git a/changes/ce/fix-10107.zh.md b/changes/ce/fix-10107.zh.md new file mode 100644 index 000000000..e541a834f --- /dev/null +++ b/changes/ce/fix-10107.zh.md @@ -0,0 +1,8 @@ +现在对桥接的 API 进行调用时,如果 `bridge-id` 不存在,将会返回 `404`,而不再是`400`。 +然后,还修复了这种情况下,在节点级别上进行 API 调用时,可能导致崩溃的问题。 +另外,在启动某个桥接时,会先检查指定桥接是否已启用。 +受影响的接口有: + * [cluster] `/bridges/:id/:operation`, + * [node] `/nodes/:node/bridges/:id/:operation`, +其中 `operation` 是 `[start|stop|restart]` 之一。 +此外,对于节点操作,EMQX 将检查节点是否存在于集群中,如果不在,则会返回`404`,而不再是`501`。 diff --git a/changes/ce/fix-10118.en.md b/changes/ce/fix-10118.en.md new file mode 100644 index 000000000..dd6b5129f --- /dev/null +++ b/changes/ce/fix-10118.en.md @@ -0,0 +1,4 @@ +Fix problems related to manual joining of EMQX replicant nodes to the cluster. +Previously, manually joining and then leaving the cluster rendered replicant node unable to start EMQX again and required a node restart. 
+ +[Mria PR](https://github.com/emqx/mria/pull/128) diff --git a/changes/ce/fix-10118.zh.md b/changes/ce/fix-10118.zh.md new file mode 100644 index 000000000..4334a5bba --- /dev/null +++ b/changes/ce/fix-10118.zh.md @@ -0,0 +1,4 @@ +修复与手动加入 EMQX `replicant` 节点到集群有关的问题。 +以前,手动加入然后离开集群会使 `replicant` 节点无法再次启动 EMQX,需要重新启动节点。 + +[Mria PR](https://github.com/emqx/mria/pull/128) diff --git a/changes/ce/fix-10119.en.md b/changes/ce/fix-10119.en.md new file mode 100644 index 000000000..c23a9dcdb --- /dev/null +++ b/changes/ce/fix-10119.en.md @@ -0,0 +1 @@ +Fix crash when `statsd.server` is set to an empty string. diff --git a/changes/ce/fix-10119.zh.md b/changes/ce/fix-10119.zh.md new file mode 100644 index 000000000..c77b99025 --- /dev/null +++ b/changes/ce/fix-10119.zh.md @@ -0,0 +1 @@ +修复 `statsd.server` 配置为空字符串时启动崩溃的问题。 diff --git a/changes/ce/fix-10124.en.md b/changes/ce/fix-10124.en.md new file mode 100644 index 000000000..1a4aca3d9 --- /dev/null +++ b/changes/ce/fix-10124.en.md @@ -0,0 +1 @@ +The default heartbeat period for MongoDB has been increased to reduce the risk of too excessive logging to the MongoDB log file. diff --git a/changes/ce/fix-10124.zh.md b/changes/ce/fix-10124.zh.md new file mode 100644 index 000000000..7605f2da3 --- /dev/null +++ b/changes/ce/fix-10124.zh.md @@ -0,0 +1 @@ +增加了MongoDB的默认心跳周期,以减少对MongoDB日志文件的过多记录的风险。 diff --git a/changes/ce/fix-10132.en.md b/changes/ce/fix-10132.en.md new file mode 100644 index 000000000..ceb617d11 --- /dev/null +++ b/changes/ce/fix-10132.en.md @@ -0,0 +1 @@ +Fix `systemctl stop emqx` command not stopping jq, os_mon application properly, generating some error logs. 
diff --git a/changes/ce/fix-10132.zh.md b/changes/ce/fix-10132.zh.md new file mode 100644 index 000000000..36811e1bf --- /dev/null +++ b/changes/ce/fix-10132.zh.md @@ -0,0 +1 @@ +修复`systemctl stop emqx` 命令没有正常停止 jq, os_mon 组件,产生一些错误日志。 diff --git a/changes/ce/perf-9998.en.md b/changes/ce/perf-9998.en.md new file mode 100644 index 000000000..e9e23a25e --- /dev/null +++ b/changes/ce/perf-9998.en.md @@ -0,0 +1 @@ +Redact the HTTP request body in the authentication error logs for security reasons. diff --git a/changes/ce/perf-9998.zh.md b/changes/ce/perf-9998.zh.md new file mode 100644 index 000000000..146eb858f --- /dev/null +++ b/changes/ce/perf-9998.zh.md @@ -0,0 +1 @@ +出于安全原因,在身份验证错误日志中模糊 HTTP 请求正文。 diff --git a/changes/ee/feat-10083.en.md b/changes/ee/feat-10083.en.md new file mode 100644 index 000000000..635549d5e --- /dev/null +++ b/changes/ee/feat-10083.en.md @@ -0,0 +1 @@ +Integrate `DynamoDB` into `bridges` as a new backend. diff --git a/changes/ee/feat-10083.zh.md b/changes/ee/feat-10083.zh.md new file mode 100644 index 000000000..061e2e416 --- /dev/null +++ b/changes/ee/feat-10083.zh.md @@ -0,0 +1 @@ +在 `桥接` 中集成 `DynamoDB`。 diff --git a/changes/ee/feat-9881.en.md b/changes/ee/feat-9881.en.md new file mode 100644 index 000000000..546178965 --- /dev/null +++ b/changes/ee/feat-9881.en.md @@ -0,0 +1,4 @@ +In this pull request, we have enhanced the error logs related to InfluxDB connectivity health checks. +Previously, if InfluxDB failed to pass the health checks using the specified parameters, the only message provided was "timed out waiting for it to become healthy". +With the updated implementation, the error message will be displayed in both the logs and the dashboard, enabling easier identification and resolution of the issue. 
+ diff --git a/changes/ee/feat-9881.zh.md b/changes/ee/feat-9881.zh.md new file mode 100644 index 000000000..9746a4c0a --- /dev/null +++ b/changes/ee/feat-9881.zh.md @@ -0,0 +1,3 @@ +增强了与 InfluxDB 连接健康检查相关的错误日志。 +在此更改之前,如果使用配置的参数 InfluxDB 未能通过健康检查,用户仅能获得一个“超时”的信息。 +现在,详细的错误消息将显示在日志和控制台,从而让用户更容易地识别和解决问题。 diff --git a/changes/ee/fix-10095.en.md b/changes/ee/fix-10095.en.md new file mode 100644 index 000000000..49c588345 --- /dev/null +++ b/changes/ee/fix-10095.en.md @@ -0,0 +1,3 @@ +Stop MySQL client from bombarding server repeatedly with unnecessary `PREPARE` queries on every batch, trashing the server and exhausting its internal limits. This was happening when the MySQL bridge was in the batch mode. + +Ensure safer and more careful escaping of strings and binaries in batch insert queries when the MySQL bridge is in the batch mode. diff --git a/changes/ee/fix-10095.zh.md b/changes/ee/fix-10095.zh.md new file mode 100644 index 000000000..5a62ccfca --- /dev/null +++ b/changes/ee/fix-10095.zh.md @@ -0,0 +1 @@ +优化 MySQL 桥接在批量模式下能更高效的使用预处理语句 ,减少了对 MySQL 服务器的查询压力, 并确保对 SQL 语句进行更安全和谨慎的转义。 diff --git a/changes/v5.0.16/feat-9893.en.md b/changes/v5.0.16/feat-9893.en.md new file mode 100644 index 000000000..590d82a0f --- /dev/null +++ b/changes/v5.0.16/feat-9893.en.md @@ -0,0 +1 @@ +When connecting with the flag `clean_start=false`, the new session will filter out banned messages from the `mqueue` before deliver. 
diff --git a/changes/v5.0.16/feat-9893.zh.md b/changes/v5.0.16/feat-9893.zh.md new file mode 100644 index 000000000..30286a679 --- /dev/null +++ b/changes/v5.0.16/feat-9893.zh.md @@ -0,0 +1 @@ +当使用 `clean_start=false` 标志连接时,新会话将在传递之前从 `mqueue` 中过滤掉被封禁的消息。 diff --git a/changes/v5.0.18.en.md b/changes/v5.0.18.en.md index e6694773d..15ba9ff84 100644 --- a/changes/v5.0.18.en.md +++ b/changes/v5.0.18.en.md @@ -7,7 +7,9 @@ - [#9213](https://github.com/emqx/emqx/pull/9213) Add pod disruption budget to helm chart - [#9949](https://github.com/emqx/emqx/pull/9949) QUIC transport Multistreams support and QUIC TLS cacert support. - + +- [#9966](https://github.com/emqx/emqx/pull/9966) Add two new Erlang apps 'tools' and 'covertool' to the release. + So we can run profiling and test coverage analysis on release packages. - [#9967](https://github.com/emqx/emqx/pull/9967) New common TLS option 'hibernate_after' to reduce memory footprint per idle connecion, default: 5s. diff --git a/changes/v5.0.18.zh.md b/changes/v5.0.18.zh.md index d88c823d7..e186da913 100644 --- a/changes/v5.0.18.zh.md +++ b/changes/v5.0.18.zh.md @@ -8,6 +8,9 @@ - [#9949](https://github.com/emqx/emqx/pull/9949) QUIC 传输多流支持和 QUIC TLS cacert 支持。 +- [#9966](https://github.com/emqx/emqx/pull/9966) 在发布包中增加了2个新的 Erlang app,分别是 ‘tools’ 和 ‘covertool’。 + 这两个 app 可以用于性能和测试覆盖率的分析。 + - [#9967](https://github.com/emqx/emqx/pull/9967) 新的通用 TLS 选项 'hibernate_after', 以减少空闲连接的内存占用,默认: 5s 。 ## 修复 diff --git a/changes/v5.0.18/fix-9966.en.md b/changes/v5.0.18/fix-9966.en.md deleted file mode 100644 index cc3a0bb8f..000000000 --- a/changes/v5.0.18/fix-9966.en.md +++ /dev/null @@ -1,2 +0,0 @@ -Add two new Erlang apps 'tools' and 'covertool' to the release. -So we can run profiling and test coverage analysis on release packages. 
diff --git a/changes/v5.0.18/fix-9966.zh.md b/changes/v5.0.18/fix-9966.zh.md deleted file mode 100644 index df5b7cff7..000000000 --- a/changes/v5.0.18/fix-9966.zh.md +++ /dev/null @@ -1,2 +0,0 @@ -在发布包中增加了2个新的 Erlang app,分别是 ‘tools’ 和 ‘covertool’。 -这两个 app 可以用于性能和测试覆盖率的分析。 diff --git a/deploy/charts/emqx-enterprise/Chart.yaml b/deploy/charts/emqx-enterprise/Chart.yaml index 8474a00b0..4b5382090 100644 --- a/deploy/charts/emqx-enterprise/Chart.yaml +++ b/deploy/charts/emqx-enterprise/Chart.yaml @@ -14,8 +14,8 @@ type: application # This is the chart version. This version number should be incremented each time you make changes # to the chart and its templates, including the app version. -version: 5.0.1 +version: 5.0.2 # This is the version number of the application being deployed. This version number should be # incremented each time you make changes to the application. -appVersion: 5.0.1 +appVersion: 5.0.2 diff --git a/deploy/charts/emqx-enterprise/templates/service-monitor.yaml b/deploy/charts/emqx-enterprise/templates/service-monitor.yaml index 3d97ab351..b37798a59 100644 --- a/deploy/charts/emqx-enterprise/templates/service-monitor.yaml +++ b/deploy/charts/emqx-enterprise/templates/service-monitor.yaml @@ -11,7 +11,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} {{- if .Values.service.annotations }} annotations: - {{ toYaml .Values.service.annotations | indent 4 }} + {{- toYaml .Values.service.annotations | nindent 4 }} {{- end }} spec: endpoints: diff --git a/deploy/charts/emqx-enterprise/templates/service.yaml b/deploy/charts/emqx-enterprise/templates/service.yaml index 233e69b10..401746a51 100644 --- a/deploy/charts/emqx-enterprise/templates/service.yaml +++ b/deploy/charts/emqx-enterprise/templates/service.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} {{- if .Values.service.annotations }} annotations: -{{ toYaml .Values.service.annotations | indent 4 }} + {{- toYaml .Values.service.annotations | 
nindent 4 }} {{- end }} spec: type: {{ .Values.service.type }} diff --git a/deploy/charts/emqx/templates/service-monitor.yaml b/deploy/charts/emqx/templates/service-monitor.yaml index 3d97ab351..b37798a59 100644 --- a/deploy/charts/emqx/templates/service-monitor.yaml +++ b/deploy/charts/emqx/templates/service-monitor.yaml @@ -11,7 +11,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} {{- if .Values.service.annotations }} annotations: - {{ toYaml .Values.service.annotations | indent 4 }} + {{- toYaml .Values.service.annotations | nindent 4 }} {{- end }} spec: endpoints: diff --git a/deploy/charts/emqx/templates/service.yaml b/deploy/charts/emqx/templates/service.yaml index 233e69b10..401746a51 100644 --- a/deploy/charts/emqx/templates/service.yaml +++ b/deploy/charts/emqx/templates/service.yaml @@ -10,7 +10,7 @@ metadata: app.kubernetes.io/managed-by: {{ .Release.Service }} {{- if .Values.service.annotations }} annotations: -{{ toYaml .Values.service.annotations | indent 4 }} + {{- toYaml .Values.service.annotations | nindent 4 }} {{- end }} spec: type: {{ .Values.service.type }} diff --git a/deploy/docker/README.md b/deploy/docker/README.md index e6067a203..6a37fe979 100644 --- a/deploy/docker/README.md +++ b/deploy/docker/README.md @@ -50,48 +50,42 @@ The EMQX broker runs as Linux user `emqx` in the docker container. All EMQX Configuration in [`etc/emqx.conf`](https://github.com/emqx/emqx/blob/master/apps/emqx/etc/emqx.conf) can be configured via environment variables. -By default, the environment variables with `EMQX_` prefix are mapped to key-value pairs in configuration files. - -You can change the prefix by overriding `HOCON_ENV_OVERRIDE_PREFIX`. +The environment variables with `EMQX_` prefix are mapped to key-value pairs in configuration files. 
Example: ```bash -EMQX_LISTENERS__SSL__DEFAULT__ACCEPTORS <--> listeners.ssl.default.acceptors -EMQX_ZONES__DEFAULT__MQTT__MAX_PACKET_SIZE <--> zones.default.mqtt.max_packet_size +EMQX_DASHBOARD__DEFAULT_PASSWORD <--> dashboard.default_password +EMQX_NODE__COOKIE <--> node.cookie +EMQX_LISTENERS__SSL__default__ENABLE <--> listeners.ssl.default.enable ``` +Note: The lowercase use of 'default' is not a typo. It is used to demonstrate that lowercase environment variables are equivalent. + Prefix `EMQX_` is removed + All upper case letters is replaced with lower case letters + `__` is replaced with `.` -If `HOCON_ENV_OVERRIDE_PREFIX=DEV_` is set: - -```bash -DEV_LISTENER__SSL__EXTERNAL__ACCEPTORS <--> listener.ssl.external.acceptors -DEV_MQTT__MAX_PACKET_SIZE <--> mqtt.max_packet_size -DEV_LISTENERS__TCP__DEFAULT__BIND <--> listeners.tcp.default.bind -``` - For example, set MQTT TCP port to 1883 ```console -$ docker run -d --name emqx -e DEV_LISTENERS__TCP__DEFAULT__BIND=1883 -p 18083:18083 -p 1883:1883 emqx/emqx:latest +$ docker run -d --name emqx -e EMQX_DASHBOARD__DEFAULT_PASSWORD=mysecret -p 18083:18083 -p 1883:1883 emqx/emqx:latest ``` -Please read more about EMQX configuration in the [official documentation](https://www.emqx.io/docs/en/v5.0/admin/cfg.html). +Please read more about EMQX configuration in the [official documentation](https://www.emqx.io/docs/en/v5.0/configuration/configuration.html) #### EMQX node name configuration -| Options | Default | Mapped | Description | -| ---------------------------| ------------------ | ------------------------- | ------------------------------------- | -| `EMQX_NAME` | container name | none | EMQX node short name | -| `EMQX_HOST` | container IP | none | EMQX node host, IP or FQDN | +A node name consists of two parts, `EMQX_NAME` part and `EMQX_HOST` part connected by a the symbol `@`. For example: `emqx@127.0.0.1`. 
-These environment variables are used during container startup phase only in [docker-entrypoint.sh](./docker-entrypoint.sh). +Environment variables `EMQX_NODE_NAME` or `EMQX_NODE__NAME` can be used to set an EMQX node name. +If neither of them is set, EMQX will resolve its node name from the running environment or other environment variables used for node discovery. -If `EMQX_NAME` and `EMQX_HOST` are set, and `EMQX_NODE_NAME` is not set, `EMQX_NODE_NAME=$EMQX_NAME@$EMQX_HOST`. -Otherwise `EMQX_NODE_NAME` is taken verbatim. +When running in docker, by default, `EMQX_NAME` and `EMQX_HOST` are resolved as below: + +| Options | Default | Description | +| -------------| --------------- | -----------------------------| +| `EMQX_NAME` | container name | EMQX node short name | +| `EMQX_HOST` | container IP | EMQX node host, IP or FQDN | ### Cluster @@ -108,8 +102,7 @@ Let's create a static node list cluster from docker-compose. emqx1: image: emqx/emqx:latest environment: - - "EMQX_NAME=emqx" - - "EMQX_HOST=node1.emqx.io" + - "EMQX_NODE_NAME=emqx@node1.emqx.io" - "EMQX_CLUSTER__DISCOVERY_STRATEGY=static" - "EMQX_CLUSTER__STATIC__SEEDS=[emqx@node1.emqx.io, emqx@node2.emqx.io]" networks: @@ -120,8 +113,7 @@ Let's create a static node list cluster from docker-compose.
emqx2: image: emqx/emqx:latest environment: - - "EMQX_NAME=emqx" - - "EMQX_HOST=node2.emqx.io" + - "EMQX_NODE_NAME=emqx@node2.emqx.io" - "EMQX_CLUSTER__DISCOVERY_STRATEGY=static" - "EMQX_CLUSTER__STATIC__SEEDS=[emqx@node1.emqx.io, emqx@node2.emqx.io]" networks: @@ -174,8 +166,7 @@ services: image: emqx/emqx:latest restart: always environment: - EMQX_NAME: foo_emqx - EMQX_HOST: 127.0.0.1 + EMQX_NODE_NAME: foo_emqx@127.0.0.1 volumes: - vol-emqx-data:/opt/emqx/data - vol-emqx-etc:/opt/emqx/etc diff --git a/deploy/docker/docker-entrypoint.sh b/deploy/docker/docker-entrypoint.sh index 1c18ef829..1824e1ee0 100755 --- a/deploy/docker/docker-entrypoint.sh +++ b/deploy/docker/docker-entrypoint.sh @@ -18,28 +18,31 @@ LOCAL_IP=$(hostname -i | grep -oE '((25[0-5]|(2[0-4]|1[0-9]|[1-9]|)[0-9])\.){3}( export EMQX_NAME="${EMQX_NAME:-emqx}" -if [[ -z "$EMQX_HOST" ]]; then - if [[ "$EMQX_CLUSTER__DISCOVERY_STRATEGY" == "dns" ]] && \ - [[ "$EMQX_CLUSTER__DNS__RECORD_TYPE" == "srv" ]] && \ - grep -q "$(hostname).$EMQX_CLUSTER__DNS__NAME" /etc/hosts; then - EMQX_HOST="$(hostname).$EMQX_CLUSTER__DNS__NAME" - elif [[ "$EMQX_CLUSTER__DISCOVERY_STRATEGY" == "k8s" ]] && \ - [[ "$EMQX_CLUSTER__K8S__ADDRESS_TYPE" == "dns" ]] && \ - [[ -n "$EMQX_CLUSTER__K8S__NAMESPACE" ]]; then - EMQX_CLUSTER__K8S__SUFFIX=${EMQX_CLUSTER__K8S__SUFFIX:-"pod.cluster.local"} - EMQX_HOST="${LOCAL_IP//./-}.$EMQX_CLUSTER__K8S__NAMESPACE.$EMQX_CLUSTER__K8S__SUFFIX" - elif [[ "$EMQX_CLUSTER__DISCOVERY_STRATEGY" == "k8s" ]] && \ - [[ "$EMQX_CLUSTER__K8S__ADDRESS_TYPE" == 'hostname' ]] && \ - [[ -n "$EMQX_CLUSTER__K8S__NAMESPACE" ]]; then - EMQX_CLUSTER__K8S__SUFFIX=${EMQX_CLUSTER__K8S__SUFFIX:-'svc.cluster.local'} - EMQX_HOST=$(grep -h "^$LOCAL_IP" /etc/hosts | grep -o "$(hostname).*.$EMQX_CLUSTER__K8S__NAMESPACE.$EMQX_CLUSTER__K8S__SUFFIX") - else - EMQX_HOST="$LOCAL_IP" +## EMQX_NODE_NAME or EMQX_NODE__NAME to indicate the full node name to be used by EMQX +## If both are set EMQX_NODE_NAME takes higher precedence 
than EMQX_NODE__NAME +if [[ -z "${EMQX_NODE_NAME:-}" ]] && [[ -z "${EMQX_NODE__NAME:-}" ]]; then + # No node name is provide from environment variables + # try to resolve from other settings + if [[ -z "$EMQX_HOST" ]]; then + if [[ "$EMQX_CLUSTER__DISCOVERY_STRATEGY" == "dns" ]] && \ + [[ "$EMQX_CLUSTER__DNS__RECORD_TYPE" == "srv" ]] && \ + grep -q "$(hostname).$EMQX_CLUSTER__DNS__NAME" /etc/hosts; then + EMQX_HOST="$(hostname).$EMQX_CLUSTER__DNS__NAME" + elif [[ "$EMQX_CLUSTER__DISCOVERY_STRATEGY" == "k8s" ]] && \ + [[ "$EMQX_CLUSTER__K8S__ADDRESS_TYPE" == "dns" ]] && \ + [[ -n "$EMQX_CLUSTER__K8S__NAMESPACE" ]]; then + EMQX_CLUSTER__K8S__SUFFIX=${EMQX_CLUSTER__K8S__SUFFIX:-"pod.cluster.local"} + EMQX_HOST="${LOCAL_IP//./-}.$EMQX_CLUSTER__K8S__NAMESPACE.$EMQX_CLUSTER__K8S__SUFFIX" + elif [[ "$EMQX_CLUSTER__DISCOVERY_STRATEGY" == "k8s" ]] && \ + [[ "$EMQX_CLUSTER__K8S__ADDRESS_TYPE" == 'hostname' ]] && \ + [[ -n "$EMQX_CLUSTER__K8S__NAMESPACE" ]]; then + EMQX_CLUSTER__K8S__SUFFIX=${EMQX_CLUSTER__K8S__SUFFIX:-'svc.cluster.local'} + EMQX_HOST=$(grep -h "^$LOCAL_IP" /etc/hosts | grep -o "$(hostname).*.$EMQX_CLUSTER__K8S__NAMESPACE.$EMQX_CLUSTER__K8S__SUFFIX") + else + EMQX_HOST="$LOCAL_IP" + fi + export EMQX_HOST fi - export EMQX_HOST -fi - -if [[ -z "$EMQX_NODE_NAME" ]]; then export EMQX_NODE_NAME="$EMQX_NAME@$EMQX_HOST" fi diff --git a/lib-ee/emqx_ee_bridge/docker-ct b/lib-ee/emqx_ee_bridge/docker-ct index 967faa343..ac1728ad2 100644 --- a/lib-ee/emqx_ee_bridge/docker-ct +++ b/lib-ee/emqx_ee_bridge/docker-ct @@ -8,3 +8,5 @@ redis redis_cluster pgsql tdengine +clickhouse +dynamo diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_clickhouse.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_clickhouse.conf new file mode 100644 index 000000000..6a28b371a --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_clickhouse.conf @@ -0,0 +1,109 @@ +emqx_ee_bridge_clickhouse { + + local_topic { + desc { + en: """The MQTT topic filter to be forwarded to Clickhouse. 
All MQTT 'PUBLISH' messages with the topic +matching the local_topic will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also local_topic is +configured, then both the data got from the rule and the MQTT messages that match local_topic +will be forwarded. +""" + zh: """发送到 'local_topic' 的消息都会转发到 Clickhouse。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。 +""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + sql_template { + desc { + en: """SQL Template. The template string can contain placeholders +for message metadata and payload field. The placeholders are inserted +without any checking and special formatting, so it is important to +ensure that the inserted values are formatted and escaped correctly.""" + zh: + """SQL模板。模板字符串可以包含消息元数据和有效载荷字段的占位符。占位符 +的插入不需要任何检查和特殊格式化,因此必须确保插入的数值格式化和转义正确。模板字符串可以包含占位符 +模板字符串可以包含消息元数据和有效载荷字段的占位符。这些占位符被插入 +所以必须确保插入的值的格式正确。因此,确保插入的值格式化和转义正确是非常重要的。模板字符串可以包含占位符 +模板字符串可以包含消息元数据和有效载荷字段的占位符。这些占位符被插入 +所以必须确保插入的值的格式正确。确保插入的值被正确地格式化和转义。""" + } + label { + en: "SQL Template" + zh: "SQL 模板" + } + } + batch_value_separator { + desc { + en: """The bridge repeats what comes after the VALUES or FORMAT FormatType in the +SQL template to form a batch request. The value specified with +this parameter will be inserted between the values. The default +value ',' works for the VALUES format, but other values +might be needed if you specify some other format with the +clickhouse FORMAT syntax. 
+ +See https://clickhouse.com/docs/en/sql-reference/statements/insert-into/ and +https://clickhouse.com/docs/en/interfaces/formats#formats for more information about +the format syntax and the available formats.""" + zh: """桥接会重复 VALUES 或 FORMAT 格式类型之后的内容。中 VALUES 或 +FORMAT FormatType 后面的内容,以形成一个批处理请求。用这个参数指定的值 +这个参数指定的值将被插入到这些值之间。默认的 +默认值','适用于VALUES格式,但是如果你指定了其他的格式,可能需要其他的值。可能需要其他值,如果你用 +"clickhouse FORMAT "语法指定其他格式。语法指定其他格式。 + +参见https://clickhouse.com/docs/en/sql-reference/statements/insert-into/ 和 +https://clickhouse.com/docs/en/interfaces/formats#formats 了解更多关于 +格式语法和可用的格式。""" + } + label { + en: "Batch Value Separator" + zh: "批量值分离器" + } + } + config_enable { + desc { + en: """Enable or disable this bridge""" + zh: """启用/禁用桥接""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + + desc_config { + desc { + en: """Configuration for a Clickhouse bridge.""" + zh: """Clickhouse 桥接配置""" + } + label: { + en: "Clickhouse Bridge Configuration" + zh: "Clickhouse 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type""" + zh: """Bridge 类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name.""" + zh: """桥接名字""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_dynamo.conf b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_dynamo.conf new file mode 100644 index 000000000..664b13174 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/i18n/emqx_ee_bridge_dynamo.conf @@ -0,0 +1,72 @@ +emqx_ee_bridge_dynamo { + + local_topic { + desc { + en: """The MQTT topic filter to be forwarded to DynamoDB. All MQTT `PUBLISH` messages with the topic +matching the `local_topic` will be forwarded.
+NOTE: if this bridge is used as the action of a rule (EMQX rule engine), and also `local_topic` is +configured, then both the data got from the rule and the MQTT messages that match `local_topic` +will be forwarded.""" + zh: """发送到 'local_topic' 的消息都会转发到 DynamoDB。
+注意:如果这个 Bridge 被用作规则(EMQX 规则引擎)的输出,同时也配置了 'local_topic' ,那么这两部分的消息都会被转发。""" + } + label { + en: "Local Topic" + zh: "本地 Topic" + } + } + + template { + desc { + en: """Template, the default value is empty. When this value is empty the whole message will be stored in the database""" + zh: """模板, 默认为空,为空时将会将整个消息存入数据库""" + } + label { + en: "Template" + zh: "模板" + } + } + config_enable { + desc { + en: """Enable or disable this bridge""" + zh: """启用/禁用桥接""" + } + label { + en: "Enable Or Disable Bridge" + zh: "启用/禁用桥接" + } + } + + desc_config { + desc { + en: """Configuration for an DynamoDB bridge.""" + zh: """DynamoDB 桥接配置""" + } + label: { + en: "DynamoDB Bridge Configuration" + zh: "DynamoDB 桥接配置" + } + } + + desc_type { + desc { + en: """The Bridge Type""" + zh: """Bridge 类型""" + } + label { + en: "Bridge Type" + zh: "桥接类型" + } + } + + desc_name { + desc { + en: """Bridge name.""" + zh: """桥接名字""" + } + label { + en: "Bridge Name" + zh: "桥接名字" + } + } +} diff --git a/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_acked.json b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_acked.json new file mode 100644 index 000000000..6ede088a4 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_acked.json @@ -0,0 +1,15 @@ +{ + "TableName": "mqtt_acked", + "KeySchema": [ + { "AttributeName": "topic", "KeyType": "HASH" }, + { "AttributeName": "clientid", "KeyType": "RANGE" } + ], + "AttributeDefinitions": [ + { "AttributeName": "topic", "AttributeType": "S" }, + { "AttributeName": "clientid", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_client.json b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_client.json new file mode 100644 index 000000000..ce1b7d267 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_client.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_client", + "KeySchema": [ + { "AttributeName": "clientid", "KeyType": "HASH" } + ], + "AttributeDefinitions": 
[ + { "AttributeName": "clientid", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_clientid_msg_map.json b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_clientid_msg_map.json new file mode 100644 index 000000000..fd703c664 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_clientid_msg_map.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_clientid_msg_map", + "KeySchema": [ + { "AttributeName": "clientid", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "clientid", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_msg.json b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_msg.json new file mode 100644 index 000000000..ad94b8f72 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_msg.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_msg", + "KeySchema": [ + { "AttributeName": "id", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "id", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_retain.json b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_retain.json new file mode 100644 index 000000000..2a0af2e86 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_retain.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_retain", + "KeySchema": [ + { "AttributeName": "topic", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "topic", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_sub.json b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_sub.json new file mode 100644 index 000000000..9a559f048 --- /dev/null +++ 
b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_sub.json @@ -0,0 +1,16 @@ +{ + "TableName": "mqtt_sub", + "KeySchema": [ + { "AttributeName": "clientid", "KeyType": "HASH" }, + { "AttributeName": "topic", "KeyType": "RANGE" } + ], + "AttributeDefinitions": [ + { "AttributeName": "clientid", "AttributeType": "S" }, + { "AttributeName": "topic", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} + diff --git a/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_topic_msg_map.json b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_topic_msg_map.json new file mode 100644 index 000000000..effd4b4b9 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_topic_msg_map.json @@ -0,0 +1,13 @@ +{ + "TableName": "mqtt_topic_msg_map", + "KeySchema": [ + { "AttributeName": "topic", "KeyType": "HASH" } + ], + "AttributeDefinitions": [ + { "AttributeName": "topic", "AttributeType": "S" } + ], + "ProvisionedThroughput": { + "ReadCapacityUnits": 5, + "WriteCapacityUnits": 5 + } +} diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src index 05d893a79..ac181b251 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_bridge, [ {description, "EMQX Enterprise data bridges"}, - {vsn, "0.1.6"}, + {vsn, "0.1.7"}, {registered, []}, {applications, [ kernel, diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl index 1a358fdfe..b5c656291 100644 --- a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge.erl @@ -29,7 +29,9 @@ api_schemas(Method) -> ref(emqx_ee_bridge_redis, Method ++ "_cluster"), ref(emqx_ee_bridge_timescale, Method), ref(emqx_ee_bridge_matrix, Method), - ref(emqx_ee_bridge_tdengine, Method) + ref(emqx_ee_bridge_tdengine, Method), + ref(emqx_ee_bridge_clickhouse, Method), + 
ref(emqx_ee_bridge_dynamo, Method) ]. schema_modules() -> @@ -44,7 +46,9 @@ schema_modules() -> emqx_ee_bridge_pgsql, emqx_ee_bridge_timescale, emqx_ee_bridge_matrix, - emqx_ee_bridge_tdengine + emqx_ee_bridge_tdengine, + emqx_ee_bridge_clickhouse, + emqx_ee_bridge_dynamo ]. examples(Method) -> @@ -75,7 +79,9 @@ resource_type(redis_cluster) -> emqx_ee_connector_redis; resource_type(pgsql) -> emqx_connector_pgsql; resource_type(timescale) -> emqx_connector_pgsql; resource_type(matrix) -> emqx_connector_pgsql; -resource_type(tdengine) -> emqx_ee_connector_tdengine. +resource_type(tdengine) -> emqx_ee_connector_tdengine; +resource_type(clickhouse) -> emqx_ee_connector_clickhouse; +resource_type(dynamo) -> emqx_ee_connector_dynamo. fields(bridges) -> [ @@ -118,8 +124,17 @@ fields(bridges) -> desc => <<"TDengine Bridge Config">>, required => false } + )}, + {dynamo, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_dynamo, "config")), + #{ + desc => <<"Dynamo Bridge Config">>, + required => false + } )} - ] ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs() ++ pgsql_structs(). + ] ++ mongodb_structs() ++ influxdb_structs() ++ redis_structs() ++ pgsql_structs() ++ + clickhouse_structs(). mongodb_structs() -> [ @@ -183,3 +198,15 @@ pgsql_structs() -> {matrix, <<"Matrix">>} ] ]. + +clickhouse_structs() -> + [ + {clickhouse, + mk( + hoconsc:map(name, ref(emqx_ee_bridge_clickhouse, "config")), + #{ + desc => <<"Clickhouse Bridge Config">>, + required => false + } + )} + ]. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_clickhouse.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_clickhouse.erl new file mode 100644 index 000000000..9e03aca4a --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_clickhouse.erl @@ -0,0 +1,143 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_clickhouse). + +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_SQL, + <<"INSERT INTO mqtt_test(payload, arrived) VALUES ('${payload}', ${timestamp})">> +). + +-define(DEFAULT_BATCH_VALUE_SEPARATOR, <<", ">>). + +%% ------------------------------------------------------------------------------------------------- +%% Callback used by HTTP API +%% ------------------------------------------------------------------------------------------------- + +conn_bridge_examples(Method) -> + [ + #{ + <<"clickhouse">> => #{ + summary => <<"Clickhouse Bridge">>, + value => values(Method, "clickhouse") + } + } + ]. + +values(get, Type) -> + maps:merge(values(post, Type), ?METRICS_EXAMPLE); +values(post, Type) -> + #{ + enable => true, + type => Type, + name => <<"foo">>, + server => <<"127.0.0.1:8123">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"default">>, + password => <<"public">>, + sql => ?DEFAULT_SQL, + batch_value_separator => ?DEFAULT_BATCH_VALUE_SEPARATOR, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => async, + max_queue_bytes => ?DEFAULT_QUEUE_SIZE + } + }; +values(put, Type) -> + values(post, Type). 
+ +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +%% ------------------------------------------------------------------------------------------------- + +namespace() -> "bridge_clickhouse". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {sql, + mk( + binary(), + #{desc => ?DESC("sql_template"), default => ?DEFAULT_SQL, format => <<"sql">>} + )}, + {batch_value_separator, + mk( + binary(), + #{desc => ?DESC("batch_value_separator"), default => ?DEFAULT_BATCH_VALUE_SEPARATOR} + )}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )}, + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ] ++ + emqx_ee_connector_clickhouse:fields(config); +fields("creation_opts") -> + Opts = emqx_resource_schema:fields("creation_opts"), + [O || {Field, _} = O <- Opts, not is_hidden_opts(Field)]; +fields("post") -> + fields("post", clickhouse); +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). + +fields("post", Type) -> + [type_field(Type), name_field() | fields("config")]. + +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for Clickhouse using `", string:to_upper(Method), "` method."]; +desc("creation_opts" = Name) -> + emqx_resource_schema:desc(Name); +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- +%% internal +%% ------------------------------------------------------------------------------------------------- +is_hidden_opts(Field) -> + lists:member(Field, [ + async_inflight_window + ]). 
+ +type_field(Type) -> + {type, mk(enum([Type]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_dynamo.erl b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_dynamo.erl new file mode 100644 index 000000000..066b873ce --- /dev/null +++ b/lib-ee/emqx_ee_bridge/src/emqx_ee_bridge_dynamo.erl @@ -0,0 +1,122 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- +-module(emqx_ee_bridge_dynamo). + +-include_lib("typerefl/include/types.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("emqx_bridge/include/emqx_bridge.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +-export([ + conn_bridge_examples/1, + values/1 +]). + +-export([ + namespace/0, + roots/0, + fields/1, + desc/1 +]). + +-define(DEFAULT_TEMPLATE, <<>>). + +%% ------------------------------------------------------------------------------------------------- +%% api + +conn_bridge_examples(Method) -> + [ + #{ + <<"dynamo">> => #{ + summary => <<"DynamoDB Bridge">>, + value => values(Method) + } + } + ]. 
+ +values(get) -> + maps:merge(values(post), ?METRICS_EXAMPLE); +values(post) -> + #{ + enable => true, + type => dynamo, + name => <<"foo">>, + url => <<"http://127.0.0.1:8000">>, + database => <<"mqtt">>, + pool_size => 8, + username => <<"root">>, + password => <<"public">>, + template => ?DEFAULT_TEMPLATE, + local_topic => <<"local/topic/#">>, + resource_opts => #{ + worker_pool_size => 8, + health_check_interval => ?HEALTHCHECK_INTERVAL_RAW, + auto_restart_interval => ?AUTO_RESTART_INTERVAL_RAW, + batch_size => ?DEFAULT_BATCH_SIZE, + batch_time => ?DEFAULT_BATCH_TIME, + query_mode => sync, + max_queue_bytes => ?DEFAULT_QUEUE_SIZE + } + }; +values(put) -> + values(post). + +%% ------------------------------------------------------------------------------------------------- +%% Hocon Schema Definitions +namespace() -> "bridge_dynamo". + +roots() -> []. + +fields("config") -> + [ + {enable, mk(boolean(), #{desc => ?DESC("config_enable"), default => true})}, + {template, + mk( + binary(), + #{desc => ?DESC("template"), default => ?DEFAULT_TEMPLATE} + )}, + {local_topic, + mk( + binary(), + #{desc => ?DESC("local_topic"), default => undefined} + )}, + {resource_opts, + mk( + ref(?MODULE, "creation_opts"), + #{ + required => false, + default => #{}, + desc => ?DESC(emqx_resource_schema, <<"resource_opts">>) + } + )} + ] ++ + (emqx_ee_connector_dynamo:fields(config) -- + emqx_connector_schema_lib:prepare_statement_fields()); +fields("creation_opts") -> + emqx_resource_schema:fields("creation_opts"); +fields("post") -> + [type_field(), name_field() | fields("config")]; +fields("put") -> + fields("config"); +fields("get") -> + emqx_bridge_schema:status_fields() ++ fields("post"). 
+ +desc("config") -> + ?DESC("desc_config"); +desc(Method) when Method =:= "get"; Method =:= "put"; Method =:= "post" -> + ["Configuration for PostgreSQL using `", string:to_upper(Method), "` method."]; +desc("creation_opts" = Name) -> + emqx_resource_schema:desc(Name); +desc(_) -> + undefined. + +%% ------------------------------------------------------------------------------------------------- + +type_field() -> + {type, mk(enum([dynamo]), #{required => true, desc => ?DESC("desc_type")})}. + +name_field() -> + {name, mk(binary(), #{required => true, desc => ?DESC("desc_name")})}. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_clickhouse_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_clickhouse_SUITE.erl new file mode 100644 index 000000000..6d4762882 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_clickhouse_SUITE.erl @@ -0,0 +1,325 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_ee_bridge_clickhouse_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-define(CLICKHOUSE_HOST, "clickhouse"). +-define(CLICKHOUSE_RESOURCE_MOD, emqx_ee_connector_clickhouse). +-include_lib("emqx_connector/include/emqx_connector.hrl"). 
+ +%% See comment in +%% lib-ee/emqx_ee_connector/test/ee_connector_clickhouse_SUITE.erl for how to +%% run this without bringing up the whole CI infrastucture + +%%------------------------------------------------------------------------------ +%% Common Test Setup, Teardown and Testcase List +%%------------------------------------------------------------------------------ + +init_per_suite(Config) -> + case + emqx_common_test_helpers:is_tcp_server_available(?CLICKHOUSE_HOST, ?CLICKHOUSE_DEFAULT_PORT) + of + true -> + emqx_common_test_helpers:render_and_load_app_config(emqx_conf), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + ok = emqx_connector_test_helpers:start_apps([emqx_resource]), + {ok, _} = application:ensure_all_started(emqx_connector), + {ok, _} = application:ensure_all_started(emqx_ee_connector), + {ok, _} = application:ensure_all_started(emqx_ee_bridge), + snabbkaffe:fix_ct_logging(), + %% Create the db table + Conn = start_clickhouse_connection(), + % erlang:monitor,sb + {ok, _, _} = clickhouse:query(Conn, sql_create_database(), #{}), + {ok, _, _} = clickhouse:query(Conn, sql_create_table(), []), + clickhouse:query(Conn, sql_find_key(42), []), + [{clickhouse_connection, Conn} | Config]; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_clickhouse); + _ -> + {skip, no_clickhouse} + end + end. + +start_clickhouse_connection() -> + %% Start clickhouse connector in sub process so that it does not go + %% down with the process that is calling init_per_suite + InitPerSuiteProcess = self(), + erlang:spawn( + fun() -> + {ok, Conn} = + clickhouse:start_link([ + {url, clickhouse_url()}, + {user, <<"default">>}, + {key, "public"}, + {pool, tmp_pool} + ]), + InitPerSuiteProcess ! {clickhouse_connection, Conn}, + Ref = erlang:monitor(process, Conn), + receive + {'DOWN', Ref, process, _, _} -> + erlang:display(helper_down), + ok + end + end + ), + receive + {clickhouse_connection, C} -> C + end. 
+ +end_per_suite(Config) -> + ClickhouseConnection = proplists:get_value(clickhouse_connection, Config), + clickhouse:stop(ClickhouseConnection), + ok = emqx_common_test_helpers:stop_apps([emqx_conf]), + ok = emqx_connector_test_helpers:stop_apps([emqx_resource]), + _ = application:stop(emqx_connector), + _ = application:stop(emqx_ee_connector), + _ = application:stop(emqx_bridge). + +init_per_testcase(_, Config) -> + reset_table(Config), + Config. + +end_per_testcase(_, Config) -> + reset_table(Config), + ok. + +all() -> + emqx_common_test_helpers:all(?MODULE). + +%%------------------------------------------------------------------------------ +%% Helper functions for test cases +%%------------------------------------------------------------------------------ + +sql_insert_template_for_bridge() -> + "INSERT INTO mqtt_test(key, data, arrived) VALUES " + "(${key}, '${data}', ${timestamp})". + +sql_insert_template_for_bridge_json() -> + "INSERT INTO mqtt_test(key, data, arrived) FORMAT JSONCompactEachRow " + "[${key}, \\\"${data}\\\", ${timestamp}]". + +sql_create_table() -> + "CREATE TABLE IF NOT EXISTS mqtt.mqtt_test (key BIGINT, data String, arrived BIGINT) ENGINE = Memory". + +sql_find_key(Key) -> + io_lib:format("SELECT key FROM mqtt.mqtt_test WHERE key = ~p", [Key]). + +sql_find_all_keys() -> + "SELECT key FROM mqtt.mqtt_test". + +sql_drop_table() -> + "DROP TABLE IF EXISTS mqtt.mqtt_test". + +sql_create_database() -> + "CREATE DATABASE IF NOT EXISTS mqtt". + +clickhouse_url() -> + erlang:iolist_to_binary([ + <<"http://">>, + ?CLICKHOUSE_HOST, + ":", + erlang:integer_to_list(?CLICKHOUSE_DEFAULT_PORT) + ]). 
+ +clickhouse_config(Config) -> + SQL = maps:get(sql, Config, sql_insert_template_for_bridge()), + BatchSeparator = maps:get(batch_value_separator, Config, <<", ">>), + BatchSize = maps:get(batch_size, Config, 1), + BatchTime = maps:get(batch_time_ms, Config, 0), + EnableBatch = maps:get(enable_batch, Config, true), + Name = atom_to_binary(?MODULE), + URL = clickhouse_url(), + ConfigString = + io_lib:format( + "bridges.clickhouse.~s {\n" + " enable = true\n" + " url = \"~s\"\n" + " database = \"mqtt\"\n" + " sql = \"~s\"\n" + " batch_value_separator = \"~s\"" + " resource_opts = {\n" + " enable_batch = ~w\n" + " batch_size = ~b\n" + " batch_time = ~bms\n" + " }\n" + "}\n", + [ + Name, + URL, + SQL, + BatchSeparator, + EnableBatch, + BatchSize, + BatchTime + ] + ), + ct:pal(ConfigString), + parse_and_check(ConfigString, <<"clickhouse">>, Name). + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := RetConfig}}} = RawConf, + RetConfig. + +make_bridge(Config) -> + Type = <<"clickhouse">>, + Name = atom_to_binary(?MODULE), + BridgeConfig = clickhouse_config(Config), + {ok, _} = emqx_bridge:create( + Type, + Name, + BridgeConfig + ), + emqx_bridge_resource:bridge_id(Type, Name). + +delete_bridge() -> + Type = <<"clickhouse">>, + Name = atom_to_binary(?MODULE), + {ok, _} = emqx_bridge:remove(Type, Name), + ok. + +reset_table(Config) -> + ClickhouseConnection = proplists:get_value(clickhouse_connection, Config), + {ok, _, _} = clickhouse:query(ClickhouseConnection, sql_drop_table(), []), + {ok, _, _} = clickhouse:query(ClickhouseConnection, sql_create_table(), []), + ok. 
+ +check_key_in_clickhouse(AttempsLeft, Key, Config) -> + ClickhouseConnection = proplists:get_value(clickhouse_connection, Config), + check_key_in_clickhouse(AttempsLeft, Key, none, ClickhouseConnection). + +check_key_in_clickhouse(Key, Config) -> + ClickhouseConnection = proplists:get_value(clickhouse_connection, Config), + check_key_in_clickhouse(30, Key, none, ClickhouseConnection). + +check_key_in_clickhouse(0, Key, PrevResult, _) -> + ct:fail("Expected ~p in database but got ~s", [Key, PrevResult]); +check_key_in_clickhouse(AttempsLeft, Key, _, ClickhouseConnection) -> + {ok, 200, ResultString} = clickhouse:query(ClickhouseConnection, sql_find_key(Key), []), + Expected = erlang:integer_to_binary(Key), + case iolist_to_binary(string:trim(ResultString)) of + Expected -> + ok; + SomethingElse -> + timer:sleep(100), + check_key_in_clickhouse(AttempsLeft - 1, Key, SomethingElse, ClickhouseConnection) + end. + +%%------------------------------------------------------------------------------ +%% Test Cases +%%------------------------------------------------------------------------------ + +t_make_delete_bridge(_Config) -> + make_bridge(#{}), + %% Check that the new brige is in the list of bridges + Bridges = emqx_bridge:list(), + Name = atom_to_binary(?MODULE), + IsRightName = + fun + (#{name := BName}) when BName =:= Name -> + true; + (_) -> + false + end, + true = lists:any(IsRightName, Bridges), + delete_bridge(), + BridgesAfterDelete = emqx_bridge:list(), + false = lists:any(IsRightName, BridgesAfterDelete), + ok. + +t_send_message_query(Config) -> + BridgeID = make_bridge(#{enable_batch => false}), + Key = 42, + Payload = #{key => Key, data => <<"clickhouse_data">>, timestamp => 10000}, + %% This will use the SQL template included in the bridge + emqx_bridge:send_message(BridgeID, Payload), + %% Check that the data got to the database + check_key_in_clickhouse(Key, Config), + delete_bridge(), + ok. 
+ +t_send_simple_batch(Config) -> + send_simple_batch_helper(Config, #{}). + +t_send_simple_batch_alternative_format(Config) -> + send_simple_batch_helper( + Config, + #{ + sql => sql_insert_template_for_bridge_json(), + batch_value_separator => <<"">> + } + ). + +send_simple_batch_helper(Config, BridgeConfigExt) -> + BridgeConf = maps:merge( + #{ + batch_size => 100, + enable_batch => true + }, + BridgeConfigExt + ), + BridgeID = make_bridge(BridgeConf), + Key = 42, + Payload = #{key => Key, data => <<"clickhouse_data">>, timestamp => 10000}, + %% This will use the SQL template included in the bridge + emqx_bridge:send_message(BridgeID, Payload), + check_key_in_clickhouse(Key, Config), + delete_bridge(), + ok. + +t_heavy_batching(Config) -> + heavy_batching_helper(Config, #{}). + +t_heavy_batching_alternative_format(Config) -> + heavy_batching_helper( + Config, + #{ + sql => sql_insert_template_for_bridge_json(), + batch_value_separator => <<"">> + } + ). + +heavy_batching_helper(Config, BridgeConfigExt) -> + ClickhouseConnection = proplists:get_value(clickhouse_connection, Config), + NumberOfMessages = 10000, + BridgeConf = maps:merge( + #{ + batch_size => 743, + batch_time_ms => 50, + enable_batch => true + }, + BridgeConfigExt + ), + BridgeID = make_bridge(BridgeConf), + SendMessageKey = fun(Key) -> + Payload = #{ + key => Key, + data => <<"clickhouse_data">>, + timestamp => 10000 + }, + emqx_bridge:send_message(BridgeID, Payload) + end, + [SendMessageKey(Key) || Key <- lists:seq(1, NumberOfMessages)], + % Wait until the last message is in clickhouse + %% The delay between attempts is 100ms so 150 attempts means 15 seconds + check_key_in_clickhouse(_AttemptsToFindKey = 150, NumberOfMessages, Config), + %% In case the messages are not sent in order (could happend with multiple buffer workers) + timer:sleep(1000), + {ok, 200, ResultString1} = clickhouse:query(ClickhouseConnection, sql_find_all_keys(), []), + ResultString2 = 
iolist_to_binary(string:trim(ResultString1)), + KeyStrings = string:lexemes(ResultString2, "\n"), + Keys = [erlang:binary_to_integer(iolist_to_binary(K)) || K <- KeyStrings], + KeySet = maps:from_keys(Keys, true), + NumberOfMessages = maps:size(KeySet), + CheckKey = fun(Key) -> maps:get(Key, KeySet, false) end, + true = lists:all(CheckKey, lists:seq(1, NumberOfMessages)), + delete_bridge(), + ok. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_dynamo_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_dynamo_SUITE.erl new file mode 100644 index 000000000..26666c6d8 --- /dev/null +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_dynamo_SUITE.erl @@ -0,0 +1,422 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2022-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_ee_bridge_dynamo_SUITE). + +-compile(nowarn_export_all). +-compile(export_all). + +-include_lib("eunit/include/eunit.hrl"). +-include_lib("common_test/include/ct.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +% DB defaults +-define(TABLE, "mqtt"). +-define(TABLE_BIN, to_bin(?TABLE)). +-define(USERNAME, "root"). +-define(PASSWORD, "public"). +-define(HOST, "dynamo"). +-define(PORT, 8000). +-define(SCHEMA, "http://"). +-define(BATCH_SIZE, 10). +-define(PAYLOAD, <<"HELLO">>). + +-define(GET_CONFIG(KEY__, CFG__), proplists:get_value(KEY__, CFG__)). + +%%------------------------------------------------------------------------------ +%% CT boilerplate +%%------------------------------------------------------------------------------ + +all() -> + [ + {group, with_batch}, + {group, without_batch}, + {group, flaky} + ]. + +groups() -> + TCs0 = emqx_common_test_helpers:all(?MODULE), + + %% due to the poorly implemented driver or other reasons + %% if we mix these cases with others, this suite will become flaky. 
+ Flaky = [t_get_status, t_write_failure, t_write_timeout], + TCs = TCs0 -- Flaky, + + [ + {with_batch, TCs}, + {without_batch, TCs}, + {flaky, Flaky} + ]. + +init_per_group(with_batch, Config0) -> + Config = [{batch_size, ?BATCH_SIZE} | Config0], + common_init(Config); +init_per_group(without_batch, Config0) -> + Config = [{batch_size, 1} | Config0], + common_init(Config); +init_per_group(flaky, Config0) -> + Config = [{batch_size, 1} | Config0], + common_init(Config); +init_per_group(_Group, Config) -> + Config. + +end_per_group(Group, Config) when Group =:= with_batch; Group =:= without_batch -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok; +end_per_group(Group, Config) when Group =:= flaky -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + timer:sleep(1000), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + emqx_mgmt_api_test_util:end_suite(), + ok = emqx_common_test_helpers:stop_apps([emqx_bridge, emqx_conf]), + ok. + +init_per_testcase(_Testcase, Config) -> + create_table(Config), + Config. + +end_per_testcase(_Testcase, Config) -> + ProxyHost = ?config(proxy_host, Config), + ProxyPort = ?config(proxy_port, Config), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + ok = snabbkaffe:stop(), + delete_table(Config), + delete_bridge(Config), + ok. 
+ +%%------------------------------------------------------------------------------ +%% Helper fns +%%------------------------------------------------------------------------------ + +common_init(ConfigT) -> + Host = os:getenv("DYNAMO_HOST", "toxiproxy"), + Port = list_to_integer(os:getenv("DYNAMO_PORT", "8000")), + + Config0 = [ + {host, Host}, + {port, Port}, + {query_mode, sync}, + {proxy_name, "dynamo"} + | ConfigT + ], + + BridgeType = proplists:get_value(bridge_type, Config0, <<"dynamo">>), + case emqx_common_test_helpers:is_tcp_server_available(Host, Port) of + true -> + % Setup toxiproxy + ProxyHost = os:getenv("PROXY_HOST", "toxiproxy"), + ProxyPort = list_to_integer(os:getenv("PROXY_PORT", "8474")), + emqx_common_test_helpers:reset_proxy(ProxyHost, ProxyPort), + % Ensure EE bridge module is loaded + _ = application:load(emqx_ee_bridge), + _ = emqx_ee_bridge:module_info(), + ok = emqx_common_test_helpers:start_apps([emqx_conf, emqx_bridge]), + emqx_mgmt_api_test_util:init_suite(), + % setup dynamo + setup_dynamo(Config0), + {Name, TDConf} = dynamo_config(BridgeType, Config0), + Config = + [ + {dynamo_config, TDConf}, + {dynamo_bridge_type, BridgeType}, + {dynamo_name, Name}, + {proxy_host, ProxyHost}, + {proxy_port, ProxyPort} + | Config0 + ], + Config; + false -> + case os:getenv("IS_CI") of + "yes" -> + throw(no_dynamo); + _ -> + {skip, no_dynamo} + end + end. 
+ +dynamo_config(BridgeType, Config) -> + Port = integer_to_list(?GET_CONFIG(port, Config)), + Url = "http://" ++ ?GET_CONFIG(host, Config) ++ ":" ++ Port, + Name = atom_to_binary(?MODULE), + BatchSize = ?GET_CONFIG(batch_size, Config), + QueryMode = ?GET_CONFIG(query_mode, Config), + ConfigString = + io_lib:format( + "bridges.~s.~s {\n" + " enable = true\n" + " url = ~p\n" + " database = ~p\n" + " username = ~p\n" + " password = ~p\n" + " resource_opts = {\n" + " request_timeout = 500ms\n" + " batch_size = ~b\n" + " query_mode = ~s\n" + " }\n" + "}", + [ + BridgeType, + Name, + Url, + ?TABLE, + ?USERNAME, + ?PASSWORD, + BatchSize, + QueryMode + ] + ), + {Name, parse_and_check(ConfigString, BridgeType, Name)}. + +parse_and_check(ConfigString, BridgeType, Name) -> + {ok, RawConf} = hocon:binary(ConfigString, #{format => map}), + hocon_tconf:check_plain(emqx_bridge_schema, RawConf, #{required => false, atom_key => false}), + #{<<"bridges">> := #{BridgeType := #{Name := Config}}} = RawConf, + Config. + +create_bridge(Config) -> + BridgeType = ?config(dynamo_bridge_type, Config), + Name = ?config(dynamo_name, Config), + TDConfig = ?config(dynamo_config, Config), + emqx_bridge:create(BridgeType, Name, TDConfig). + +delete_bridge(Config) -> + BridgeType = ?config(dynamo_bridge_type, Config), + Name = ?config(dynamo_name, Config), + emqx_bridge:remove(BridgeType, Name). + +create_bridge_http(Params) -> + Path = emqx_mgmt_api_test_util:api_path(["bridges"]), + AuthHeader = emqx_mgmt_api_test_util:auth_header_(), + case emqx_mgmt_api_test_util:request_api(post, Path, "", AuthHeader, Params) of + {ok, Res} -> {ok, emqx_json:decode(Res, [return_maps])}; + Error -> Error + end. + +send_message(Config, Payload) -> + Name = ?config(dynamo_name, Config), + BridgeType = ?config(dynamo_bridge_type, Config), + BridgeID = emqx_bridge_resource:bridge_id(BridgeType, Name), + emqx_bridge:send_message(BridgeID, Payload). 
+ +query_resource(Config, Request) -> + Name = ?config(dynamo_name, Config), + BridgeType = ?config(dynamo_bridge_type, Config), + ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name), + emqx_resource:query(ResourceID, Request, #{timeout => 1_000}). + +%% create a table, use the lib-ee/emqx_ee_bridge/priv/dynamo/mqtt_msg.json as template +create_table(Config) -> + directly_setup_dynamo(), + delete_table(Config), + ?assertMatch( + {ok, _}, + erlcloud_ddb2:create_table( + ?TABLE_BIN, + [{<<"id">>, s}], + <<"id">>, + [{provisioned_throughput, {5, 5}}] + ) + ). + +delete_table(_Config) -> + erlcloud_ddb2:delete_table(?TABLE_BIN). + +setup_dynamo(Config) -> + Host = ?GET_CONFIG(host, Config), + Port = ?GET_CONFIG(port, Config), + erlcloud_ddb2:configure(?USERNAME, ?PASSWORD, Host, Port, ?SCHEMA). + +directly_setup_dynamo() -> + erlcloud_ddb2:configure(?USERNAME, ?PASSWORD, ?HOST, ?PORT, ?SCHEMA). + +directly_query(Query) -> + directly_setup_dynamo(), + emqx_ee_connector_dynamo:execute(Query, ?TABLE_BIN). + +directly_get_payload(Key) -> + case directly_query({get_item, {<<"id">>, Key}}) of + {ok, Values} -> + proplists:get_value(<<"payload">>, Values, {error, {invalid_item, Values}}); + Error -> + Error + end. 
+ +%%------------------------------------------------------------------------------ +%% Testcases +%%------------------------------------------------------------------------------ + +t_setup_via_config_and_publish(Config) -> + ?assertNotEqual(undefined, get(aws_config)), + create_table(Config), + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + MsgId = emqx_misc:gen_id(), + SentData = #{id => MsgId, payload => ?PAYLOAD}, + ?check_trace( + begin + ?wait_async_action( + ?assertMatch( + {ok, _}, send_message(Config, SentData) + ), + #{?snk_kind := dynamo_connector_query_return}, + 10_000 + ), + ?assertMatch( + ?PAYLOAD, + directly_get_payload(MsgId) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(dynamo_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, _}}], Trace), + ok + end + ), + ok. + +t_setup_via_http_api_and_publish(Config) -> + BridgeType = ?config(dynamo_bridge_type, Config), + Name = ?config(dynamo_name, Config), + PgsqlConfig0 = ?config(dynamo_config, Config), + PgsqlConfig = PgsqlConfig0#{ + <<"name">> => Name, + <<"type">> => BridgeType + }, + ?assertMatch( + {ok, _}, + create_bridge_http(PgsqlConfig) + ), + MsgId = emqx_misc:gen_id(), + SentData = #{id => MsgId, payload => ?PAYLOAD}, + ?check_trace( + begin + ?wait_async_action( + ?assertMatch( + {ok, _}, send_message(Config, SentData) + ), + #{?snk_kind := dynamo_connector_query_return}, + 10_000 + ), + ?assertMatch( + ?PAYLOAD, + directly_get_payload(MsgId) + ), + ok + end, + fun(Trace0) -> + Trace = ?of_kind(dynamo_connector_query_return, Trace0), + ?assertMatch([#{result := {ok, _}}], Trace), + ok + end + ), + ok. 
%% Verify the health-check transitions when the connection to DynamoDB
%% (routed through toxiproxy) is cut.
t_get_status(Config) ->
    ?assertMatch(
        {ok, _},
        create_bridge(Config)
    ),

    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    ProxyName = ?config(proxy_name, Config),

    Name = ?config(dynamo_name, Config),
    BridgeType = ?config(dynamo_bridge_type, Config),
    ResourceID = emqx_bridge_resource:resource_id(BridgeType, Name),

    ?assertEqual({ok, connected}, emqx_resource_manager:health_check(ResourceID)),
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        %% While the proxy is down the status may be reported as either
        %% `disconnected' or `connecting', or the check itself may time out;
        %% anything else is a failure.
        case emqx_resource_manager:health_check(ResourceID) of
            {ok, Status} when Status =:= disconnected orelse Status =:= connecting ->
                ok;
            {error, timeout} ->
                ok;
            Other ->
                ?assert(
                    false, lists:flatten(io_lib:format("invalid health check result:~p~n", [Other]))
                )
        end
    end),
    ok.

%% Sending while the connection is down must surface a resource timeout error.
t_write_failure(Config) ->
    ProxyName = ?config(proxy_name, Config),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    {ok, _} = create_bridge(Config),
    SentData = #{id => emqx_misc:gen_id(), payload => ?PAYLOAD},
    emqx_common_test_helpers:with_failure(down, ProxyName, ProxyHost, ProxyPort, fun() ->
        ?assertMatch(
            {error, {resource_error, #{reason := timeout}}}, send_message(Config, SentData)
        )
    end),
    ok.

%% Same as t_write_failure, but with an induced network timeout rather than
%% a dropped connection, and querying the resource directly.
t_write_timeout(Config) ->
    ProxyName = ?config(proxy_name, Config),
    ProxyPort = ?config(proxy_port, Config),
    ProxyHost = ?config(proxy_host, Config),
    {ok, _} = create_bridge(Config),
    SentData = #{id => emqx_misc:gen_id(), payload => ?PAYLOAD},
    emqx_common_test_helpers:with_failure(timeout, ProxyName, ProxyHost, ProxyPort, fun() ->
        ?assertMatch(
            {error, {resource_error, #{reason := timeout}}},
            query_resource(Config, {send_message, SentData})
        )
    end),
    ok.
+ +t_simple_query(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {get_item, {<<"id">>, <<"not_exists">>}}, + Result = query_resource(Config, Request), + case ?GET_CONFIG(batch_size, Config) of + ?BATCH_SIZE -> + ?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result); + 1 -> + ?assertMatch({ok, []}, Result) + end, + ok. + +t_missing_data(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Result = send_message(Config, #{}), + ?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result), + ok. + +t_bad_parameter(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Request = {insert_item, bad_parameter}, + Result = query_resource(Config, Request), + ?assertMatch({error, {unrecoverable_error, {invalid_request, _}}}, Result), + ok. + +to_bin(List) when is_list(List) -> + unicode:characters_to_binary(List, utf8); +to_bin(Bin) when is_binary(Bin) -> + Bin. diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl index 8014dbdcc..2b2214df0 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_influxdb_SUITE.erl @@ -877,7 +877,7 @@ t_create_disconnected(Config) -> end), fun(Trace) -> ?assertMatch( - [#{error := influxdb_client_not_alive}], + [#{error := influxdb_client_not_alive, reason := econnrefused}], ?of_kind(influxdb_connector_start_failed, Trace) ), ok diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl index fec85c874..93e9e6fee 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_mysql_SUITE.erl @@ -28,6 +28,9 @@ -define(MYSQL_DATABASE, "mqtt"). -define(MYSQL_USERNAME, "root"). -define(MYSQL_PASSWORD, "public"). +-define(MYSQL_POOL_SIZE, 4). + +-define(WORKER_POOL_SIZE, 4). 
%%------------------------------------------------------------------------------ %% CT boilerplate @@ -168,11 +171,13 @@ mysql_config(BridgeType, Config) -> " database = ~p\n" " username = ~p\n" " password = ~p\n" + " pool_size = ~b\n" " sql = ~p\n" " resource_opts = {\n" " request_timeout = 500ms\n" " batch_size = ~b\n" " query_mode = ~s\n" + " worker_pool_size = ~b\n" " }\n" " ssl = {\n" " enable = ~w\n" @@ -185,9 +190,11 @@ mysql_config(BridgeType, Config) -> ?MYSQL_DATABASE, ?MYSQL_USERNAME, ?MYSQL_PASSWORD, + ?MYSQL_POOL_SIZE, ?SQL_BRIDGE, BatchSize, QueryMode, + ?WORKER_POOL_SIZE, TlsEnabled ] ), @@ -265,27 +272,26 @@ connect_direct_mysql(Config) -> {ok, Pid} = mysql:start_link(Opts ++ SslOpts), Pid. +query_direct_mysql(Config, Query) -> + Pid = connect_direct_mysql(Config), + try + mysql:query(Pid, Query) + after + mysql:stop(Pid) + end. + % These funs connect and then stop the mysql connection connect_and_create_table(Config) -> - DirectPid = connect_direct_mysql(Config), - ok = mysql:query(DirectPid, ?SQL_CREATE_TABLE), - mysql:stop(DirectPid). + query_direct_mysql(Config, ?SQL_CREATE_TABLE). connect_and_drop_table(Config) -> - DirectPid = connect_direct_mysql(Config), - ok = mysql:query(DirectPid, ?SQL_DROP_TABLE), - mysql:stop(DirectPid). + query_direct_mysql(Config, ?SQL_DROP_TABLE). connect_and_clear_table(Config) -> - DirectPid = connect_direct_mysql(Config), - ok = mysql:query(DirectPid, ?SQL_DELETE), - mysql:stop(DirectPid). + query_direct_mysql(Config, ?SQL_DELETE). connect_and_get_payload(Config) -> - DirectPid = connect_direct_mysql(Config), - Result = mysql:query(DirectPid, ?SQL_SELECT), - mysql:stop(DirectPid), - Result. + query_direct_mysql(Config, ?SQL_SELECT). %%------------------------------------------------------------------------------ %% Testcases @@ -505,6 +511,50 @@ t_bad_sql_parameter(Config) -> end, ok. 
+t_nasty_sql_string(Config) -> + ?assertMatch({ok, _}, create_bridge(Config)), + Payload = list_to_binary(lists:seq(0, 255)), + Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)}, + Result = send_message(Config, Message), + ?assertEqual(ok, Result), + ?assertMatch( + {ok, [<<"payload">>], [[Payload]]}, + connect_and_get_payload(Config) + ). + +t_workload_fits_prepared_statement_limit(Config) -> + N = 50, + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + Results = lists:append( + emqx_misc:pmap( + fun(_) -> + [ + begin + Payload = integer_to_binary(erlang:unique_integer()), + Timestamp = erlang:system_time(millisecond), + send_message(Config, #{payload => Payload, timestamp => Timestamp}) + end + || _ <- lists:seq(1, N) + ] + end, + lists:seq(1, ?WORKER_POOL_SIZE * ?MYSQL_POOL_SIZE), + _Timeout = 10_000 + ) + ), + ?assertEqual( + [], + [R || R <- Results, R /= ok] + ), + {ok, _, [[_Var, Count]]} = + query_direct_mysql(Config, "SHOW GLOBAL STATUS LIKE 'Prepared_stmt_count'"), + ?assertEqual( + ?MYSQL_POOL_SIZE, + binary_to_integer(Count) + ). + t_unprepared_statement_query(Config) -> ?assertMatch( {ok, _}, diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl index 6fbb9689f..10359a128 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_pgsql_SUITE.erl @@ -510,3 +510,10 @@ t_bad_sql_parameter(Config) -> ) end, ok. + +t_nasty_sql_string(Config) -> + ?assertMatch({ok, _}, create_bridge(Config)), + Payload = list_to_binary(lists:seq(1, 127)), + Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)}, + ?assertEqual({ok, 1}, send_message(Config, Message)), + ?assertEqual(Payload, connect_and_get_payload(Config)). 
diff --git a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl index 4c17ba1a1..3b580ec61 100644 --- a/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl +++ b/lib-ee/emqx_ee_bridge/test/emqx_ee_bridge_tdengine_SUITE.erl @@ -426,6 +426,32 @@ t_bad_sql_parameter(Config) -> end, ok. +t_nasty_sql_string(Config) -> + ?assertMatch( + {ok, _}, + create_bridge(Config) + ), + % NOTE + % Column `payload` has BINARY type, so we would certainly like to test it + % with `lists:seq(1, 127)`, but: + % 1. There's no way to insert zero byte in an SQL string, seems that TDengine's + % parser[1] has no escaping sequence for it so a zero byte probably confuses + % interpreter somewhere down the line. + % 2. Bytes > 127 come back as U+FFFDs (i.e. replacement characters) in UTF-8 for + % some reason. + % + % [1]: https://github.com/taosdata/TDengine/blob/066cb34a/source/libs/parser/src/parUtil.c#L279-L301 + Payload = list_to_binary(lists:seq(1, 127)), + Message = #{payload => Payload, timestamp => erlang:system_time(millisecond)}, + ?assertMatch( + {ok, #{<<"code">> := 0, <<"rows">> := 1}}, + send_message(Config, Message) + ), + ?assertEqual( + Payload, + connect_and_get_payload(Config) + ). 
+ to_bin(List) when is_list(List) -> unicode:characters_to_binary(List, utf8); to_bin(Bin) when is_binary(Bin) -> diff --git a/lib-ee/emqx_ee_connector/docker-ct b/lib-ee/emqx_ee_connector/docker-ct index ef579c036..3db090939 100644 --- a/lib-ee/emqx_ee_connector/docker-ct +++ b/lib-ee/emqx_ee_connector/docker-ct @@ -1,2 +1,3 @@ toxiproxy influxdb +clickhouse diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_clickhouse.conf b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_clickhouse.conf new file mode 100644 index 000000000..1e07c29b4 --- /dev/null +++ b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_clickhouse.conf @@ -0,0 +1,15 @@ + +emqx_ee_connector_clickhouse { + + base_url { + desc { + en: """The HTTP URL to the Clickhouse server that you want to connect to (for example http://myhostname:8123)""" + zh: """你想连接到的Clickhouse服务器的HTTP URL(例如http://myhostname:8123)。""" + } + label: { + en: "URL to clickhouse server" + zh: "到clickhouse服务器的URL" + } + } + +} diff --git a/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_dynamo.conf b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_dynamo.conf new file mode 100644 index 000000000..e1fc11e03 --- /dev/null +++ b/lib-ee/emqx_ee_connector/i18n/emqx_ee_connector_dynamo.conf @@ -0,0 +1,14 @@ +emqx_ee_connector_dynamo { + + url { + desc { + en: """The url of DynamoDB endpoint.
""" + zh: """DynamoDB 的地址。
""" + } + label: { + en: "DynamoDB Endpoint" + zh: "DynamoDB 地址" + } + } + +} diff --git a/lib-ee/emqx_ee_connector/rebar.config b/lib-ee/emqx_ee_connector/rebar.config index 54c471f96..76f6ccfba 100644 --- a/lib-ee/emqx_ee_connector/rebar.config +++ b/lib-ee/emqx_ee_connector/rebar.config @@ -1,8 +1,10 @@ {erl_opts, [debug_info]}. {deps, [ {hstreamdb_erl, {git, "https://github.com/hstreamdb/hstreamdb_erl.git", {tag, "0.2.5"}}}, - {influxdb, {git, "https://github.com/emqx/influxdb-client-erl", {tag, "1.1.8"}}}, + {influxdb, {git, "https://github.com/emqx/influxdb-client-erl", {tag, "1.1.9"}}}, {tdengine, {git, "https://github.com/emqx/tdengine-client-erl", {tag, "0.1.5"}}}, + {clickhouse, {git, "https://github.com/emqx/clickhouse-client-erl", {tag, "0.2"}}}, + {erlcloud, {git, "https://github.com/emqx/erlcloud.git", {tag,"3.5.16-emqx-1"}}}, {emqx, {path, "../../apps/emqx"}} ]}. diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src index 6acbc43bd..6f40f7158 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector.app.src @@ -1,6 +1,6 @@ {application, emqx_ee_connector, [ {description, "EMQX Enterprise connectors"}, - {vsn, "0.1.6"}, + {vsn, "0.1.7"}, {registered, []}, {applications, [ kernel, @@ -9,7 +9,9 @@ influxdb, tdengine, wolff, - brod + brod, + clickhouse, + erlcloud ]}, {env, []}, {modules, []}, diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_clickhouse.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_clickhouse.erl new file mode 100644 index 000000000..b1ad6c787 --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_clickhouse.erl @@ -0,0 +1,444 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. 
+%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%%-------------------------------------------------------------------- + +-module(emqx_ee_connector_clickhouse). + +-include_lib("emqx_connector/include/emqx_connector.hrl"). +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). + +-behaviour(emqx_resource). + +-import(hoconsc, [mk/2, enum/1, ref/2]). + +%%===================================================================== +%% Exports +%%===================================================================== + +%% Hocon config schema exports +-export([ + roots/0, + fields/1, + values/1 +]). + +%% callbacks for behaviour emqx_resource +-export([ + callback_mode/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_get_status/2 +]). + +%% callbacks for ecpool +-export([connect/1]). + +%% Internal exports used to execute code with ecpool worker +-export([ + check_database_status/1, + execute_sql_in_clickhouse_server_using_connection/2 +]). + +%%===================================================================== +%% Types +%%===================================================================== + +-type url() :: emqx_http_lib:uri_map(). +-reflect_type([url/0]). +-typerefl_from_string({url/0, emqx_http_lib, uri_parse}). 
%% Prepared SQL templates: empty for plain (non-bridge) connectors, otherwise
%% the pre-processed single-row INSERT template plus the separator-prefixed
%% extension used to append additional rows for batch inserts.
-type templates() ::
    #{}
    | #{
        send_message_template := term(),
        extend_send_message_template := term()
    }.

%% Connector runtime state as returned by on_start/2.
-type state() ::
    #{
        templates := templates(),
        poolname := atom()
    }.

-type clickhouse_config() :: map().

%%=====================================================================
%% Configuration and default values
%%=====================================================================

roots() ->
    [{config, #{type => hoconsc:ref(?MODULE, config)}}].

fields(config) ->
    [
        {url,
            hoconsc:mk(
                url(),
                #{
                    required => true,
                    %% A query string is not allowed in the configured URL.
                    validator => fun
                        (#{query := _Query}) ->
                            {error, "There must be no query in the url"};
                        (_) ->
                            ok
                    end,
                    desc => ?DESC("base_url")
                }
            )}
    ] ++ emqx_connector_schema_lib:relational_db_fields().

%% Example config values per API method (presumably surfaced in the HTTP API
%% schema examples — confirm against the bridge schema module).
values(post) ->
    maps:merge(values(put), #{name => <<"connector">>});
values(get) ->
    values(post);
values(put) ->
    #{
        database => <<"mqtt">>,
        enable => true,
        pool_size => 8,
        type => clickhouse,
        url => <<"http://127.0.0.1:8123">>
    };
values(_) ->
    #{}.

%% ===================================================================
%% Callbacks defined in emqx_resource
%% ===================================================================

callback_mode() -> always_sync.

%% -------------------------------------------------------------------
%% on_start callback and related functions
%% -------------------------------------------------------------------

-spec on_start(resource_id(), clickhouse_config()) -> {ok, state()} | {error, _}.
%% Start the clickhouse connector: build the ecpool worker options from the
%% config, prepare the SQL templates (bridge mode only) and start the pool.
on_start(
    InstanceID,
    #{
        url := URL,
        database := DB,
        pool_size := PoolSize
    } = Config
) ->
    ?SLOG(info, #{
        msg => "starting_clickhouse_connector",
        connector => InstanceID,
        config => emqx_misc:redact(Config)
    }),
    PoolName = emqx_plugin_libs_pool:pool_name(InstanceID),
    Options = [
        {url, URL},
        {user, maps:get(username, Config, "default")},
        %% The password is wrapped with emqx_secret so it is not readable
        %% from the worker options; it is unwrapped again in connect/1.
        {key, emqx_secret:wrap(maps:get(password, Config, "public"))},
        {database, DB},
        {auto_reconnect, ?AUTO_RECONNECT_INTERVAL},
        {pool_size, PoolSize},
        {pool, PoolName}
    ],
    InitState = #{poolname => PoolName},
    try
        %% prepare_sql_templates/1 raises when the configured SQL template is
        %% not an INSERT statement; the catch below turns that (and any other
        %% crash) into an {error, _} return.
        Templates = prepare_sql_templates(Config),
        State = maps:merge(InitState, #{templates => Templates}),
        case emqx_plugin_libs_pool:start_pool(PoolName, ?MODULE, Options) of
            ok ->
                {ok, State};
            {error, Reason} ->
                log_start_error(Config, Reason, none),
                {error, Reason}
        end
    catch
        _:CatchReason:Stacktrace ->
            log_start_error(Config, CatchReason, Stacktrace),
            {error, CatchReason}
    end.

%% Log and trace a failed start. Stacktrace may be the atom `none' when no
%% stacktrace is available (e.g. when start_pool returned an error tuple).
log_start_error(Config, Reason, Stacktrace) ->
    StacktraceMap =
        case Stacktrace of
            none -> #{};
            _ -> #{stacktrace => Stacktrace}
        end,
    LogMessage =
        #{
            msg => "clickhouse_connector_start_failed",
            error_reason => Reason,
            config => emqx_misc:redact(Config)
        },
    ?SLOG(info, maps:merge(LogMessage, StacktraceMap)),
    ?tp(
        clickhouse_connector_start_failed,
        #{error => Reason}
    ).

%% Helper functions to prepare SQL templates

prepare_sql_templates(#{
    sql := Template,
    batch_value_separator := Separator
}) ->
    InsertTemplate =
        emqx_plugin_libs_rule:preproc_tmpl(Template),
    BulkExtendInsertTemplate =
        prepare_sql_bulk_extend_template(Template, Separator),
    #{
        send_message_template => InsertTemplate,
        extend_send_message_template => BulkExtendInsertTemplate
    };
prepare_sql_templates(_) ->
    %% We don't create any templates if this is a non-bridge connector
    #{}.
%% Build the template that renders one additional VALUES row, prefixed with
%% the configured separator, so it can be appended to the base INSERT
%% statement when batching.
prepare_sql_bulk_extend_template(Template, Separator) ->
    ValuesTemplate = split_clickhouse_insert_sql(Template),
    %% The value part has been extracted
    %% Add separator before ValuesTemplate so that one can append it
    %% to an insert template
    ExtendParamTemplate = iolist_to_binary([Separator, ValuesTemplate]),
    emqx_plugin_libs_rule:preproc_tmpl(ExtendParamTemplate).

%% This function is similar to emqx_plugin_libs_rule:split_insert_sql/1 but can
%% also handle Clickhouse's SQL extension for INSERT statements that allows the
%% user to specify different formats:
%%
%% https://clickhouse.com/docs/en/sql-reference/statements/insert-into/
%%
%% Returns the part of the statement following the VALUES/FORMAT keyword,
%% or raises if the template is not an INSERT statement.
split_clickhouse_insert_sql(SQL) ->
    ErrorMsg = <<"The SQL template should be an SQL INSERT statement but it is something else.">>,
    case
        re:split(SQL, "(\\s+(?i:values)|(?i:format\\s+(?:[A-Za-z0-9_])+)\\s+)", [{return, binary}])
    of
        [Part1, _, Part3] ->
            %% SQL keywords are case-insensitive; accept any capitalization
            %% of INSERT. (The previous check only accepted all-lowercase
            %% "insert" or all-uppercase "INSERT", rejecting valid templates
            %% such as "Insert into ...".)
            Head = string:trim(Part1, leading),
            case string:lowercase(string:slice(Head, 0, 6)) of
                <<"insert">> ->
                    Part3;
                _ ->
                    erlang:error(ErrorMsg)
            end;
        _ ->
            erlang:error(ErrorMsg)
    end.

% This is a callback for ecpool which is triggered by the call to
% emqx_plugin_libs_pool:start_pool in on_start/2

connect(Options) ->
    URL = iolist_to_binary(emqx_http_lib:normalize(proplists:get_value(url, Options))),
    User = proplists:get_value(user, Options),
    Database = proplists:get_value(database, Options),
    %% The password was wrapped in on_start/2; unwrap it only here, at
    %% connect time.
    Key = emqx_secret:unwrap(proplists:get_value(key, Options)),
    Pool = proplists:get_value(pool, Options),
    PoolSize = proplists:get_value(pool_size, Options),
    FixedOptions = [
        {url, URL},
        {database, Database},
        {user, User},
        {key, Key},
        {pool, Pool},
        {pool_size, PoolSize}
    ],
    case clickhouse:start_link(FixedOptions) of
        {ok, _Conn} = Ok ->
            Ok;
        {error, Reason} ->
            {error, Reason}
    end.
%% -------------------------------------------------------------------
%% on_stop emqx_resource callback
%% -------------------------------------------------------------------

-spec on_stop(resource_id(), resource_state()) -> term().

on_stop(ResourceID, #{poolname := PoolName}) ->
    ?SLOG(info, #{
        %% Fixed: was "stopping clickouse connector" (typo), now consistent
        %% with the snake_case "starting_clickhouse_connector" message.
        msg => "stopping_clickhouse_connector",
        connector => ResourceID
    }),
    emqx_plugin_libs_pool:stop_pool(PoolName).

%% -------------------------------------------------------------------
%% on_get_status emqx_resource callback and related functions
%% -------------------------------------------------------------------

%% Health check: ping the server from every ecpool worker; report
%% `connecting' (so the resource manager keeps retrying) when any fails.
on_get_status(_ResourceID, #{poolname := Pool} = _State) ->
    case
        emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:check_database_status/1)
    of
        true ->
            connected;
        false ->
            connecting
    end.

check_database_status(Connection) ->
    clickhouse:status(Connection).

%% -------------------------------------------------------------------
%% on_query emqx_resource callback and related functions
%% -------------------------------------------------------------------

-spec on_query
    (resource_id(), Request, resource_state()) -> query_result() when
        Request :: {RequestType, Data},
        RequestType :: send_message,
        Data :: map();
    (resource_id(), Request, resource_state()) -> query_result() when
        Request :: {RequestType, SQL},
        RequestType :: sql | query,
        SQL :: binary().

on_query(
    ResourceID,
    {RequestType, DataOrSQL},
    #{poolname := PoolName} = State
) ->
    ?SLOG(debug, #{
        msg => "clickhouse connector received sql query",
        connector => ResourceID,
        type => RequestType,
        sql => DataOrSQL,
        state => State
    }),
    %% Have we got a query or data to fit into an SQL template?
    SimplifiedRequestType = query_type(RequestType),
    #{templates := Templates} = State,
    SQL = get_sql(SimplifiedRequestType, Templates, DataOrSQL),
    ClickhouseResult = execute_sql_in_clickhouse_server(PoolName, SQL),
    transform_and_log_clickhouse_result(ClickhouseResult, ResourceID, SQL).

%% Bridge messages are rendered through the prepared INSERT template;
%% raw sql/query requests are passed through verbatim.
get_sql(send_message, #{send_message_template := PreparedSQL}, Data) ->
    emqx_plugin_libs_rule:proc_tmpl(PreparedSQL, Data);
get_sql(_, _, SQL) ->
    SQL.

query_type(sql) ->
    query;
query_type(query) ->
    query;
%% Data that goes to bridges use the prepared template
query_type(send_message) ->
    send_message.

%% -------------------------------------------------------------------
%% on_batch_query emqx_resource callback and related functions
%% -------------------------------------------------------------------

-spec on_batch_query(resource_id(), BatchReq, resource_state()) -> query_result() when
    BatchReq :: nonempty_list({'send_message', map()}).

on_batch_query(
    ResourceID,
    BatchReq,
    State
) ->
    %% Currently we only support batch requests with the send_message key
    {Keys, ObjectsToInsert} = lists:unzip(BatchReq),
    ensure_keys_are_of_type_send_message(Keys),
    %% Pick out the SQL template
    #{
        templates := Templates,
        poolname := PoolName
    } = State,
    %% Create batch insert SQL statement
    SQL = objects_to_sql(ObjectsToInsert, Templates),
    %% Do the actual query in the database
    ResultFromClickhouse = execute_sql_in_clickhouse_server(PoolName, SQL),
    %% Transform the result to a better format
    transform_and_log_clickhouse_result(ResultFromClickhouse, ResourceID, SQL).

ensure_keys_are_of_type_send_message(Keys) ->
    case lists:all(fun is_send_message_atom/1, Keys) of
        true ->
            ok;
        false ->
            erlang:error(
                {unrecoverable_error,
                    <<"Unexpected type for batch message (Expected send_message)">>}
            )
    end.

is_send_message_atom(send_message) ->
    true;
is_send_message_atom(_) ->
    false.
%% Build one complete batch INSERT statement from a list of messages: the
%% first message renders the full "INSERT ... VALUES (...)" head and every
%% remaining message renders an extra separator-prefixed value row.
objects_to_sql(
    [FirstObject | RemainingObjects] = _ObjectsToInsert,
    #{
        send_message_template := InsertTemplate,
        extend_send_message_template := BulkExtendInsertTemplate
    }
) ->
    %% Prepare INSERT-statement and the first row after VALUES
    InsertStatementHead = emqx_plugin_libs_rule:proc_tmpl(InsertTemplate, FirstObject),
    FormatObjectDataFunction =
        fun(Object) ->
            emqx_plugin_libs_rule:proc_tmpl(BulkExtendInsertTemplate, Object)
        end,
    InsertStatementTail = lists:map(FormatObjectDataFunction, RemainingObjects),
    CompleteStatement = erlang:iolist_to_binary([InsertStatementHead, InsertStatementTail]),
    CompleteStatement;
objects_to_sql(_, _) ->
    %% Reached when the connector was started without bridge templates.
    erlang:error(<<"Templates for bulk insert missing.">>).

%% -------------------------------------------------------------------
%% Helper functions that are used by both on_query/3 and on_batch_query/3
%% -------------------------------------------------------------------

%% This function is used by on_query/3 and on_batch_query/3 to send a query to
%% the database server and receive a result
execute_sql_in_clickhouse_server(PoolName, SQL) ->
    ecpool:pick_and_do(
        PoolName,
        {?MODULE, execute_sql_in_clickhouse_server_using_connection, [SQL]},
        no_handover
    ).

%% Runs inside the ecpool worker that owns Connection.
execute_sql_in_clickhouse_server_using_connection(Connection, SQL) ->
    clickhouse:query(Connection, SQL, []).
+ +%% This function transforms the result received from clickhouse to something +%% that is a little bit more readable and creates approprieate log messages +transform_and_log_clickhouse_result({ok, 200, <<"">>} = _ClickhouseResult, _, _) -> + snabbkaffe_log_return(ok), + ok; +transform_and_log_clickhouse_result({ok, 200, Data}, _, _) -> + Result = {ok, Data}, + snabbkaffe_log_return(Result), + Result; +transform_and_log_clickhouse_result(ClickhouseErrorResult, ResourceID, SQL) -> + ?SLOG(error, #{ + msg => "clickhouse connector do sql query failed", + connector => ResourceID, + sql => SQL, + reason => ClickhouseErrorResult + }), + {error, ClickhouseErrorResult}. + +snabbkaffe_log_return(_Result) -> + ?tp( + clickhouse_connector_query_return, + #{result => _Result} + ). diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl new file mode 100644 index 000000000..957706f6a --- /dev/null +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_dynamo.erl @@ -0,0 +1,345 @@ +%%-------------------------------------------------------------------- +%% Copyright (c) 2023 EMQ Technologies Co., Ltd. All Rights Reserved. +%%-------------------------------------------------------------------- + +-module(emqx_ee_connector_dynamo). + +-behaviour(emqx_resource). + +-include_lib("emqx_resource/include/emqx_resource.hrl"). +-include_lib("typerefl/include/types.hrl"). +-include_lib("emqx/include/logger.hrl"). +-include_lib("snabbkaffe/include/snabbkaffe.hrl"). +-include_lib("hocon/include/hoconsc.hrl"). + +-export([roots/0, fields/1]). + +%% `emqx_resource' API +-export([ + callback_mode/0, + is_buffer_supported/0, + on_start/2, + on_stop/2, + on_query/3, + on_batch_query/3, + on_query_async/4, + on_batch_query_async/4, + on_get_status/2 +]). + +-export([ + connect/1, + do_get_status/1, + do_async_reply/2, + worker_do_query/4, + worker_do_get_status/1 +]). + +-import(hoconsc, [mk/2, enum/1, ref/2]). 
%% Options for parsing the server part of the configured URL; the DynamoDB
%% local endpoint defaults to port 8000.
-define(DYNAMO_HOST_OPTIONS, #{
    default_port => 8000
}).

-ifdef(TEST).
%% Exported so the test suite can issue queries directly.
-export([execute/2]).
-endif.

%%=====================================================================
%% Hocon schema
roots() ->
    [{config, #{type => hoconsc:ref(?MODULE, config)}}].

fields(config) ->
    [
        {url, mk(binary(), #{required => true, desc => ?DESC("url")})}
        | add_default_username(
            emqx_connector_schema_lib:relational_db_fields()
        )
    ].

%% Give the shared relational-db `username' field a dynamo-specific default
%% of <<"root">> without redefining the whole field.
add_default_username(Fields) ->
    lists:map(
        fun
            ({username, OrigUsernameFn}) ->
                {username, add_default_fn(OrigUsernameFn, <<"root">>)};
            (Field) ->
                Field
        end,
        Fields
    ).

%% Wrap a hocon field fun so that `default' returns Default while every
%% other schema key is delegated to the original fun.
add_default_fn(OrigFn, Default) ->
    fun
        (default) -> Default;
        (Field) -> OrigFn(Field)
    end.

%%========================================================================================
%% `emqx_resource' API
%%========================================================================================

callback_mode() -> async_if_possible.

is_buffer_supported() -> false.

%% Start the dynamo connector: split the configured URL into scheme and
%% host:port, build per-worker erlcloud configuration, pre-process the
%% optional payload template and start the worker pool.
on_start(
    InstanceId,
    #{
        url := Url,
        username := Username,
        password := Password,
        database := Database,
        pool_size := PoolSize
    } = Config
) ->
    ?SLOG(info, #{
        msg => "starting_dynamo_connector",
        connector => InstanceId,
        config => emqx_misc:redact(Config)
    }),

    {Schema, Server} = get_host_schema(to_str(Url)),
    {Host, Port} = emqx_schema:parse_server(Server, ?DYNAMO_HOST_OPTIONS),

    Options = [
        {config, #{
            host => Host,
            port => Port,
            username => to_str(Username),
            password => to_str(Password),
            schema => Schema
        }},
        {pool_size, PoolSize}
    ],

    Templates = parse_template(Config),
    State = #{
        poolname => InstanceId,
        database => Database,
        templates => Templates
    },
    case emqx_plugin_libs_pool:start_pool(InstanceId, ?MODULE, Options) of
        ok ->
            {ok, State};
        Error ->
            Error
    end.
%% Stop the connector's worker pool.
on_stop(InstanceId, #{poolname := PoolName} = _State) ->
    ?SLOG(info, #{
        msg => "stopping_dynamo_connector",
        connector => InstanceId
    }),
    emqx_plugin_libs_pool:stop_pool(PoolName).

on_query(InstanceId, Query, State) ->
    do_query(InstanceId, Query, handover, State).

%% Async variant: the reply fun is invoked via do_async_reply/2 once the
%% worker has finished.
on_query_async(InstanceId, Query, Reply, State) ->
    do_query(
        InstanceId,
        Query,
        {handover_async, {?MODULE, do_async_reply, [Reply]}},
        State
    ).

%% we only support batch insert
on_batch_query(InstanceId, [{send_message, _} | _] = Query, State) ->
    do_query(InstanceId, Query, handover, State);
on_batch_query(_InstanceId, Query, _State) ->
    {error, {unrecoverable_error, {invalid_request, Query}}}.

%% we only support batch insert
on_batch_query_async(InstanceId, [{send_message, _} | _] = Query, Reply, State) ->
    do_query(
        InstanceId,
        Query,
        {handover_async, {?MODULE, do_async_reply, [Reply]}},
        State
    );
on_batch_query_async(_InstanceId, Query, _Reply, _State) ->
    {error, {unrecoverable_error, {invalid_request, Query}}}.

on_get_status(_InstanceId, #{poolname := Pool}) ->
    Health = emqx_plugin_libs_pool:health_check_ecpool_workers(Pool, fun ?MODULE:do_get_status/1),
    status_result(Health).

do_get_status(Conn) ->
    %% because the dynamodb driver connection process is the ecpool worker self
    %% so we must call the checker function inside the worker
    ListTables = ecpool_worker:exec(Conn, {?MODULE, worker_do_get_status, []}, infinity),
    case ListTables of
        {ok, _} -> true;
        _ -> false
    end.

%% Runs inside the ecpool worker; any successful list_tables call counts
%% as healthy.
worker_do_get_status(_) ->
    erlcloud_ddb2:list_tables().

status_result(_Status = true) -> connected;
status_result(_Status = false) -> connecting.
%%========================================================================================
%% Helper fns
%%========================================================================================

%% Common path of all four query callbacks: trace the request, hand it to a
%% pool worker (sync `handover' or async `{handover_async, _}'), and emit the
%% `dynamo_connector_query_return' trace point for success or failure.
do_query(
    InstanceId,
    Query,
    ApplyMode,
    #{poolname := PoolName, templates := Templates, database := Database} = State
) ->
    ?TRACE(
        "QUERY",
        "dynamo_connector_received",
        #{connector => InstanceId, query => Query, state => State}
    ),
    Result = ecpool:pick_and_do(
        PoolName,
        {?MODULE, worker_do_query, [Database, Query, Templates]},
        ApplyMode
    ),

    case Result of
        {error, Reason} ->
            ?tp(
                dynamo_connector_query_return,
                #{error => Reason}
            ),
            ?SLOG(error, #{
                msg => "dynamo_connector_do_query_failed",
                connector => InstanceId,
                query => Query,
                reason => Reason
            }),
            Result;
        _ ->
            ?tp(
                dynamo_connector_query_return,
                #{result => Result}
            ),
            Result
    end.

%% Runs inside the ecpool worker (the _Client argument is supplied by ecpool
%% and unused here — erlcloud state lives in the worker's process dictionary).
%% Any crash while rendering the template or executing the request is mapped
%% to an unrecoverable error so the buffer layer will not retry it.
worker_do_query(_Client, Database, Query0, Templates) ->
    try
        Query = apply_template(Query0, Templates),
        execute(Query, Database)
    catch
        _Type:Reason ->
            {error, {unrecoverable_error, {invalid_request, Reason}}}
    end.
%% some simple query commands for authn/authz or test
execute({insert_item, Msg}, Table) ->
    erlcloud_ddb2:put_item(Table, convert_to_item(Msg));
execute({delete_item, Key}, Table) ->
    erlcloud_ddb2:delete_item(Table, Key);
execute({get_item, Key}, Table) ->
    erlcloud_ddb2:get_item(Table, Key);
%% commands for data bridge query or batch query
execute({send_message, Msg}, Table) ->
    erlcloud_ddb2:put_item(Table, convert_to_item(Msg));
execute([{put, _} | _] = Requests, Table) ->
    %% type of batch_write_item argument :: batch_write_item_request_items()
    %% batch_write_item_request_items() :: maybe_list(batch_write_item_request_item())
    %% batch_write_item_request_item() :: {table_name(), list(batch_write_item_request())}
    %% batch_write_item_request() :: {put, item()} | {delete, key()}
    erlcloud_ddb2:batch_write_item({Table, Requests}).

%% ecpool callback.
%% The dynamodb driver uses the caller process as its connection process,
%% so the "connection" returned here is the ecpool worker itself.
connect(Opts) ->
    #{
        username := Username,
        password := Password,
        host := Host,
        port := Port,
        schema := Schema
    } = proplists:get_value(config, Opts),
    _ = erlcloud_ddb2:configure(Username, Password, Host, Port, Schema),
    {ok, self()}.

%% Pre-process the optional payload template from the bridge config into
%% template tokens keyed for apply_template/2. Absent or empty templates
%% yield an empty map.
parse_template(Config) ->
    case maps:get(template, Config, undefined) of
        Absent when Absent =:= undefined; Absent =:= <<>> ->
            #{};
        Template ->
            #{send_message => emqx_plugin_libs_rule:preproc_tmpl(Template)}
    end.

%% Coerce a string-ish value to a charlist.
to_str(Str) when is_list(Str) ->
    Str;
to_str(Bin) when is_binary(Bin) ->
    erlang:binary_to_list(Bin).
%% Split an endpoint string into {Schema, Host}; default to "http://"
%% when no schema prefix is present.
get_host_schema("http://" ++ Server) ->
    {"http://", Server};
get_host_schema("https://" ++ Server) ->
    {"https://", Server};
get_host_schema(Server) ->
    {"http://", Server}.

%% Render a single request through its preprocessed template, if one
%% was configured for its key; otherwise pass the request through.
apply_template({Key, Msg} = Req, Templates) ->
    case maps:get(Key, Templates, undefined) of
        undefined ->
            Req;
        Template ->
            {Key, emqx_plugin_libs_rule:proc_tmpl(Template, Msg)}
    end;
%% now there is no batch delete, so
%% 1. we can simply replace the `send_message` to `put`
%% 2. convert the message to in_item() here, not at the time when calling `batch_write_items`,
%% so we can reduce some list map cost
apply_template([{send_message, _Msg} | _] = Msgs, Templates) ->
    lists:map(
        fun(Req) ->
            {_, Msg} = apply_template(Req, Templates),
            {put, convert_to_item(Msg)}
        end,
        Msgs
    ).

%% Convert a message (map or JSON binary) into an erlcloud item
%% proplist. Empty binary values are dropped because DynamoDB rejects
%% empty attribute values. Throws {invalid_item, Item} on anything else.
convert_to_item(Msg) when is_map(Msg), map_size(Msg) > 0 ->
    maps:fold(
        fun
            (_K, <<>>, AccIn) ->
                AccIn;
            (K, V, AccIn) ->
                [{convert2binary(K), convert2binary(V)} | AccIn]
        end,
        [],
        Msg
    );
convert_to_item(MsgBin) when is_binary(MsgBin) ->
    %% Decode with return_maps so the result matches the map clause
    %% above. The default decode yields jiffy's {[{K,V}]} proplist
    %% form, which would always fall through to the invalid_item throw.
    Msg = emqx_json:decode(MsgBin, [return_maps]),
    convert_to_item(Msg);
convert_to_item(Item) ->
    erlang:throw({invalid_item, Item}).

%% Normalize a key/value term to a binary attribute representation.
convert2binary(Value) when is_atom(Value) ->
    erlang:atom_to_binary(Value, utf8);
convert2binary(Value) when is_binary(Value); is_number(Value) ->
    Value;
convert2binary(Value) when is_list(Value) ->
    unicode:characters_to_binary(Value);
convert2binary(Value) when is_map(Value) ->
    emqx_json:encode(Value).

%% Deliver an async query result through the reply callback supplied by
%% the resource layer.
do_async_reply(Result, {ReplyFun, [Context]}) ->
    ReplyFun(Context, Result).
diff --git a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl index 7f5b56181..5c99a23a8 100644 --- a/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl +++ b/lib-ee/emqx_ee_connector/src/emqx_ee_connector_influxdb.erl @@ -239,7 +239,7 @@ do_start_client( Precision = maps:get(precision, Config, ms), case influxdb:start_client(ClientConfig) of {ok, Client} -> - case influxdb:is_alive(Client) of + case influxdb:is_alive(Client, true) of true -> State = #{ client => Client, @@ -252,13 +252,15 @@ do_start_client( state => redact_auth(State) }), {ok, State}; - false -> - ?tp(influxdb_connector_start_failed, #{error => influxdb_client_not_alive}), + {false, Reason} -> + ?tp(influxdb_connector_start_failed, #{ + error => influxdb_client_not_alive, reason => Reason + }), ?SLOG(warning, #{ - msg => "starting influxdb connector failed", + msg => "failed_to_start_influxdb_connector", connector => InstId, client => redact_auth(Client), - reason => "client is not alive" + reason => Reason }), %% no leak _ = influxdb:stop_client(Client), @@ -276,7 +278,7 @@ do_start_client( {error, Reason} -> ?tp(influxdb_connector_start_failed, #{error => Reason}), ?SLOG(warning, #{ - msg => "starting influxdb connector failed", + msg => "failed_to_start_influxdb_connector", connector => InstId, reason => Reason }), diff --git a/lib-ee/emqx_ee_connector/test/ee_connector_clickhouse_SUITE.erl b/lib-ee/emqx_ee_connector/test/ee_connector_clickhouse_SUITE.erl new file mode 100644 index 000000000..eab1aa054 --- /dev/null +++ b/lib-ee/emqx_ee_connector/test/ee_connector_clickhouse_SUITE.erl @@ -0,0 +1,198 @@ +% %%-------------------------------------------------------------------- +% %% Copyright (c) 2020-2023 EMQ Technologies Co., Ltd. All Rights Reserved. +% %% +% %% Licensed under the Apache License, Version 2.0 (the "License"); +% %% you may not use this file except in compliance with the License. 
% %% You may obtain a copy of the License at
% %% http://www.apache.org/licenses/LICENSE-2.0
% %%
% %% Unless required by applicable law or agreed to in writing, software
% %% distributed under the License is distributed on an "AS IS" BASIS,
% %% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
% %% See the License for the specific language governing permissions and
% %% limitations under the License.
% %%--------------------------------------------------------------------

-module(ee_connector_clickhouse_SUITE).

-compile(nowarn_export_all).
-compile(export_all).

-include("emqx_connector.hrl").
-include_lib("eunit/include/eunit.hrl").
-include_lib("emqx/include/emqx.hrl").
-include_lib("stdlib/include/assert.hrl").

-define(CLICKHOUSE_HOST, "clickhouse").
-define(CLICKHOUSE_RESOURCE_MOD, emqx_ee_connector_clickhouse).

%% This test SUITE requires a running clickhouse instance. If you don't want to
%% bring up the whole CI infrastructure with the `scripts/ct/run.sh` script
%% you can create a clickhouse instance with the following command (execute it
%% from the root of the EMQX directory). You also need to set ?CLICKHOUSE_HOST
%% and ?CLICKHOUSE_DEFAULT_PORT to appropriate values.
%%
%% docker run -d -p 18123:8123 -p19000:9000 --name some-clickhouse-server --ulimit nofile=262144:262144 -v "`pwd`/.ci/docker-compose-file/clickhouse/users.xml:/etc/clickhouse-server/users.xml" -v "`pwd`/.ci/docker-compose-file/clickhouse/config.xml:/etc/clickhouse-server/config.xml" clickhouse/clickhouse-server

all() ->
    emqx_common_test_helpers:all(?MODULE).

groups() ->
    [].

%% HTTP base URL of the clickhouse server under test.
clickhouse_url() ->
    erlang:iolist_to_binary([
        <<"http://">>,
        ?CLICKHOUSE_HOST,
        ":",
        erlang:integer_to_list(?CLICKHOUSE_DEFAULT_PORT)
    ]).
%% Check that a clickhouse server is reachable; if so, start the
%% applications the connector depends on and create the `mqtt` database
%% used by the test cases. In CI a missing server is a hard failure;
%% locally the suite is skipped.
init_per_suite(Config) ->
    case
        emqx_common_test_helpers:is_tcp_server_available(?CLICKHOUSE_HOST, ?CLICKHOUSE_DEFAULT_PORT)
    of
        true ->
            ok = emqx_common_test_helpers:start_apps([emqx_conf]),
            ok = emqx_connector_test_helpers:start_apps([emqx_resource]),
            {ok, _} = application:ensure_all_started(emqx_connector),
            {ok, _} = application:ensure_all_started(emqx_ee_connector),
            %% Create the db table
            {ok, Conn} =
                clickhouse:start_link([
                    {url, clickhouse_url()},
                    {user, <<"default">>},
                    {key, "public"},
                    {pool, tmp_pool}
                ]),
            {ok, _, _} = clickhouse:query(Conn, <<"CREATE DATABASE IF NOT EXISTS mqtt">>, #{}),
            clickhouse:stop(Conn),
            Config;
        false ->
            case os:getenv("IS_CI") of
                "yes" ->
                    %% In CI the testbed must provide the server; fail
                    %% loudly instead of silently skipping every test.
                    throw(no_clickhouse);
                _ ->
                    {skip, no_clickhouse}
            end
    end.

%% Stop everything init_per_suite started — including emqx_ee_connector,
%% which was previously started but never stopped (app leak across suites).
end_per_suite(_Config) ->
    ok = emqx_common_test_helpers:stop_apps([emqx_conf]),
    ok = emqx_connector_test_helpers:stop_apps([emqx_resource]),
    _ = application:stop(emqx_ee_connector),
    _ = application:stop(emqx_connector).

init_per_testcase(_, Config) ->
    Config.

end_per_testcase(_, _Config) ->
    ok.

% %%------------------------------------------------------------------------------
% %% Testcases
% %%------------------------------------------------------------------------------

%% Full create/query/stop/restart/remove lifecycle of the clickhouse
%% connector resource.
t_lifecycle(_Config) ->
    perform_lifecycle_check(
        <<"emqx_connector_clickhouse_SUITE">>,
        clickhouse_config()
    ).

%% Debug helper: print a term and return it unchanged.
show(X) ->
    erlang:display(X),
    X.

%% Debug helper: print a labelled term and return the term unchanged.
show(Label, What) ->
    erlang:display({Label, What}),
    What.
%% Drive the full resource lifecycle — create -> query -> stop ->
%% restart -> query -> remove — asserting status and health-check
%% transitions at every step.
perform_lifecycle_check(PoolName, InitialConfig) ->
    {ok, #{config := CheckedConfig}} =
        emqx_resource:check_config(?CLICKHOUSE_RESOURCE_MOD, InitialConfig),
    {ok, #{
        state := #{poolname := ReturnedPoolName} = State,
        status := InitialStatus
    }} =
        emqx_resource:create_local(
            PoolName,
            ?CONNECTOR_RESOURCE_GROUP,
            ?CLICKHOUSE_RESOURCE_MOD,
            CheckedConfig,
            #{}
        ),
    %% eunit convention: expected value first, so failure reports read
    %% "expected connected, got <actual>"
    ?assertEqual(connected, InitialStatus),
    % Instance should match the state and status of the just started resource
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{
        state := State,
        status := InitialStatus
    }} =
        emqx_resource:get_instance(PoolName),
    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
    % Perform query as further check that the resource is working as expected
    (fun() ->
        QueryNoParamsResWrapper = emqx_resource:query(PoolName, test_query_no_params()),
        ?assertMatch({ok, _}, QueryNoParamsResWrapper),
        {_, QueryNoParamsRes} = QueryNoParamsResWrapper,
        ?assertMatch(<<"1">>, string:trim(QueryNoParamsRes))
    end)(),
    ?assertEqual(ok, emqx_resource:stop(PoolName)),
    % Resource will be listed still, but state will be changed and healthcheck will fail
    % as the worker no longer exists.
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{
        state := State,
        status := StoppedStatus
    }} =
        emqx_resource:get_instance(PoolName),
    ?assertEqual(stopped, StoppedStatus),
    ?assertEqual({error, resource_is_stopped}, emqx_resource:health_check(PoolName)),
    % Resource healthcheck shortcuts things by checking ets. Go deeper by checking pool itself.
    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
    % Can call stop/1 again on an already stopped instance
    ?assertEqual(ok, emqx_resource:stop(PoolName)),
    % Make sure it can be restarted and the healthchecks and queries work properly
    ?assertEqual(ok, emqx_resource:restart(PoolName)),
    % async restart, need to wait resource
    timer:sleep(500),
    {ok, ?CONNECTOR_RESOURCE_GROUP, #{status := InitialStatus}} =
        emqx_resource:get_instance(PoolName),
    ?assertEqual({ok, connected}, emqx_resource:health_check(PoolName)),
    (fun() ->
        QueryNoParamsResWrapper =
            emqx_resource:query(PoolName, test_query_no_params()),
        ?assertMatch({ok, _}, QueryNoParamsResWrapper),
        {_, QueryNoParamsRes} = QueryNoParamsResWrapper,
        ?assertMatch(<<"1">>, string:trim(QueryNoParamsRes))
    end)(),
    % Stop and remove the resource in one go.
    ?assertEqual(ok, emqx_resource:remove_local(PoolName)),
    ?assertEqual({error, not_found}, ecpool:stop_sup_pool(ReturnedPoolName)),
    % Should not even be able to get the resource data out of ets now unlike just stopping.
    ?assertEqual({error, not_found}, emqx_resource:get_instance(PoolName)).

% %%------------------------------------------------------------------------------
% %% Helpers
% %%------------------------------------------------------------------------------

%% Raw (unchecked) connector config used by t_lifecycle.
clickhouse_config() ->
    Config =
        #{
            auto_reconnect => true,
            database => <<"mqtt">>,
            username => <<"default">>,
            password => <<"public">>,
            pool_size => 8,
            url => iolist_to_binary(
                io_lib:format(
                    "http://~s:~b",
                    [
                        ?CLICKHOUSE_HOST,
                        ?CLICKHOUSE_DEFAULT_PORT
                    ]
                )
            )
        },
    #{<<"config">> => Config}.

%% Simplest possible probe query.
test_query_no_params() ->
    {query, <<"SELECT 1">>}.
diff --git a/lib-ee/emqx_license/src/emqx_license.app.src b/lib-ee/emqx_license/src/emqx_license.app.src index fdc701369..7a569c402 100644 --- a/lib-ee/emqx_license/src/emqx_license.app.src +++ b/lib-ee/emqx_license/src/emqx_license.app.src @@ -1,6 +1,6 @@ {application, emqx_license, [ {description, "EMQX License"}, - {vsn, "5.0.6"}, + {vsn, "5.0.7"}, {modules, []}, {registered, [emqx_license_sup]}, {applications, [kernel, stdlib, emqx_ctl]}, diff --git a/lib-ee/emqx_license/src/emqx_license_resources.erl b/lib-ee/emqx_license/src/emqx_license_resources.erl index 3f353064b..2cc62b8a3 100644 --- a/lib-ee/emqx_license/src/emqx_license_resources.erl +++ b/lib-ee/emqx_license/src/emqx_license_resources.erl @@ -127,7 +127,7 @@ ensure_timer(#{check_peer_interval := CheckInterval} = State) -> State#{timer => erlang:send_after(CheckInterval, self(), update_resources)}. remote_connection_count() -> - Nodes = mria_mnesia:running_nodes() -- [node()], + Nodes = mria:running_nodes() -- [node()], Results = emqx_license_proto_v2:remote_connection_counts(Nodes), Counts = [Count || {ok, Count} <- Results], lists:sum(Counts). 
diff --git a/mix.exs b/mix.exs index 3d3657ebb..d2b82266b 100644 --- a/mix.exs +++ b/mix.exs @@ -53,8 +53,8 @@ defmodule EMQXUmbrella.MixProject do {:jiffy, github: "emqx/jiffy", tag: "1.0.5", override: true}, {:cowboy, github: "emqx/cowboy", tag: "2.9.0", override: true}, {:esockd, github: "emqx/esockd", tag: "5.9.4", override: true}, - {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-7", override: true}, - {:ekka, github: "emqx/ekka", tag: "0.14.3", override: true}, + {:rocksdb, github: "emqx/erlang-rocksdb", tag: "1.7.2-emqx-9", override: true}, + {:ekka, github: "emqx/ekka", tag: "0.14.4", override: true}, {:gen_rpc, github: "emqx/gen_rpc", tag: "2.8.1", override: true}, {:grpc, github: "emqx/grpc-erl", tag: "0.6.7", override: true}, {:minirest, github: "emqx/minirest", tag: "1.3.8", override: true}, @@ -89,7 +89,8 @@ defmodule EMQXUmbrella.MixProject do {:ranch, github: "ninenines/ranch", ref: "a692f44567034dacf5efcaa24a24183788594eb7", override: true}, # in conflict by grpc and eetcd - {:gpb, "4.19.5", override: true, runtime: false} + {:gpb, "4.19.5", override: true, runtime: false}, + {:hackney, github: "benoitc/hackney", tag: "1.18.1", override: true} ] ++ umbrella_apps() ++ enterprise_apps(profile_info) ++ @@ -130,7 +131,7 @@ defmodule EMQXUmbrella.MixProject do defp enterprise_deps(_profile_info = %{edition_type: :enterprise}) do [ {:hstreamdb_erl, github: "hstreamdb/hstreamdb_erl", tag: "0.2.5"}, - {:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.7", override: true}, + {:influxdb, github: "emqx/influxdb-client-erl", tag: "1.1.9", override: true}, {:wolff, github: "kafka4beam/wolff", tag: "1.7.5"}, {:kafka_protocol, github: "kafka4beam/kafka_protocol", tag: "4.1.2", override: true}, {:brod_gssapi, github: "kafka4beam/brod_gssapi", tag: "v0.1.0-rc1"}, @@ -372,9 +373,11 @@ defmodule EMQXUmbrella.MixProject do Path.join(etc, "certs") ) + profile = System.get_env("MIX_ENV") + Mix.Generator.copy_file( - 
"apps/emqx_dashboard/etc/emqx.conf.en.example", - Path.join(etc, "emqx-example.conf"), + "_build/docgen/#{profile}/emqx.conf.en.example", + Path.join(etc, "emqx.conf.example"), force: overwrite? ) diff --git a/rebar.config b/rebar.config index 89cc7923d..7d6d0e404 100644 --- a/rebar.config +++ b/rebar.config @@ -55,8 +55,8 @@ , {jiffy, {git, "https://github.com/emqx/jiffy", {tag, "1.0.5"}}} , {cowboy, {git, "https://github.com/emqx/cowboy", {tag, "2.9.0"}}} , {esockd, {git, "https://github.com/emqx/esockd", {tag, "5.9.4"}}} - , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-7"}}} - , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.3"}}} + , {rocksdb, {git, "https://github.com/emqx/erlang-rocksdb", {tag, "1.7.2-emqx-9"}}} + , {ekka, {git, "https://github.com/emqx/ekka", {tag, "0.14.4"}}} , {gen_rpc, {git, "https://github.com/emqx/gen_rpc", {tag, "2.8.1"}}} , {grpc, {git, "https://github.com/emqx/grpc-erl", {tag, "0.6.7"}}} , {minirest, {git, "https://github.com/emqx/minirest", {tag, "1.3.8"}}} @@ -74,6 +74,7 @@ , {esasl, {git, "https://github.com/emqx/esasl", {tag, "0.2.0"}}} , {jose, {git, "https://github.com/potatosalad/erlang-jose", {tag, "1.11.2"}}} , {telemetry, "1.1.0"} + , {hackney, {git, "https://github.com/benoitc/hackney", {tag, "1.18.1"}}} ]}. {xref_ignores, diff --git a/rebar.config.erl b/rebar.config.erl index 349770487..e976d7729 100644 --- a/rebar.config.erl +++ b/rebar.config.erl @@ -462,7 +462,8 @@ etc_overlay(ReleaseType, Edition) -> [ {mkdir, "etc/"}, {copy, "{{base_dir}}/lib/emqx/etc/certs", "etc/"}, - {copy, "apps/emqx_dashboard/etc/emqx.conf.en.example", "etc/emqx-example.conf"} + {copy, "_build/docgen/" ++ name(Edition) ++ "/emqx.conf.en.example", + "etc/emqx.conf.example"} ] ++ lists:map( fun @@ -598,3 +599,6 @@ list_dir(Dir) -> false -> [] end. + +name(ce) -> "emqx"; +name(ee) -> "emqx-enterprise". 
diff --git a/scripts/ct/run.sh b/scripts/ct/run.sh index ba6d1f91f..b3c424ea1 100755 --- a/scripts/ct/run.sh +++ b/scripts/ct/run.sh @@ -17,7 +17,7 @@ help() { echo "--only-up: Only start the testbed but do not run CT" echo "--keep-up: Keep the testbed running after CT" echo "--ci: Set this flag in GitHub action to enforce no tests are skipped" - echo "--" If any, all args after '--' are passed to rebar3 ct + echo "--: If any, all args after '--' are passed to rebar3 ct" echo " otherwise it runs the entire app's CT" } @@ -161,6 +161,12 @@ for dep in ${CT_DEPS}; do ;; tdengine) FILES+=( '.ci/docker-compose-file/docker-compose-tdengine-restful.yaml' ) + ;; + clickhouse) + FILES+=( '.ci/docker-compose-file/docker-compose-clickhouse.yaml' ) + ;; + dynamo) + FILES+=( '.ci/docker-compose-file/docker-compose-dynamo.yaml' ) ;; *) echo "unknown_ct_dependency $dep" @@ -194,7 +200,7 @@ if [[ -t 1 ]]; then fi function restore_ownership { - if ! sudo chown -R "$ORIG_UID_GID" . >/dev/null 2>&1; then + if [[ -n ${EMQX_TEST_DO_NOT_RUN_SUDO+x} ]] || ! sudo chown -R "$ORIG_UID_GID" . >/dev/null 2>&1; then docker exec -i $TTY -u root:root "$ERLANG_CONTAINER" bash -c "chown -R $ORIG_UID_GID /emqx" >/dev/null 2>&1 || true fi } diff --git a/scripts/rel/cut.sh b/scripts/rel/cut.sh index b12c86519..19a03a98d 100755 --- a/scripts/rel/cut.sh +++ b/scripts/rel/cut.sh @@ -35,11 +35,10 @@ options: in addition to regular : one -NOTE: For 5.0 series the current working branch must be 'release-50' for opensource edition - and 'release-e50' for enterprise edition. 
+NOTE: For 5.0 series the current working branch must be 'release-50' --.--[ master ]---------------------------.-----------.--- \\ / - \`---[release-50]----(v5.0.12 | e5.0.0) + \`---[release-50]----(v5.0.20 | e5.0.1) EOF } diff --git a/scripts/spellcheck/dicts/emqx.txt b/scripts/spellcheck/dicts/emqx.txt index 107ae1f53..5975ebd1b 100644 --- a/scripts/spellcheck/dicts/emqx.txt +++ b/scripts/spellcheck/dicts/emqx.txt @@ -267,3 +267,5 @@ keytab jq nif TDengine +clickhouse +FormatType