diff --git a/.github/problem-matchers/compiler-non-source.json b/.github/problem-matchers/compiler-non-source.json new file mode 100644 index 0000000000000..5a21a7dcbb594 --- /dev/null +++ b/.github/problem-matchers/compiler-non-source.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "__comment_owner": "match compiler warning/error lines not from source", + "owner": "compiler-non-source", + "pattern": [ + { + "__comment_regexp1": "clang: warning: argument unused during compilation: '-march=armv7-a' [-Wunused-command-line-argument]", + "__comment_regexp2": "ld.lld: warning: lld uses blx instruction, no object with architecture supporting feature detected", + "regexp": "^(?:[^:]+): (?:fatal\\s+)?(warning|error):\\s+(.*)$", + "severity": 1, + "message": 2 + } + ] + } + ] +} diff --git a/.github/problem-matchers/compiler-source.json b/.github/problem-matchers/compiler-source.json new file mode 100644 index 0000000000000..e719f671e8dc7 --- /dev/null +++ b/.github/problem-matchers/compiler-source.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "owner": "gcc-problem-matcher", + "pattern": [ + { + "regexp": "^(?:/linux/)?(.*):(\\d+):(\\d+):\\s+(?:fatal\\s+)?(warning|error):\\s+(.*)$", + "file": 1, + "line": 2, + "column": 3, + "severity": 4, + "message": 5 + } + ] + } + ] +} diff --git a/.github/problem-matchers/sparse.json b/.github/problem-matchers/sparse.json new file mode 100644 index 0000000000000..83c626e5448e4 --- /dev/null +++ b/.github/problem-matchers/sparse.json @@ -0,0 +1,17 @@ +{ + "problemMatcher": [ + { + "owner": "powerpc-sparse", + "pattern": [ + { + "regexp": "^\\+(?:/linux/)?(.*):(\\d+|XX):(\\d+|XX):\\s+(error|warning):\\s+(.*)$", + "file": 1, + "line": 2, + "column": 3, + "severity": 4, + "message": 5 + } + ] + } + ] +} diff --git a/.github/workflows/powerpc-allconfig.yml b/.github/workflows/powerpc-allconfig.yml new file mode 100644 index 0000000000000..d85a980eb4404 --- /dev/null +++ b/.github/workflows/powerpc-allconfig.yml @@ -0,0 +1,90 @@ +name: powerpc/allconfig + +# Controls when the action will run. +on: + # This allows the build to be triggered manually via the github UI. 
+ workflow_dispatch: + + push: + # This triggers the build on a push to merge-test only + branches: + - 'merge-test' + +jobs: + kernel: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + include: + # ppc64le allmod and allyes + - subarch: ppc64le + defconfig: ppc64le_allmodconfig + image: fedora-37 + - subarch: ppc64le + defconfig: allyesconfig + image: fedora-37 + merge_config: /linux/arch/powerpc/configs/le.config + # ppc64 allmod and allyes + - subarch: ppc64 + defconfig: allmodconfig + image: fedora-37 + - subarch: ppc64 + defconfig: allyesconfig + image: fedora-37 + # ppc32 allmod + - subarch: ppc + defconfig: ppc32_allmodconfig + image: fedora-37 + # ppc64 book3e allmod + - subarch: ppc64 + defconfig: ppc64_book3e_allmodconfig + image: fedora-37 + + env: + ARCH: powerpc + TARGET: kernel + CCACHE: 1 + SUBARCH: ${{ matrix.subarch }} + IMAGE: ${{ matrix.image }} + DEFCONFIG: ${{ matrix.defconfig }} + MERGE_CONFIG: /linux/arch/powerpc/configs/disable-werror.config,${{ matrix.merge_config }} + + steps: + - uses: actions/checkout@v3 + + - name: Register problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/compiler-source.json" + echo "::add-matcher::.github/problem-matchers/compiler-non-source.json" + + - name: Load ccache + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build + run: | + mkdir -p ~/.ccache + ./arch/powerpc/tools/ci-build.sh + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.defconfig }}-${{ matrix.image }} + path: | + ~/output/vmlinux + ~/output/.config + ~/output/System.map + ~/output/modules.tar.bz2 + ~/output/arch/powerpc/boot/zImage + ~/output/arch/powerpc/boot/uImage diff --git a/.github/workflows/powerpc-clang.yml b/.github/workflows/powerpc-clang.yml new file mode 100644 index 0000000000000..e3cc6dbd3b3dd --- /dev/null +++ b/.github/workflows/powerpc-clang.yml @@ -0,0 +1,84 @@ +name: powerpc/clang + +# Controls when the action will run. +on: + # This allows the build to be triggered manually via the github UI. 
+ workflow_dispatch: + + push: + # This triggers the build on a push to any branch + branches: + - '**' + # As long as one of these paths matches + paths: + - '!tools/**' # ignore tools + - '!samples/**' # ignore samples + - '!Documentation/**' # ignore Documentation + - '!arch/**' # ignore arch changes + - 'arch/powerpc/**' # but not arch/powerpc + - 'arch/Kconfig' # or common bits in arch + - '**' # anything else triggers a build + +jobs: + kernel: + runs-on: ubuntu-latest + + strategy: + fail-fast: false + matrix: + defconfig: [ppc64, corenet64_smp, pmac32, ppc40x, mpc885_ads] + image: [fedora-37] + subarch: [ppc64] + include: + - subarch: ppc64le + defconfig: ppc64le + image: fedora-37 + + env: + CLANG: 1 + LLVM_IAS: 0 + ARCH: powerpc + TARGET: kernel + CCACHE: 1 + SUBARCH: ${{ matrix.subarch }} + IMAGE: ${{ matrix.image }} + DEFCONFIG: ${{ matrix.defconfig }} + MERGE_CONFIG: /linux/arch/powerpc/configs/disable-werror.config + + steps: + - uses: actions/checkout@v3 + + - name: Register problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/compiler-source.json" + echo "::add-matcher::.github/problem-matchers/compiler-non-source.json" + + - name: Load ccache + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build + run: | + mkdir -p ~/.ccache + ./arch/powerpc/tools/ci-build.sh + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.defconfig }}-${{ matrix.image }} + path: | + ~/output/vmlinux + ~/output/.config + ~/output/System.map + ~/output/modules.tar.bz2 + ~/output/arch/powerpc/boot/zImage + ~/output/arch/powerpc/boot/uImage diff --git a/.github/workflows/powerpc-extrawarn.yml b/.github/workflows/powerpc-extrawarn.yml new file mode 100644 index 0000000000000..5ce7965dacf78 --- /dev/null +++ b/.github/workflows/powerpc-extrawarn.yml @@ -0,0 +1,71 @@ +name: powerpc/extrawarn + +# Controls when the action will run. +on: + # Only when triggered manually via the github UI. 
+ workflow_dispatch: + +jobs: + kernel: + runs-on: ubuntu-latest + + strategy: + matrix: + defconfig: [ppc64, corenet64_smp, pmac32, ppc40x, ppc44x, mpc885_ads, corenet32_smp] + image: [fedora-37, korg-5.5.0] + subarch: [ppc64] + include: + - subarch: ppc64le + defconfig: ppc64le + image: korg-5.5.0 + - subarch: ppc64le + defconfig: ppc64le + image: fedora-37 + + env: + ARCH: powerpc + TARGET: kernel + CCACHE: 1 + SUBARCH: ${{ matrix.subarch }} + IMAGE: ${{ matrix.image }} + DEFCONFIG: ${{ matrix.defconfig }} + MERGE_CONFIG: /linux/arch/powerpc/configs/disable-werror.config + KBUILD_EXTRA_WARN: 1 + + steps: + - uses: actions/checkout@v3 + + - name: Register problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/compiler-source.json" + echo "::add-matcher::.github/problem-matchers/compiler-non-source.json" + + - name: Load ccache + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build + run: | + mkdir -p ~/.ccache + ./arch/powerpc/tools/ci-build.sh + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.defconfig }}-${{ matrix.image }} + path: | + ~/output/vmlinux + ~/output/.config + ~/output/System.map + ~/output/modules.tar.bz2 + ~/output/arch/powerpc/boot/zImage + ~/output/arch/powerpc/boot/uImage diff --git a/.github/workflows/powerpc-kernel+qemu.yml b/.github/workflows/powerpc-kernel+qemu.yml new file mode 100644 index 0000000000000..7b5c44e225c51 --- /dev/null +++ b/.github/workflows/powerpc-kernel+qemu.yml @@ -0,0 +1,219 @@ +name: powerpc/kernel+qemu + +# Controls when the action will run. +on: + # This allows the build to be triggered manually via the github UI. 
+ workflow_dispatch: + + push: + # This triggers the build on a push to any branch + branches: + - '**' + # As long as one of these paths matches + paths: + - '!tools/**' # ignore tools + - '!samples/**' # ignore samples + - '!Documentation/**' # ignore Documentation + - '!arch/**' # ignore arch changes + - 'arch/powerpc/**' # but not arch/powerpc + - 'arch/Kconfig' # or common bits in arch + - '**' # anything else triggers a build + +jobs: + kernel: + runs-on: ubuntu-latest + + strategy: + matrix: + defconfig: [ppc64_defconfig, ppc40x_defconfig, mpc885_ads_defconfig] + image: [fedora-38, korg-5.5.0] + include: + # ppc64le_guest_defconfig + - subarch: ppc64le + defconfig: ppc64le_guest_defconfig + image: fedora-38 + - subarch: ppc64le + defconfig: ppc64le_guest_defconfig + image: korg-5.5.0 + + # ppc44x + - defconfig: ppc44x_defconfig + merge_config: /linux/arch/powerpc/configs/ppc44x-qemu.config + image: fedora-38 + - defconfig: ppc44x_defconfig + merge_config: /linux/arch/powerpc/configs/ppc44x-qemu.config + image: korg-5.5.0 + + # corenet64_smp + - defconfig: corenet64_smp_defconfig + image: fedora-38 + - defconfig: corenet64_smp_defconfig + image: korg-5.5.0 + + # g5 + - defconfig: g5_defconfig + merge_config: /linux/arch/powerpc/configs/g5-qemu.config + image: fedora-38 + - defconfig: g5_defconfig + merge_config: /linux/arch/powerpc/configs/g5-qemu.config + image: korg-5.5.0 + + # pmac32 + - defconfig: pmac32_defconfig + merge_config: /linux/arch/powerpc/configs/pmac32-qemu.config + image: fedora-38 + - defconfig: pmac32_defconfig + merge_config: /linux/arch/powerpc/configs/pmac32-qemu.config + image: korg-5.5.0 + + - defconfig: corenet32_smp_defconfig + image: fedora-38 + + env: + ARCH: powerpc + TARGET: kernel + CCACHE: 1 + SUBARCH: ${{ matrix.subarch }} + IMAGE: ${{ matrix.image }} + DEFCONFIG: ${{ matrix.defconfig }} + MERGE_CONFIG: ${{ matrix.merge_config }} + + steps: + - uses: actions/checkout@v3 + + - name: Register problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/compiler-source.json" + echo "::add-matcher::.github/problem-matchers/compiler-non-source.json" + + - name: Load ccache + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build + run: | + mkdir -p ~/.ccache + ./arch/powerpc/tools/ci-build.sh + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.defconfig }}-${{ matrix.image }} + path: | + ~/output/vmlinux + ~/output/.config + ~/output/System.map + ~/output/modules.tar.bz2 + ~/output/arch/powerpc/boot/zImage + ~/output/arch/powerpc/boot/uImage + ~/output/include/config/kernel.release + + boot: + runs-on: ubuntu-latest + needs: kernel + + strategy: + matrix: + include: + - defconfig: ppc64le_guest_defconfig + machine: pseries+p8+tcg + machine_2: pseries+p9+tcg + packages: qemu-system-ppc64 + rootfs: ppc64le-rootfs.cpio.gz + old-image: korg-5.5.0 + new-image: fedora-38 + + - defconfig: ppc64le_guest_defconfig + machine: powernv+p8+tcg + machine_2: powernv+p9+tcg + packages: qemu-system-ppc64 + rootfs: ppc64le-rootfs.cpio.gz + old-image: korg-5.5.0 + new-image: fedora-38 + + - defconfig: ppc44x_defconfig + machine: 44x + packages: qemu-system-ppc + rootfs: ppc-rootfs.cpio.gz + old-image: korg-5.5.0 + new-image: fedora-38 + + - defconfig: 
corenet64_smp_defconfig + machine: ppc64e + machine_2: ppc64e+compat + packages: qemu-system-ppc64 + rootfs: ppc64-novsx-rootfs.cpio.gz ppc-rootfs.cpio.gz + old-image: korg-5.5.0 + new-image: fedora-38 + + - defconfig: g5_defconfig + machine: g5 + packages: qemu-system-ppc64 openbios-ppc + rootfs: ppc64-rootfs.cpio.gz + old-image: korg-5.5.0 + new-image: fedora-38 + + - defconfig: pmac32_defconfig + machine: mac99 + packages: qemu-system-ppc openbios-ppc + rootfs: ppc-rootfs.cpio.gz + old-image: korg-5.5.0 + new-image: fedora-38 + + steps: + - uses: actions/checkout@v3 + with: + repository: linuxppc/ci-scripts + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Download root disk + run: make -C root-disks ${{ matrix.rootfs }} + + - name: Set root disk path + run: echo "ROOT_DISK_PATH=$PWD/root-disks" >> $GITHUB_ENV + + - name: APT update + run: sudo apt update + + - name: Install dependencies + run: sudo apt install -y ${{ matrix.packages }} python3-pexpect python3-termcolor python3-yaml + + - uses: actions/download-artifact@v3 + with: + name: ${{ matrix.defconfig }}-${{ matrix.new-image }} + + - name: Disable network tests + run: echo "QEMU_NET_TESTS=0" >> $GITHUB_ENV + + - name: Run qemu-${{ matrix.machine }} with ${{ matrix.new-image }} build kernel + run: ./scripts/boot/qemu-${{ matrix.machine }} + + - name: Run qemu-${{ matrix.machine_2 }} with ${{ matrix.new-image }} build kernel + run: ./scripts/boot/qemu-${{ matrix.machine_2 }} + if: matrix.machine_2 != '' + + - uses: actions/download-artifact@v3 + with: + name: ${{ matrix.defconfig }}-${{ matrix.old-image }} + + - name: Run qemu-${{ matrix.machine }} with ${{ matrix.old-image }} build kernel + run: ./scripts/boot/qemu-${{ matrix.machine }} + + - name: Run qemu-${{ matrix.machine_2 }} with ${{ matrix.old-image }} build kernel + run: ./scripts/boot/qemu-${{ matrix.machine_2 }} + if: matrix.machine_2 != '' diff --git a/.github/workflows/powerpc-perf.yml b/.github/workflows/powerpc-perf.yml new file mode 100644 index 0000000000000..e25f6270f5ed2 --- /dev/null +++ b/.github/workflows/powerpc-perf.yml @@ -0,0 +1,71 @@ +name: powerpc/perf + +# Controls when the action will run. +on: + # This allows the build to be triggered manually via the github UI. 
+ workflow_dispatch: + + push: + # This triggers the build on a push to any branch + branches: + - '**' + # As long as one of these paths matches + paths: + # Only build if perf or other pieces it uses have been modified + - 'tools/perf/**' + - 'tools/arch/**' + - 'tools/build/**' + - 'tools/include/**' + - 'tools/lib/**' + - 'tools/scripts/**' + # Change to workflow triggers a build + - '.github/workflows/powerpc-perf.yml' + +jobs: + perf: + runs-on: ubuntu-latest + + strategy: + matrix: + image: [ubuntu-22.04, ubuntu-20.04, ubuntu-18.04] + subarch: [ppc64, ppc64le] + + env: + ARCH: powerpc + TARGET: perf + CCACHE: 1 + SUBARCH: ${{ matrix.subarch }} + IMAGE: ${{ matrix.image }} + + steps: + - uses: actions/checkout@v3 + + - name: Register problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/compiler-source.json" + echo "::add-matcher::.github/problem-matchers/compiler-non-source.json" + + - name: Load ccache + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build + run: | + mkdir -p ~/.ccache + ./arch/powerpc/tools/ci-build.sh + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.subarch }}-${{ matrix.image }} + path: | + ~/output diff --git a/.github/workflows/powerpc-ppctests.yml b/.github/workflows/powerpc-ppctests.yml new file mode 100644 index 0000000000000..9caa8fdddfc27 --- /dev/null +++ b/.github/workflows/powerpc-ppctests.yml @@ -0,0 +1,69 @@ +name: powerpc/ppctests + +# Controls when the action will run. +on: + # This allows the build to be triggered manually via the github UI. 
+ workflow_dispatch: + + push: + # This triggers the build on a push to any branch + branches: + - '**' + # As long as one of these paths matches + paths: + # Generic selftests changes might affect us so match all of selftests + - 'tools/testing/selftests/**' + # Some files in arch are symlinked by selftests + - 'arch/powerpc/**' + # Change to workflow triggers a build + - '.github/workflows/powerpc-ppctests.yml' + +jobs: + ppctests: + runs-on: ubuntu-latest + + strategy: + matrix: + image: [ubuntu-22.04, ubuntu-20.04, ubuntu-18.04, ubuntu-16.04] + subarch: [ppc64, ppc64le] + + env: + ARCH: powerpc + TARGET: ppctests + CCACHE: 1 + INSTALL: 1 + SUBARCH: ${{ matrix.subarch }} + IMAGE: ${{ matrix.image }} + + steps: + - uses: actions/checkout@v3 + + - name: Register problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/compiler-source.json" + echo "::add-matcher::.github/problem-matchers/compiler-non-source.json" + + - name: Load ccache + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build + run: | + mkdir -p ~/.ccache + ./arch/powerpc/tools/ci-build.sh + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.subarch }}-${{ matrix.image }} + path: | + ~/output/install diff --git a/.github/workflows/powerpc-selftests.yml b/.github/workflows/powerpc-selftests.yml new file mode 100644 index 0000000000000..64c1b4e7b7cf0 --- /dev/null +++ b/.github/workflows/powerpc-selftests.yml @@ -0,0 +1,69 @@ +name: powerpc/selftests + +# Controls when the action will run. +on: + # This allows the build to be triggered manually via the github UI. 
+ workflow_dispatch: + + push: + # This triggers the build on a push to any branch + branches: + - '**' + # As long as one of these paths matches + paths: + # Generic selftests changes might affect us so match all of selftests + - 'tools/testing/selftests/**' + # Some files in arch are symlinked by selftests + - 'arch/powerpc/**' + # Change to workflow triggers a build + - '.github/workflows/powerpc-selftests.yml' + +jobs: + selftests: + runs-on: ubuntu-latest + + strategy: + matrix: + image: [ubuntu-22.04, ubuntu-20.04, ubuntu-18.04, ubuntu-16.04] + subarch: [ppc64, ppc64le] + + env: + ARCH: powerpc + TARGET: selftests + CCACHE: 1 + INSTALL: 1 + SUBARCH: ${{ matrix.subarch }} + IMAGE: ${{ matrix.image }} + + steps: + - uses: actions/checkout@v3 + + - name: Register problem matchers + run: | + echo "::add-matcher::.github/problem-matchers/compiler-source.json" + echo "::add-matcher::.github/problem-matchers/compiler-non-source.json" + + - name: Load ccache + uses: actions/cache@v3 + with: + path: ~/.ccache + key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }} + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Build + run: | + mkdir -p ~/.ccache + ./arch/powerpc/tools/ci-build.sh + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: ${{ matrix.subarch }}-${{ matrix.image }} + path: | + ~/output/install diff --git a/.github/workflows/powerpc-sparse.yml b/.github/workflows/powerpc-sparse.yml new file mode 100644 index 0000000000000..e8561818740e4 --- /dev/null +++ b/.github/workflows/powerpc-sparse.yml @@ -0,0 +1,103 @@ +name: powerpc/sparse + +# Controls when the action will run. +on: + # This allows the build to be triggered manually via the github UI. 
+  workflow_dispatch:
+
+  push:
+    # This triggers the build on a push to any branch
+    branches:
+      - '**'
+    # As long as one of these paths matches
+    paths:
+      - '!tools/**'          # ignore tools
+      - '!samples/**'        # ignore samples
+      - '!Documentation/**'  # ignore Documentation
+      - '!arch/**'           # ignore arch changes
+      - 'arch/powerpc/**'    # but not arch/powerpc
+      - 'arch/Kconfig'       # or common bits in arch
+      - '**'                 # anything else triggers a build
+
+jobs:
+  sparse:
+    runs-on: ubuntu-latest
+
+    strategy:
+      matrix:
+        defconfig: [ppc64, pmac32, mpc885_ads_defconfig]
+        image: [fedora-37]
+        subarch: [ppc64]
+        include:
+          - subarch: ppc64le
+            defconfig: ppc64le
+            image: fedora-37
+
+    env:
+      ARCH: powerpc
+      TARGET: kernel
+      CCACHE: 1
+      SPARSE: 2
+      SUBARCH: ${{ matrix.subarch }}
+      IMAGE: ${{ matrix.image }}
+      DEFCONFIG: ${{ matrix.defconfig }}
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v3
+
+      # NB this is before the build on merge, so all errors are detected
+      # On other branches we ignore these and only report sparse diff results below
+      - name: Register problem matchers
+        if: github.ref_name == 'merge'
+        run: |
+          echo "::add-matcher::.github/problem-matchers/compiler-source.json"
+          echo "::add-matcher::.github/problem-matchers/compiler-non-source.json"
+
+      - name: Load ccache
+        uses: actions/cache@v3
+        with:
+          path: ~/.ccache
+          key: ${{ matrix.image }}-${{ matrix.subarch }}-${{ matrix.defconfig }}
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v2
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build
+        run: |
+          mkdir -p ~/.ccache
+          ./arch/powerpc/tools/ci-build.sh
+
+      - name: Get sparse results from base tree
+        if: github.ref_name != 'merge'
+        continue-on-error: true
+        uses: dawidd6/action-download-artifact@v2
+        with:
+          workflow: powerpc-sparse.yml
+          workflow_conclusion: success
+          branch: merge # Requires the merge branch to be built once before this will work
+          name: sparse-${{ matrix.defconfig }}-${{ matrix.image }}.log
+
+      # NB this is after the build on non-merge, so only errors from the sparse diff are detected
+      - name: Register problem matcher
+        if: github.ref_name != 'merge'
+        run: |
+          echo "::add-matcher::.github/problem-matchers/sparse.json"
+
+      - name: Compare sparse results with base
+        if: github.ref_name != 'merge'
+        run: |
+          bash -c 'if [ !
-f sparse.log ]; then cp ~/output/sparse.log .; fi' + ./arch/powerpc/tools/smart-sparse-diff.py sparse.log ~/output/sparse.log | tee -a ~/output/sparse-diff.log + + - name: Archive artifacts + uses: actions/upload-artifact@v3 + with: + name: sparse-${{ matrix.defconfig }}-${{ matrix.image }}.log + path: | + ~/output/sparse.log + ~/output/sparse-diff.log diff --git a/.gitignore b/.gitignore index 0bbae167bf93e..b2c1d38a19889 100644 --- a/.gitignore +++ b/.gitignore @@ -95,6 +95,7 @@ modules.order # # We don't want to ignore the following even if they are dot-files # +!.github !.clang-format !.cocciconfig !.get_maintainer.ignore diff --git a/arch/powerpc/configs/disable-werror.config b/arch/powerpc/configs/disable-werror.config index 7776b91da37f1..ca1019e1e7223 100644 --- a/arch/powerpc/configs/disable-werror.config +++ b/arch/powerpc/configs/disable-werror.config @@ -1,2 +1,3 @@ # Help: Disable -Werror CONFIG_PPC_DISABLE_WERROR=y +CONFIG_WERROR=n diff --git a/arch/powerpc/configs/g5-qemu.config b/arch/powerpc/configs/g5-qemu.config new file mode 100644 index 0000000000000..3ae1c8a8bc61f --- /dev/null +++ b/arch/powerpc/configs/g5-qemu.config @@ -0,0 +1,2 @@ +CONFIG_SERIAL_PMACZILOG=y +CONFIG_SERIAL_PMACZILOG_CONSOLE=y diff --git a/arch/powerpc/configs/pmac32-qemu.config b/arch/powerpc/configs/pmac32-qemu.config new file mode 100644 index 0000000000000..81d3a24b2169e --- /dev/null +++ b/arch/powerpc/configs/pmac32-qemu.config @@ -0,0 +1,5 @@ +CONFIG_SERIAL_PMACZILOG=y +CONFIG_SERIAL_PMACZILOG_CONSOLE=y +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y +CONFIG_DEBUG_ATOMIC_SLEEP=y diff --git a/arch/powerpc/configs/ppc44x-qemu.config b/arch/powerpc/configs/ppc44x-qemu.config new file mode 100644 index 0000000000000..5e9cf983acc7d --- /dev/null +++ b/arch/powerpc/configs/ppc44x-qemu.config @@ -0,0 +1,2 @@ +CONFIG_DEVTMPFS=y +CONFIG_DEVTMPFS_MOUNT=y diff --git a/arch/powerpc/tools/ci-build.sh b/arch/powerpc/tools/ci-build.sh new file mode 100755 index 0000000000000..1b3f6a391b4f5 --- /dev/null +++ b/arch/powerpc/tools/ci-build.sh @@ -0,0 +1,84 @@ +#!/bin/bash + +if [[ -z "$TARGET" || -z "$IMAGE" ]]; then + echo "Error: required environment variables not set!" 
+ exit 1 +fi + +cmd="docker run --rm " +cmd+="--network none " +cmd+="-w /linux " + +linux_dir=$(realpath $(dirname $0))/../../../ +cmd+="-v $linux_dir:/linux:ro " + +cmd+="-e ARCH " +cmd+="-e JFACTOR=$(nproc) " +cmd+="-e KBUILD_BUILD_TIMESTAMP=$(date +%Y-%m-%d) " +cmd+="-e CLANG " +cmd+="-e LLVM_IAS " +cmd+="-e SPARSE " + +if [[ -n "$MODULES" ]]; then + cmd+="-e MODULES=$MODULES " +fi + +if [[ -n "$DEFCONFIG" ]]; then + if [[ $DEFCONFIG != *config ]]; then + DEFCONFIG=${DEFCONFIG}_defconfig + fi + + cmd+="-e DEFCONFIG=${DEFCONFIG} " +fi + +if [[ -n "$MERGE_CONFIG" ]]; then + cmd+="-e MERGE_CONFIG=$MERGE_CONFIG " +fi + +if [[ "$SUBARCH" == "ppc64le" ]]; then + cross="powerpc64le-linux-gnu-" +else + cross="powerpc-linux-gnu-" +fi +cmd+="-e CROSS_COMPILE=$cross " + +mkdir -p $HOME/output +cmd+="-v $HOME/output:/output:rw " + +user=$(stat -c "%u:%g" $HOME/output) +cmd+="-u $user " + +if [[ -n "$CCACHE" ]]; then + cmd+="-v $HOME/.ccache:/ccache:rw " + cmd+="-e CCACHE_DIR=/ccache " + cmd+="-e CCACHE=1 " +fi + +if [[ -n "$TARGETS" ]]; then + cmd+="-e TARGETS=$TARGETS " +fi + +if [[ -n "$INSTALL" ]]; then + cmd+="-e INSTALL=$INSTALL " +fi + +if [[ "$TARGET" == "kernel" ]]; then + cmd+="-e QUIET=1 " +fi + +if [[ -n $KBUILD_EXTRA_WARN ]]; then + cmd+="-e KBUILD_EXTRA_WARN=$KBUILD_EXTRA_WARN " +fi + +cmd+="ghcr.io/linuxppc/build:$IMAGE-$(uname -m) " +cmd+="/bin/container-build.sh $TARGET" + +(set -x; $cmd) + +rc=$? + +if [[ -n "$SPARSE" ]]; then + cat $HOME/output/sparse.log +fi + +exit $rc diff --git a/arch/powerpc/tools/smart-sparse-diff.py b/arch/powerpc/tools/smart-sparse-diff.py new file mode 100755 index 0000000000000..9991fde5a5a0d --- /dev/null +++ b/arch/powerpc/tools/smart-sparse-diff.py @@ -0,0 +1,314 @@ +#!/usr/bin/python3 +import sys +from typing import Dict, List, Tuple, Any + +verbose = False +def vprint(*args, **kwargs): + if verbose: + print(*args, **kwargs) + +def deinterleave_by_file(log: str) -> Dict[str, List[List[str]]]: + # zeroeth pass: things get interleaved with multiprocess compilation + # so deinterleave it first + lines_by_file = {} # type: Dict[str, List[List[str]]] + for line in log.split("\n"): + parts = line.split(":") + + filename = parts[0] + + if filename not in lines_by_file: + lines_by_file[filename] = [] + + lines_by_file[filename] += [parts] + + return lines_by_file + +def concat_multi_line_warnings(split_lines: List[List[str]]) -> List[List[str]]: + # first pass: concatenate irritating things like: + #drivers/scsi/lpfc/lpfc_scsi.c:5606:30: warning: incorrect type in assignment (different base types) + #drivers/scsi/lpfc/lpfc_scsi.c:5606:30: expected int [signed] memory_flags + #drivers/scsi/lpfc/lpfc_scsi.c:5606:30: got restricted gfp_t + lines = [] # type: List[List[str]] + last_column = "" + last_line = "" + for parts in split_lines: + + if len(parts) < 4: + # this doesn't have enough parts to be a 'real' line. 
+ # store it, don't attempt to process it now + # hopefully it will be removed in deduplication + lines += [parts] + continue + + (linenum, columnnum) = parts[1:3] + final_mandatory_part = parts[3].strip() + final_parts = ":".join(parts[3:]).strip() + + #vprint(line) + if (linenum != last_line) or \ + (last_column != columnnum): + # this is a different line and column, it cannot be a concatenation + lines += [parts] + #vprint("different f/l/c") + elif (final_mandatory_part == "warning") or \ + (final_mandatory_part == "error"): + # this has an explicit type: it is a new message + lines += [parts] + #vprint("explicit type") + else: + # looks like this is a continuation + last_line_parts = lines[-1] + last_line_parts[-1] += " " + final_parts + lines[-1] = last_line_parts + #vprint("concat: new last: " + str(lines[-1])) + + last_line = linenum + last_column = columnnum + + return lines + +def parse_log_by_file(log: str) -> Dict[str, List[List[str]]]: + + lines_by_file = deinterleave_by_file(log) + + concat_lines_by_file = {} + for filename in lines_by_file: + concat_lines_by_file[filename] = concat_multi_line_warnings(lines_by_file[filename]) + + return concat_lines_by_file + +def smart_filter(a: List[Any], + b: List[Any]) -> List[Any]: + res = [] # type: List[Any] + # two reasons we'd want to keep a line: + # it does not appear in the other at all + # it appears an unequal number of times (think headers) + # (to manage this in the report, only include it where it appears + # more times) + for l in a: + if l not in b: + res += [l] + else: + if len([ll for ll in a if ll == l]) > \ + len([ll for ll in b if ll == l]): + + # save only once + if l not in res: + res += [l] + return res + +def remove_exact_matching_lines(old_lines: List[List[str]], + new_lines: List[List[str]]) \ + -> Tuple[List[List[str]], List[List[str]]]: + + new_old = smart_filter(old_lines, new_lines) + new_new = smart_filter(new_lines, old_lines) + + if new_old == []: + new_old = None + + if new_new == []: + new_new = None + + return (new_old, new_new) + +def remove_lines_diff_by_only_line_no(old_lines, new_lines): + + # drop weird short lines + safe_old_lines = [] + for parts in old_lines: + if len(parts) < 4: + # this doesn't have enough parts to be a 'real' line. warn and proceed. + print('Found odd line "%s" in old file, ignoring.' % ':'.join(parts)) + else: + safe_old_lines += [parts] + safe_new_lines = [] + for parts in new_lines: + if len(parts) < 4: + # this doesn't have enough parts to be a 'real' line. warn and proceed. + print('Found odd line "%s" in new file, ignoring.' 
% ':'.join(parts)) + else: + safe_new_lines += [parts] + + old_wo_line = [":".join([l[0]] + l[2:]) for l in safe_old_lines] + new_wo_line = [":".join([l[0]] + l[2:]) for l in safe_new_lines] + + new_old = smart_filter(old_wo_line, new_wo_line) + new_new = smart_filter(new_wo_line, old_wo_line) + + old_parts = [l.split(':') for l in new_old] + new_parts = [l.split(':') for l in new_new] + + old_parts = [[l[0], 'XX'] + l[1:] for l in old_parts] + new_parts = [[l[0], 'XX'] + l[1:] for l in new_parts] + + if old_parts == []: + old_parts = None + if new_parts == []: + new_parts = None + + return (old_parts, new_parts) + +def format_one_warning(parts: List[str]) -> str: + return ":".join(parts) + + +def smart_diff(old_log: str, new_log: str + ) -> Tuple[List[str], List[str]]: + old_by_file = parse_log_by_file(old_log) + new_by_file = parse_log_by_file(new_log) + + # todo - this structure is helpful for progressive development and + # debugging, but is not very efficient + + # we now have 2x { filename: [list of warnings] } + # go to 1x { filename: (old warnings, new warnings) } + combined_warnings = {} + for filename in set(old_by_file.keys()) | set(new_by_file.keys()): + olds = None + if filename in old_by_file: + olds = old_by_file[filename] + + news = None + if filename in new_by_file: + news = new_by_file[filename] + + combined_warnings[filename] = (olds, news) + + only_new = {} + only_old = {} + # lets winnow out our lists a bit + changed = {} + for filename in combined_warnings: + (olds, news) = combined_warnings[filename] + + if news and not olds: + only_new[filename] = (olds, news) + elif olds and not news: + only_old[filename] = (olds, news) + elif not olds and not news: + print("Something weird going on with: " + filename) + else: + changed[filename] = (olds, news) + + vprint("After parsing:") + vprint("Only new warnings: " + str(len(only_new.keys()))) + vprint("Only old warnings: " + str(len(only_old.keys()))) + vprint("Changed: " + str(len(changed.keys()))) + + + # remove entire duplicated files + changed_1 = {} + for filename in changed: + (olds, news) = changed[filename] + if olds == news: + vprint("exact complete match drops: " + filename) + else: + changed_1[filename] = (olds, news) + + vprint("After removing exact file matches:") + vprint("Only new warnings: " + str(len(only_new.keys()))) + vprint("Only old warnings: " + str(len(only_old.keys()))) + vprint("Changed: " + str(len(changed_1.keys()))) + + # now, lets just try removing exact matching lines + changed_2 = {} + for filename in changed_1: + (olds, news) = changed_1[filename] + (olds, news) = remove_exact_matching_lines(olds, news) + if not olds and not news: + vprint("remove_exact_matching_lines completely matched: " + filename) + elif olds and not news: + only_old[filename] = (olds, news) + elif not olds and news: + only_new[filename] = (olds, news) + else: + changed_2[filename] = (olds, news) + + vprint("After removing exact line matches:") + vprint("Only new warnings: " + str(len(only_new.keys()))) + vprint("Only old warnings: " + str(len(only_old.keys()))) + vprint("Changed: " + str(len(changed_2.keys()))) + + # now, lets just try removing lines w/ matching column, diff line + changed_3 = {} + for filename in changed_2: + (olds, news) = remove_lines_diff_by_only_line_no(*changed_2[filename]) + if olds and news: + changed_3[filename] = (olds, news) + elif olds and not news: + only_old[filename] = (olds, news) + elif not olds and news: + only_new[filename] = (olds, news) + else: + vprint("diff by only line no removed: 
" + filename) + + vprint("After removing warnings differing in line number only (same column, message):") + vprint("Only new warnings: " + str(len(only_new.keys()))) + vprint("Only old warnings: " + str(len(only_old.keys()))) + vprint("Changed: " + str(len(changed_3.keys()))) + + #fn = list(changed_3.keys())[0] + #ch = changed_3[fn] + + # now lets format data for return + # I assume consumers (so far, just pretty-printing) is pretty unconcerned with + # getting the messages split up by file name. So let's flatten our dictionaries + # note that this doesn't flatten them properly yet - we get a list where each + # item represents a file, and each item is a list of warnings, and each warning + # is a list of parts. + removed_msgs = [only_old[fn][0] for fn in only_old] + added_msgs = [only_new[fn][1] for fn in only_new] + + # also, the whole concept of 'changed' - files with changed messages - + # is pretty unique to our analysis, so just flatten them out too + removed_msgs += [changed_3[fn][0] for fn in changed_3] + added_msgs += [changed_3[fn][1] for fn in changed_3] + + # lastly, rejoin on ":", flattening out the lists as we go. + removed_warns = [] # type: List[str] + for sublist in removed_msgs: + for msg in sublist: + removed_warns += [format_one_warning(msg)] + added_warns = [] # type: List[str] + for sublist in added_msgs: + for msg in sublist: + added_warns += [format_one_warning(msg)] + + return (removed_warns, added_warns) + + +def usage(exec_name: str) -> None: + print("Usage: %s " % exec_name) + print(" attempt a smart diff between sparse logs in oldfile and newfile") + exit(1) + +if __name__ == '__main__': + if len(sys.argv) != 3: + usage(sys.argv[0]) + + try: + with open(sys.argv[1], 'r') as old_file: + old_log = old_file.read() + except: + print("Error reading old log file %s" % old_file) + exit(1) + + try: + with open(sys.argv[2], 'r') as new_file: + new_log = new_file.read() + except: + print("Error reading new log file %s" % new_file) + exit(1) + + (removed, added) = smart_diff(old_log, new_log) + + lines = [] # type: List[str] + lines += ['-' + w for w in removed] + lines += ['+' + w for w in added] + + # sort by message, not including +/- + lines.sort(key=lambda x: x[1:]) + for l in lines: + print(l) + diff --git a/block/blk-mq.c b/block/blk-mq.c index ec922c6bccbe2..1fafd54dce3cb 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -4405,11 +4405,8 @@ static int blk_mq_realloc_tag_set_tags(struct blk_mq_tag_set *set, struct blk_mq_tags **new_tags; int i; - if (set->nr_hw_queues >= new_nr_hw_queues) { - for (i = new_nr_hw_queues; i < set->nr_hw_queues; i++) - __blk_mq_free_map_and_rqs(set, i); + if (set->nr_hw_queues >= new_nr_hw_queues) goto done; - } new_tags = kcalloc_node(new_nr_hw_queues, sizeof(struct blk_mq_tags *), GFP_KERNEL, set->numa_node); @@ -4719,7 +4716,8 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, { struct request_queue *q; LIST_HEAD(head); - int prev_nr_hw_queues; + int prev_nr_hw_queues = set->nr_hw_queues; + int i; lockdep_assert_held(&set->tag_list_lock); @@ -4746,7 +4744,6 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, blk_mq_sysfs_unregister_hctxs(q); } - prev_nr_hw_queues = set->nr_hw_queues; if (blk_mq_realloc_tag_set_tags(set, nr_hw_queues) < 0) goto reregister; @@ -4781,6 +4778,10 @@ static void __blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, list_for_each_entry(q, &set->tag_list, tag_set_list) blk_mq_unfreeze_queue(q); + + /* Free the excess tags when nr_hw_queues shrink. 
*/ + for (i = set->nr_hw_queues; i < prev_nr_hw_queues; i++) + __blk_mq_free_map_and_rqs(set, i); } void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues) diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c index a3104e35412c1..aa597cda0d887 100644 --- a/drivers/firewire/core-device.c +++ b/drivers/firewire/core-device.c @@ -1211,7 +1211,7 @@ void fw_node_event(struct fw_card *card, struct fw_node *node, int event) * without actually having a link. */ create: - device = kzalloc(sizeof(*device), GFP_KERNEL); + device = kzalloc(sizeof(*device), GFP_ATOMIC); if (device == NULL) break; diff --git a/drivers/firewire/core-topology.c b/drivers/firewire/core-topology.c index 88466b663482f..f40c815343812 100644 --- a/drivers/firewire/core-topology.c +++ b/drivers/firewire/core-topology.c @@ -101,7 +101,7 @@ static struct fw_node *fw_node_create(u32 sid, int port_count, int color) { struct fw_node *node; - node = kzalloc(struct_size(node, ports, port_count), GFP_KERNEL); + node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC); if (node == NULL) return NULL; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8e98dda1e0844..434f298f779c3 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2837,10 +2837,6 @@ static int dm_resume(void *handle) int i, r, j, ret; bool need_hotplug = false; - if (dm->dc->caps.ips_support) { - dc_dmub_srv_exit_low_power_state(dm->dc); - } - if (amdgpu_in_reset(adev)) { dc_state = dm->cached_dc_state; diff --git a/drivers/md/dm-core.h b/drivers/md/dm-core.h index 0d93661f88d30..095b9b49aa825 100644 --- a/drivers/md/dm-core.h +++ b/drivers/md/dm-core.h @@ -214,6 +214,7 @@ struct dm_table { /* a list of devices used by this table */ struct list_head devices; + struct rw_semaphore devices_lock; /* events get handed up using this callback */ void (*event_fn)(void *data); diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c index f5ed729a8e0cd..21ebb6c39394b 100644 --- a/drivers/md/dm-ioctl.c +++ b/drivers/md/dm-ioctl.c @@ -1630,6 +1630,8 @@ static void retrieve_deps(struct dm_table *table, struct dm_dev_internal *dd; struct dm_target_deps *deps; + down_read(&table->devices_lock); + deps = get_result_buffer(param, param_size, &len); /* @@ -1644,7 +1646,7 @@ static void retrieve_deps(struct dm_table *table, needed = struct_size(deps, dev, count); if (len < needed) { param->flags |= DM_BUFFER_FULL_FLAG; - return; + goto out; } /* @@ -1656,6 +1658,9 @@ static void retrieve_deps(struct dm_table *table, deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev); param->data_size = param->data_start + needed; + +out: + up_read(&table->devices_lock); } static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size) diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c index 7d208b2b1a192..37b48f63ae6a5 100644 --- a/drivers/md/dm-table.c +++ b/drivers/md/dm-table.c @@ -135,6 +135,7 @@ int dm_table_create(struct dm_table **result, blk_mode_t mode, return -ENOMEM; INIT_LIST_HEAD(&t->devices); + init_rwsem(&t->devices_lock); if (!num_targets) num_targets = KEYS_PER_NODE; @@ -359,16 +360,20 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, if (dev == disk_devt(t->md->disk)) return -EINVAL; + down_write(&t->devices_lock); + dd = find_device(&t->devices, dev); if (!dd) { dd = kmalloc(sizeof(*dd), GFP_KERNEL); - if (!dd) - return 
-ENOMEM; + if (!dd) { + r = -ENOMEM; + goto unlock_ret_r; + } r = dm_get_table_device(t->md, dev, mode, &dd->dm_dev); if (r) { kfree(dd); - return r; + goto unlock_ret_r; } refcount_set(&dd->count, 1); @@ -378,12 +383,17 @@ int __ref dm_get_device(struct dm_target *ti, const char *path, blk_mode_t mode, } else if (dd->dm_dev->mode != (mode | dd->dm_dev->mode)) { r = upgrade_mode(dd, mode, t->md); if (r) - return r; + goto unlock_ret_r; } refcount_inc(&dd->count); out: + up_write(&t->devices_lock); *result = dd->dm_dev; return 0; + +unlock_ret_r: + up_write(&t->devices_lock); + return r; } EXPORT_SYMBOL(dm_get_device); @@ -419,9 +429,12 @@ static int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev, void dm_put_device(struct dm_target *ti, struct dm_dev *d) { int found = 0; - struct list_head *devices = &ti->table->devices; + struct dm_table *t = ti->table; + struct list_head *devices = &t->devices; struct dm_dev_internal *dd; + down_write(&t->devices_lock); + list_for_each_entry(dd, devices, list) { if (dd->dm_dev == d) { found = 1; @@ -430,14 +443,17 @@ void dm_put_device(struct dm_target *ti, struct dm_dev *d) } if (!found) { DMERR("%s: device %s not in table devices list", - dm_device_name(ti->table->md), d->name); - return; + dm_device_name(t->md), d->name); + goto unlock_ret; } if (refcount_dec_and_test(&dd->count)) { - dm_put_table_device(ti->table->md, d); + dm_put_table_device(t->md, d); list_del(&dd->list); kfree(dd); } + +unlock_ret: + up_write(&t->devices_lock); } EXPORT_SYMBOL(dm_put_device); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index f0f118ab20fa2..64a1f306c96c1 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -715,24 +715,6 @@ static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU) rcu_read_unlock(); } -static inline struct dm_table *dm_get_live_table_bio(struct mapped_device *md, - int *srcu_idx, blk_opf_t bio_opf) -{ - if (bio_opf & REQ_NOWAIT) - return dm_get_live_table_fast(md); - else - return dm_get_live_table(md, srcu_idx); -} - -static inline void dm_put_live_table_bio(struct mapped_device *md, int srcu_idx, - blk_opf_t bio_opf) -{ - if (bio_opf & REQ_NOWAIT) - dm_put_live_table_fast(md); - else - dm_put_live_table(md, srcu_idx); -} - static char *_dm_claim_ptr = "I belong to device-mapper"; /* @@ -1833,9 +1815,8 @@ static void dm_submit_bio(struct bio *bio) struct mapped_device *md = bio->bi_bdev->bd_disk->private_data; int srcu_idx; struct dm_table *map; - blk_opf_t bio_opf = bio->bi_opf; - map = dm_get_live_table_bio(md, &srcu_idx, bio_opf); + map = dm_get_live_table(md, &srcu_idx); /* If suspended, or map not yet available, queue this IO for later */ if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) || @@ -1851,7 +1832,7 @@ static void dm_submit_bio(struct bio *bio) dm_split_and_process_bio(md, map, bio); out: - dm_put_live_table_bio(md, srcu_idx, bio_opf); + dm_put_live_table(md, srcu_idx); } static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob, diff --git a/drivers/md/md.c b/drivers/md/md.c index 0fe7ab6e8ab9f..a104a025084dc 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -798,14 +798,14 @@ void mddev_unlock(struct mddev *mddev) } else mutex_unlock(&mddev->reconfig_mutex); + md_wakeup_thread(mddev->thread); + wake_up(&mddev->sb_wait); + list_for_each_entry_safe(rdev, tmp, &delete, same_set) { list_del_init(&rdev->same_set); kobject_del(&rdev->kobj); export_rdev(rdev, mddev); } - - md_wakeup_thread(mddev->thread); - wake_up(&mddev->sb_wait); } EXPORT_SYMBOL_GPL(mddev_unlock); @@ 
-2452,7 +2452,8 @@ static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) if (test_bit(AutoDetected, &rdev->flags)) md_autodetect_dev(rdev->bdev->bd_dev); #endif - blkdev_put(rdev->bdev, mddev->external ? &claim_rdev : rdev); + blkdev_put(rdev->bdev, + test_bit(Holder, &rdev->flags) ? rdev : &claim_rdev); rdev->bdev = NULL; kobject_put(&rdev->kobj); } @@ -3632,6 +3633,7 @@ EXPORT_SYMBOL_GPL(md_rdev_init); static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) { struct md_rdev *rdev; + struct md_rdev *holder; sector_t size; int err; @@ -3646,8 +3648,15 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe if (err) goto out_clear_rdev; + if (super_format == -2) { + holder = &claim_rdev; + } else { + holder = rdev; + set_bit(Holder, &rdev->flags); + } + rdev->bdev = blkdev_get_by_dev(newdev, BLK_OPEN_READ | BLK_OPEN_WRITE, - super_format == -2 ? &claim_rdev : rdev, NULL); + holder, NULL); if (IS_ERR(rdev->bdev)) { pr_warn("md: could not open device unknown-block(%u,%u).\n", MAJOR(newdev), MINOR(newdev)); @@ -3684,7 +3693,7 @@ static struct md_rdev *md_import_device(dev_t newdev, int super_format, int supe return rdev; out_blkdev_put: - blkdev_put(rdev->bdev, super_format == -2 ? &claim_rdev : rdev); + blkdev_put(rdev->bdev, holder); out_clear_rdev: md_rdev_clear(rdev); out_free_rdev: @@ -8256,7 +8265,7 @@ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) spin_unlock(&all_mddevs_lock); if (to_put) - mddev_put(mddev); + mddev_put(to_put); return next_mddev; } diff --git a/drivers/md/md.h b/drivers/md/md.h index 9bcb77bca9639..7c9c13abd7cac 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h @@ -211,6 +211,9 @@ enum flag_bits { * check if there is collision between raid1 * serial bios. */ + Holder, /* rdev is used as holder while opening + * underlying disk exclusively. 
+ */ }; static inline int is_badblock(struct md_rdev *rdev, sector_t s, int sectors, diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 4b30a17421623..2aabac773fe72 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -1837,12 +1837,11 @@ static int raid1_remove_disk(struct mddev *mddev, struct md_rdev *rdev) struct r1conf *conf = mddev->private; int err = 0; int number = rdev->raid_disk; + struct raid1_info *p = conf->mirrors + number; if (unlikely(number >= conf->raid_disks)) goto abort; - struct raid1_info *p = conf->mirrors + number; - if (rdev != p->rdev) p = conf->mirrors + conf->raid_disks + number; diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f3a01b79148cb..21783aa2ee8e1 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -2245,25 +2245,8 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) else ctrl->ctrl_config = NVME_CC_CSS_NVM; - if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { - u32 crto; - - ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); - if (ret) { - dev_err(ctrl->device, "Reading CRTO failed (%d)\n", - ret); - return ret; - } - - if (ctrl->cap & NVME_CAP_CRMS_CRIMS) { - ctrl->ctrl_config |= NVME_CC_CRIME; - timeout = NVME_CRTO_CRIMT(crto); - } else { - timeout = NVME_CRTO_CRWMT(crto); - } - } else { - timeout = NVME_CAP_TIMEOUT(ctrl->cap); - } + if (ctrl->cap & NVME_CAP_CRMS_CRWMS && ctrl->cap & NVME_CAP_CRMS_CRIMS) + ctrl->ctrl_config |= NVME_CC_CRIME; ctrl->ctrl_config |= (NVME_CTRL_PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT; ctrl->ctrl_config |= NVME_CC_AMS_RR | NVME_CC_SHN_NONE; @@ -2277,6 +2260,39 @@ int nvme_enable_ctrl(struct nvme_ctrl *ctrl) if (ret) return ret; + /* CAP value may change after initial CC write */ + ret = ctrl->ops->reg_read64(ctrl, NVME_REG_CAP, &ctrl->cap); + if (ret) + return ret; + + timeout = NVME_CAP_TIMEOUT(ctrl->cap); + if (ctrl->cap & NVME_CAP_CRMS_CRWMS) { + u32 crto, ready_timeout; + + ret = ctrl->ops->reg_read32(ctrl, NVME_REG_CRTO, &crto); + if (ret) { + dev_err(ctrl->device, "Reading CRTO failed (%d)\n", + ret); + return ret; + } + + /* + * CRTO should always be greater or equal to CAP.TO, but some + * devices are known to get this wrong. Use the larger of the + * two values. 
+ */ + if (ctrl->ctrl_config & NVME_CC_CRIME) + ready_timeout = NVME_CRTO_CRIMT(crto); + else + ready_timeout = NVME_CRTO_CRWMT(crto); + + if (ready_timeout < timeout) + dev_warn_once(ctrl->device, "bad crto:%x cap:%llx\n", + crto, ctrl->cap); + else + timeout = ready_timeout; + } + ctrl->ctrl_config |= NVME_CC_ENABLE; ret = ctrl->ops->reg_write32(ctrl, NVME_REG_CC, ctrl->ctrl_config); if (ret) diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index 1cd2bf82319a9..a15b37750d6e9 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -1924,7 +1924,7 @@ char *nvme_fc_io_getuuid(struct nvmefc_fcp_req *req) struct nvme_fc_fcp_op *op = fcp_req_to_fcp_op(req); struct request *rq = op->rq; - if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq->bio) + if (!IS_ENABLED(CONFIG_BLK_CGROUP_FC_APPID) || !rq || !rq->bio) return NULL; return blkcg_get_fc_appid(rq->bio); } diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 316f3e4ca7cc6..8df73a0b3980c 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -187,7 +187,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data, return 0; } -static const struct hwmon_channel_info *nvme_hwmon_info[] = { +static const struct hwmon_channel_info *const nvme_hwmon_info[] = { HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MIN | diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 2f57da12d9836..347cb5daebc3c 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -2916,9 +2916,6 @@ static struct nvme_dev *nvme_pci_alloc_dev(struct pci_dev *pdev, struct nvme_dev *dev; int ret = -ENOMEM; - if (node == NUMA_NO_NODE) - set_dev_node(&pdev->dev, first_memory_node); - dev = kzalloc_node(sizeof(*dev), GFP_KERNEL, node); if (!dev) return ERR_PTR(-ENOMEM); diff --git a/drivers/nvme/target/tcp.c b/drivers/nvme/target/tcp.c index 868aa4de2e4c4..cd92d7ddf5ed1 100644 --- a/drivers/nvme/target/tcp.c +++ b/drivers/nvme/target/tcp.c @@ -348,7 +348,7 @@ static void nvmet_tcp_build_pdu_iovec(struct nvmet_tcp_cmd *cmd) while (length) { u32 iov_len = min_t(u32, length, sg->length - sg_offset); - bvec_set_page(iov, sg_page(sg), sg->length, + bvec_set_page(iov, sg_page(sg), iov_len, sg->offset + sg_offset); length -= iov_len; diff --git a/io_uring/net.c b/io_uring/net.c index 3d07bf79c1e02..7a8e298af81b3 100644 --- a/io_uring/net.c +++ b/io_uring/net.c @@ -183,6 +183,10 @@ static int io_setup_async_msg(struct io_kiocb *req, memcpy(async_msg, kmsg, sizeof(*kmsg)); if (async_msg->msg.msg_name) async_msg->msg.msg_name = &async_msg->addr; + + if ((req->flags & REQ_F_BUFFER_SELECT) && !async_msg->msg.msg_iter.nr_segs) + return -EAGAIN; + /* if were using fast_iov, set it to the new one */ if (iter_is_iovec(&kmsg->msg.msg_iter) && !kmsg->free_iov) { size_t fast_idx = iter_iov(&kmsg->msg.msg_iter) - kmsg->fast_iov; @@ -542,6 +546,7 @@ static int io_recvmsg_copy_hdr(struct io_kiocb *req, struct io_async_msghdr *iomsg) { iomsg->msg.msg_name = &iomsg->addr; + iomsg->msg.msg_iter.nr_segs = 0; #ifdef CONFIG_COMPAT if (req->ctx->compat) diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 195db92ac99c2..feda711c6b7b8 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -2775,14 +2775,20 @@ static int selinux_umount(struct vfsmount *mnt, int flags) static int selinux_fs_context_submount(struct fs_context *fc, struct super_block *reference) { - const struct superblock_security_struct *sbsec; + const struct 
superblock_security_struct *sbsec = selinux_superblock(reference); struct selinux_mnt_opts *opts; + /* + * Ensure that fc->security remains NULL when no options are set + * as expected by selinux_set_mnt_opts(). + */ + if (!(sbsec->flags & (FSCONTEXT_MNT|CONTEXT_MNT|DEFCONTEXT_MNT))) + return 0; + opts = kzalloc(sizeof(*opts), GFP_KERNEL); if (!opts) return -ENOMEM; - sbsec = selinux_superblock(reference); if (sbsec->flags & FSCONTEXT_MNT) opts->fscontext_sid = sbsec->sid; if (sbsec->flags & CONTEXT_MNT)
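
The two compiler problem matchers added at the top of this series are plain anchored regexes, so they can be sanity-checked outside the Actions runner. A minimal sketch, assuming Python 3; the patterns are copied from compiler-source.json and sparse.json, while the sample log lines are invented for illustration. Note the 'XX' alternative in the sparse matcher, which exists because smart-sparse-diff.py substitutes 'XX' for line numbers it has normalized away:

import re

# Patterns copied from compiler-source.json and sparse.json above.
compiler_source = re.compile(
    r"^(?:/linux/)?(.*):(\d+):(\d+):\s+(?:fatal\s+)?(warning|error):\s+(.*)$")
sparse_diff = re.compile(
    r"^\+(?:/linux/)?(.*):(\d+|XX):(\d+|XX):\s+(error|warning):\s+(.*)$")

# Invented sample lines, shaped like gcc output and smart-sparse-diff output.
gcc_line = "/linux/arch/powerpc/kernel/setup_64.c:123:45: warning: unused variable 'x'"
diff_line = "+arch/powerpc/kernel/prom.c:XX:9: warning: incorrect type in assignment"

# Groups line up with the JSON mapping: file=1, line=2, column=3, severity=4, message=5.
print(compiler_source.match(gcc_line).groups())
# ('arch/powerpc/kernel/setup_64.c', '123', '45', 'warning', "unused variable 'x'")

print(sparse_diff.match(diff_line).groups())
# ('arch/powerpc/kernel/prom.c', 'XX', '9', 'warning', 'incorrect type in assignment')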
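
The lpfc example quoted in the comments of concat_multi_line_warnings() shows why multi-line sparse warnings must be merged before diffing: the "expected"/"got" continuation lines share a file, line, and column with the warning they elaborate. Below is a lightly condensed copy of that function (debug vprint() calls and commented-out code dropped), fed the exact three lpfc lines from the script's own comment:

from typing import List

def concat_multi_line_warnings(split_lines: List[List[str]]) -> List[List[str]]:
    lines = []  # type: List[List[str]]
    last_column = ""
    last_line = ""
    for parts in split_lines:
        if len(parts) < 4:
            # too short to be a 'real' warning line, keep it as-is
            lines += [parts]
            continue
        (linenum, columnnum) = parts[1:3]
        final_mandatory_part = parts[3].strip()
        final_parts = ":".join(parts[3:]).strip()
        if (linenum != last_line) or (last_column != columnnum):
            # different line/column: cannot be a continuation
            lines += [parts]
        elif final_mandatory_part in ("warning", "error"):
            # explicitly typed: this is a new message
            lines += [parts]
        else:
            # a continuation: fold it into the previous message
            lines[-1][-1] += " " + final_parts
        last_line = linenum
        last_column = columnnum
    return lines

log = """\
drivers/scsi/lpfc/lpfc_scsi.c:5606:30: warning: incorrect type in assignment (different base types)
drivers/scsi/lpfc/lpfc_scsi.c:5606:30:    expected int [signed] memory_flags
drivers/scsi/lpfc/lpfc_scsi.c:5606:30:    got restricted gfp_t"""

merged = concat_multi_line_warnings([l.split(":") for l in log.split("\n")])
print(len(merged))          # 1
print(":".join(merged[0]))  # one warning, with both continuation lines folded in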
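
smart_filter() is the count-sensitive set difference that decides which warnings survive into the report: a warning is kept only if it is absent from the other log, or appears there fewer times (the repeated-header case the script's comment mentions). A small self-contained demonstration with invented warning strings; the function body is taken from smart-sparse-diff.py with its comments trimmed:

from typing import Any, List

def smart_filter(a: List[Any], b: List[Any]) -> List[Any]:
    res = []  # type: List[Any]
    for l in a:
        if l not in b:
            # not in the other log at all
            res += [l]
        else:
            if len([ll for ll in a if ll == l]) > \
               len([ll for ll in b if ll == l]):
                # appears more often here; save only once
                if l not in res:
                    res += [l]
    return res

old = ["w1", "w2", "w2"]
new = ["w2", "w2", "w2", "w3"]

print(smart_filter(old, new))  # ['w1']        (fixed in the new log)
print(smart_filter(new, old))  # ['w2', 'w3']  ('w3' is new; 'w2' now fires once more)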