2
0
Эх сурвалжийг харах

Merge tag 'v9.1.0' into utm-edition

v9.1.0 release
osy 11 сар өмнө
parent
commit
d6441003a3
100 өөрчлөгдсөн 5483 нэмэгдсэн , 4499 устгасан
  1. 65 8
      .gitlab-ci.d/base.yml
  2. 25 16
      .gitlab-ci.d/buildtest-template.yml
  3. 222 36
      .gitlab-ci.d/buildtest.yml
  4. 23 53
      .gitlab-ci.d/cirrus.yml
  5. 4 2
      .gitlab-ci.d/cirrus/build.yml
  6. 0 16
      .gitlab-ci.d/cirrus/freebsd-12.vars
  7. 1 1
      .gitlab-ci.d/cirrus/freebsd-13.vars
  8. 0 31
      .gitlab-ci.d/cirrus/kvm-build.yml
  9. 3 3
      .gitlab-ci.d/cirrus/macos-13.vars
  10. 16 0
      .gitlab-ci.d/cirrus/macos-14.vars
  11. 2 2
      .gitlab-ci.d/container-core.yml
  12. 11 57
      .gitlab-ci.d/container-cross.yml
  13. 4 4
      .gitlab-ci.d/container-template.yml
  14. 1 1
      .gitlab-ci.d/containers.yml
  15. 33 4
      .gitlab-ci.d/crossbuild-template.yml
  16. 29 22
      .gitlab-ci.d/crossbuilds.yml
  17. 7 5
      .gitlab-ci.d/custom-runners.yml
  18. 0 24
      .gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml
  19. 2 2
      .gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml
  20. 26 4
      .gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml
  21. 21 23
      .gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml
  22. 8 5
      .gitlab-ci.d/opensbi.yml
  23. 7 0
      .gitlab-ci.d/qemu-project.yml
  24. 2 2
      .gitlab-ci.d/static_checks.yml
  25. 70 89
      .gitlab-ci.d/windows.yml
  26. 0 21
      .gitmodules
  27. 29 1
      .mailmap
  28. 12 7
      .readthedocs.yml
  29. 22 23
      .travis.yml
  30. 9 3
      Kconfig.host
  31. 236 88
      MAINTAINERS
  32. 51 42
      Makefile
  33. 3 3
      README.rst
  34. 1 1
      VERSION
  35. 1 3
      accel/Kconfig
  36. 5 5
      accel/accel-blocker.c
  37. 4 4
      accel/accel-system.c
  38. 4 4
      accel/accel-system.h
  39. 26 5
      accel/accel-target.c
  40. 4 8
      accel/dummy-cpus.c
  41. 147 38
      accel/hvf/hvf-accel-ops.c
  42. 42 23
      accel/hvf/hvf-all.c
  43. 5 9
      accel/kvm/kvm-accel-ops.c
  44. 322 314
      accel/kvm/kvm-all.c
  45. 0 1
      accel/kvm/kvm-cpus.h
  46. 12 2
      accel/kvm/trace-events
  47. 3 3
      accel/meson.build
  48. 1 1
      accel/qtest/meson.build
  49. 13 0
      accel/qtest/qtest.c
  50. 0 24
      accel/stubs/hax-stub.c
  51. 17 15
      accel/stubs/kvm-stub.c
  52. 5 6
      accel/stubs/meson.build
  53. 0 22
      accel/stubs/tcg-stub.c
  54. 8 42
      accel/tcg/atomic_common.c.inc
  55. 28 85
      accel/tcg/atomic_template.h
  56. 4 32
      accel/tcg/cpu-exec-common.c
  57. 208 208
      accel/tcg/cpu-exec.c
  58. 258 345
      accel/tcg/cputlb.c
  59. 32 28
      accel/tcg/icount-common.c
  60. 59 0
      accel/tcg/internal-common.h
  61. 118 0
      accel/tcg/internal-target.h
  62. 0 70
      accel/tcg/internal.h
  63. 1111 0
      accel/tcg/ldst_atomicity.c.inc
  64. 269 26
      accel/tcg/ldst_common.c.inc
  65. 18 11
      accel/tcg/meson.build
  66. 170 32
      accel/tcg/monitor.c
  67. 308 781
      accel/tcg/plugin-gen.c
  68. 0 4
      accel/tcg/plugin-helpers.h
  69. 8 8
      accel/tcg/tb-hash.h
  70. 11 6
      accel/tcg/tb-jmp-cache.h
  71. 150 153
      accel/tcg/tb-maint.c
  72. 24 11
      accel/tcg/tcg-accel-ops-icount.c
  73. 2 1
      accel/tcg/tcg-accel-ops-icount.h
  74. 13 26
      accel/tcg/tcg-accel-ops-mttcg.c
  75. 66 30
      accel/tcg/tcg-accel-ops-rr.c
  76. 17 16
      accel/tcg/tcg-accel-ops.c
  77. 2 2
      accel/tcg/tcg-accel-ops.h
  78. 37 30
      accel/tcg/tcg-all.c
  79. 38 1
      accel/tcg/tcg-runtime-gvec.c
  80. 5 1
      accel/tcg/tcg-runtime.c
  81. 53 26
      accel/tcg/tcg-runtime.h
  82. 93 222
      accel/tcg/translate-all.c
  83. 337 109
      accel/tcg/translator.c
  84. 4 2
      accel/tcg/user-exec-stub.c
  85. 249 240
      accel/tcg/user-exec.c
  86. 18 0
      accel/tcg/vcpu-state.h
  87. 5 88
      accel/tcg/watchpoint.c
  88. 1 0
      accel/xen/xen-all.c
  89. 1 2
      audio/alsaaudio.c
  90. 4 2
      audio/audio-hmp-cmds.c
  91. 110 136
      audio/audio.c
  92. 6 4
      audio/audio.h
  93. 1 6
      audio/audio_int.h
  94. 0 591
      audio/audio_legacy.c
  95. 12 1
      audio/audio_template.h
  96. 5 11
      audio/coreaudio.m
  97. 41 7
      audio/dbusaudio.c
  98. 1 2
      audio/dsoundaudio.c
  99. 15 9
      audio/jackaudio.c
  100. 7 6
      audio/meson.build

+ 65 - 8
.gitlab-ci.d/base.yml

@@ -1,15 +1,33 @@
 
 
+variables:
+  # On stable branches this is changed by later rules. Should also
+  # be overridden per pipeline if running pipelines concurrently
+  # for different branches in contributor forks.
+  QEMU_CI_CONTAINER_TAG: latest
+
+  # For purposes of CI rules, upstream is the gitlab.com/qemu-project
+  # namespace. When testing CI, it might be usefult to override this
+  # to point to a fork repo
+  QEMU_CI_UPSTREAM: qemu-project
+
 # The order of rules defined here is critically important.
 # The order of rules defined here is critically important.
 # They are evaluated in order and first match wins.
 # They are evaluated in order and first match wins.
 #
 #
 # Thus we group them into a number of stages, ordered from
 # Thus we group them into a number of stages, ordered from
 # most restrictive to least restrictive
 # most restrictive to least restrictive
 #
 #
+# For pipelines running for stable "staging-X.Y" branches
+# we must override QEMU_CI_CONTAINER_TAG
+#
 .base_job_template:
 .base_job_template:
   variables:
   variables:
     # Each script line from will be in a collapsible section in the job output
     # Each script line from will be in a collapsible section in the job output
     # and show the duration of each line.
     # and show the duration of each line.
     FF_SCRIPT_SECTIONS: 1
     FF_SCRIPT_SECTIONS: 1
+    # The project has a fairly fat GIT repo so we try and avoid bringing in things
+    # we don't need. The --filter options avoid blobs and tree references we aren't going to use
+    # and we also avoid fetching tags.
+    GIT_FETCH_EXTRA_FLAGS: --filter=blob:none --filter=tree:0 --no-tags --prune --quiet
 
 
   interruptible: true
   interruptible: true
 
 
@@ -19,48 +37,72 @@
     # want jobs to run
     # want jobs to run
     #############################################################
     #############################################################
 
 
+    # Never run jobs upstream on stable branch, staging branch jobs already ran
+    - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /^stable-/'
+      when: never
+
+    # Never run jobs upstream on tags, staging branch jobs already ran
+    - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_TAG'
+      when: never
+
+    # Scheduled runs on mainline don't get pipelines except for the special Coverity job
+    - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
+      when: never
+
     # Cirrus jobs can't run unless the creds / target repo are set
     # Cirrus jobs can't run unless the creds / target repo are set
     - if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
     - if: '$QEMU_JOB_CIRRUS && ($CIRRUS_GITHUB_REPO == null || $CIRRUS_API_TOKEN == null)'
       when: never
       when: never
 
 
     # Publishing jobs should only run on the default branch in upstream
     # Publishing jobs should only run on the default branch in upstream
-    - if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
+    - if: '$QEMU_JOB_PUBLISH == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH'
       when: never
       when: never
 
 
     # Non-publishing jobs should only run on staging branches in upstream
     # Non-publishing jobs should only run on staging branches in upstream
-    - if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH !~ /staging/'
+    - if: '$QEMU_JOB_PUBLISH != "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH !~ /staging/'
       when: never
       when: never
 
 
     # Jobs only intended for forks should always be skipped on upstream
     # Jobs only intended for forks should always be skipped on upstream
-    - if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == "qemu-project"'
+    - if: '$QEMU_JOB_ONLY_FORKS == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
       when: never
       when: never
 
 
     # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
     # Forks don't get pipelines unless QEMU_CI=1 or QEMU_CI=2 is set
-    - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
+    - if: '$QEMU_CI != "1" && $QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
       when: never
       when: never
 
 
     # Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
     # Avocado jobs don't run in forks unless $QEMU_CI_AVOCADO_TESTING is set
-    - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != "qemu-project"'
+    - if: '$QEMU_JOB_AVOCADO && $QEMU_CI_AVOCADO_TESTING != "1" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
       when: never
       when: never
 
 
 
 
     #############################################################
     #############################################################
     # Stage 2: fine tune execution of jobs in specific scenarios
     # Stage 2: fine tune execution of jobs in specific scenarios
-    # where the catch all logic is inapprorpaite
+    # where the catch all logic is inappropriate
     #############################################################
     #############################################################
 
 
     # Optional jobs should not be run unless manually triggered
     # Optional jobs should not be run unless manually triggered
+    - if: '$QEMU_JOB_OPTIONAL && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
+      when: manual
+      allow_failure: true
+      variables:
+        QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
+
     - if: '$QEMU_JOB_OPTIONAL'
     - if: '$QEMU_JOB_OPTIONAL'
       when: manual
       when: manual
       allow_failure: true
       allow_failure: true
 
 
     # Skipped jobs should not be run unless manually triggered
     # Skipped jobs should not be run unless manually triggered
+    - if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
+      when: manual
+      allow_failure: true
+      variables:
+        QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
+
     - if: '$QEMU_JOB_SKIPPED'
     - if: '$QEMU_JOB_SKIPPED'
       when: manual
       when: manual
       allow_failure: true
       allow_failure: true
 
 
     # Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset
     # Avocado jobs can be manually start in forks if $QEMU_CI_AVOCADO_TESTING is unset
-    - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != "qemu-project"'
+    - if: '$QEMU_JOB_AVOCADO && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
       when: manual
       when: manual
       allow_failure: true
       allow_failure: true
 
 
@@ -72,8 +114,23 @@
 
 
     # Forks pipeline jobs don't start automatically unless
     # Forks pipeline jobs don't start automatically unless
     # QEMU_CI=2 is set
     # QEMU_CI=2 is set
-    - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != "qemu-project"'
+    - if: '$QEMU_CI != "2" && $CI_PROJECT_NAMESPACE != $QEMU_CI_UPSTREAM'
+      when: manual
+
+    # Upstream pipeline jobs start automatically unless told not to
+    # by setting QEMU_CI=1
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
+      when: manual
+      variables:
+        QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
+
+    - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
       when: manual
       when: manual
 
 
     # Jobs can run if any jobs they depend on were successful
     # Jobs can run if any jobs they depend on were successful
+    - if: '$QEMU_JOB_SKIPPED && $CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_COMMIT_BRANCH =~ /staging-[[:digit:]]+\.[[:digit:]]/'
+      when: on_success
+      variables:
+        QEMU_CI_CONTAINER_TAG: $CI_COMMIT_REF_SLUG
+
     - when: on_success
     - when: on_success

+ 25 - 16
.gitlab-ci.d/buildtest-template.yml

@@ -1,34 +1,44 @@
 .native_build_job_template:
 .native_build_job_template:
   extends: .base_job_template
   extends: .base_job_template
   stage: build
   stage: build
-  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
+  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
+  cache:
+    paths:
+      - ccache
+    key: "$CI_JOB_NAME"
+    when: always
   before_script:
   before_script:
     - JOBS=$(expr $(nproc) + 1)
     - JOBS=$(expr $(nproc) + 1)
+    - cat /packages.txt
   script:
   script:
-    - if test -n "$LD_JOBS";
-      then
-        scripts/git-submodule.sh update meson ;
-      fi
+    - export CCACHE_BASEDIR="$(pwd)"
+    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
+    - export CCACHE_MAXSIZE="500M"
+    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
+    - du -sh .git
     - mkdir build
     - mkdir build
     - cd build
     - cd build
+    - ccache --zero-stats
     - ../configure --enable-werror --disable-docs --enable-fdt=system
     - ../configure --enable-werror --disable-docs --enable-fdt=system
-          ${LD_JOBS:+--meson=git} ${TARGETS:+--target-list="$TARGETS"}
+          ${TARGETS:+--target-list="$TARGETS"}
           $CONFIGURE_ARGS ||
           $CONFIGURE_ARGS ||
       { cat config.log meson-logs/meson-log.txt && exit 1; }
       { cat config.log meson-logs/meson-log.txt && exit 1; }
     - if test -n "$LD_JOBS";
     - if test -n "$LD_JOBS";
       then
       then
-        ../meson/meson.py configure . -Dbackend_max_links="$LD_JOBS" ;
+        pyvenv/bin/meson configure . -Dbackend_max_links="$LD_JOBS" ;
       fi || exit 1;
       fi || exit 1;
-    - make -j"$JOBS"
+    - $MAKE -j"$JOBS"
     - if test -n "$MAKE_CHECK_ARGS";
     - if test -n "$MAKE_CHECK_ARGS";
       then
       then
-        make -j"$JOBS" $MAKE_CHECK_ARGS ;
+        $MAKE -j"$JOBS" $MAKE_CHECK_ARGS ;
       fi
       fi
+    - ccache --show-stats
 
 
 # We jump some hoops in common_test_job_template to avoid
 # We jump some hoops in common_test_job_template to avoid
 # rebuilding all the object files we skip in the artifacts
 # rebuilding all the object files we skip in the artifacts
 .native_build_artifact_template:
 .native_build_artifact_template:
   artifacts:
   artifacts:
+    when: on_success
     expire_in: 2 days
     expire_in: 2 days
     paths:
     paths:
       - build
       - build
@@ -36,27 +46,26 @@
     exclude:
     exclude:
       - build/**/*.p
       - build/**/*.p
       - build/**/*.a.p
       - build/**/*.a.p
-      - build/**/*.fa.p
       - build/**/*.c.o
       - build/**/*.c.o
       - build/**/*.c.o.d
       - build/**/*.c.o.d
-      - build/**/*.fa
 
 
 .common_test_job_template:
 .common_test_job_template:
   extends: .base_job_template
   extends: .base_job_template
   stage: test
   stage: test
-  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
+  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
   script:
   script:
-    - scripts/git-submodule.sh update
-        $(sed -n '/GIT_SUBMODULES=/ s/.*=// p' build/config-host.mak)
+    - scripts/git-submodule.sh update roms/SLOF
+    - meson subprojects download $(cd build/subprojects && echo *)
     - cd build
     - cd build
     - find . -type f -exec touch {} +
     - find . -type f -exec touch {} +
     # Avoid recompiling by hiding ninja with NINJA=":"
     # Avoid recompiling by hiding ninja with NINJA=":"
-    - make NINJA=":" $MAKE_CHECK_ARGS
+    - $MAKE NINJA=":" $MAKE_CHECK_ARGS
 
 
 .native_test_job_template:
 .native_test_job_template:
   extends: .common_test_job_template
   extends: .common_test_job_template
   artifacts:
   artifacts:
     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
+    when: always
     expire_in: 7 days
     expire_in: 7 days
     paths:
     paths:
       - build/meson-logs/testlog.txt
       - build/meson-logs/testlog.txt
@@ -72,7 +81,7 @@
     policy: pull-push
     policy: pull-push
   artifacts:
   artifacts:
     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
-    when: on_failure
+    when: always
     expire_in: 7 days
     expire_in: 7 days
     paths:
     paths:
       - build/tests/results/latest/results.xml
       - build/tests/results/latest/results.xml

+ 222 - 36
.gitlab-ci.d/buildtest.yml

@@ -30,6 +30,7 @@ avocado-system-alpine:
   variables:
   variables:
     IMAGE: alpine
     IMAGE: alpine
     MAKE_CHECK_ARGS: check-avocado
     MAKE_CHECK_ARGS: check-avocado
+    AVOCADO_TAGS: arch:avr arch:loongarch64 arch:mips64 arch:mipsel
 
 
 build-system-ubuntu:
 build-system-ubuntu:
   extends:
   extends:
@@ -40,8 +41,7 @@ build-system-ubuntu:
   variables:
   variables:
     IMAGE: ubuntu2204
     IMAGE: ubuntu2204
     CONFIGURE_ARGS: --enable-docs
     CONFIGURE_ARGS: --enable-docs
-    TARGETS: alpha-softmmu cris-softmmu hppa-softmmu
-      microblazeel-softmmu mips64el-softmmu
+    TARGETS: alpha-softmmu microblazeel-softmmu mips64el-softmmu
     MAKE_CHECK_ARGS: check-build
     MAKE_CHECK_ARGS: check-build
 
 
 check-system-ubuntu:
 check-system-ubuntu:
@@ -61,6 +61,7 @@ avocado-system-ubuntu:
   variables:
   variables:
     IMAGE: ubuntu2204
     IMAGE: ubuntu2204
     MAKE_CHECK_ARGS: check-avocado
     MAKE_CHECK_ARGS: check-avocado
+    AVOCADO_TAGS: arch:alpha arch:microblazeel arch:mips64el
 
 
 build-system-debian:
 build-system-debian:
   extends:
   extends:
@@ -69,10 +70,10 @@ build-system-debian:
   needs:
   needs:
     job: amd64-debian-container
     job: amd64-debian-container
   variables:
   variables:
-    IMAGE: debian-amd64
+    IMAGE: debian
     CONFIGURE_ARGS: --with-coroutine=sigaltstack
     CONFIGURE_ARGS: --with-coroutine=sigaltstack
     TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
     TARGETS: arm-softmmu i386-softmmu riscv64-softmmu sh4eb-softmmu
-      sparc-softmmu xtensaeb-softmmu
+      sparc-softmmu xtensa-softmmu
     MAKE_CHECK_ARGS: check-build
     MAKE_CHECK_ARGS: check-build
 
 
 check-system-debian:
 check-system-debian:
@@ -81,7 +82,7 @@ check-system-debian:
     - job: build-system-debian
     - job: build-system-debian
       artifacts: true
       artifacts: true
   variables:
   variables:
-    IMAGE: debian-amd64
+    IMAGE: debian
     MAKE_CHECK_ARGS: check
     MAKE_CHECK_ARGS: check
 
 
 avocado-system-debian:
 avocado-system-debian:
@@ -90,8 +91,9 @@ avocado-system-debian:
     - job: build-system-debian
     - job: build-system-debian
       artifacts: true
       artifacts: true
   variables:
   variables:
-    IMAGE: debian-amd64
+    IMAGE: debian
     MAKE_CHECK_ARGS: check-avocado
     MAKE_CHECK_ARGS: check-avocado
+    AVOCADO_TAGS: arch:arm arch:i386 arch:riscv64 arch:sh4 arch:sparc arch:xtensa
 
 
 crash-test-debian:
 crash-test-debian:
   extends: .native_test_job_template
   extends: .native_test_job_template
@@ -99,11 +101,11 @@ crash-test-debian:
     - job: build-system-debian
     - job: build-system-debian
       artifacts: true
       artifacts: true
   variables:
   variables:
-    IMAGE: debian-amd64
+    IMAGE: debian
   script:
   script:
     - cd build
     - cd build
-    - make check-venv
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-i386
+    - make NINJA=":" check-venv
+    - pyvenv/bin/python3 scripts/device-crash-test -q --tcg-only ./qemu-system-i386
 
 
 build-system-fedora:
 build-system-fedora:
   extends:
   extends:
@@ -114,7 +116,7 @@ build-system-fedora:
   variables:
   variables:
     IMAGE: fedora
     IMAGE: fedora
     CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
     CONFIGURE_ARGS: --disable-gcrypt --enable-nettle --enable-docs
-    TARGETS: tricore-softmmu microblaze-softmmu mips-softmmu
+    TARGETS: microblaze-softmmu mips-softmmu
       xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
       xtensa-softmmu m68k-softmmu riscv32-softmmu ppc-softmmu sparc64-softmmu
     MAKE_CHECK_ARGS: check-build
     MAKE_CHECK_ARGS: check-build
 
 
@@ -135,6 +137,8 @@ avocado-system-fedora:
   variables:
   variables:
     IMAGE: fedora
     IMAGE: fedora
     MAKE_CHECK_ARGS: check-avocado
     MAKE_CHECK_ARGS: check-avocado
+    AVOCADO_TAGS: arch:microblaze arch:mips arch:xtensa arch:m68k
+      arch:riscv32 arch:ppc arch:sparc64
 
 
 crash-test-fedora:
 crash-test-fedora:
   extends: .native_test_job_template
   extends: .native_test_job_template
@@ -145,31 +149,98 @@ crash-test-fedora:
     IMAGE: fedora
     IMAGE: fedora
   script:
   script:
     - cd build
     - cd build
-    - make check-venv
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
-    - tests/venv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
+    - make NINJA=":" check-venv
+    - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-ppc
+    - pyvenv/bin/python3 scripts/device-crash-test -q ./qemu-system-riscv32
 
 
 build-system-centos:
 build-system-centos:
   extends:
   extends:
     - .native_build_job_template
     - .native_build_job_template
     - .native_build_artifact_template
     - .native_build_artifact_template
   needs:
   needs:
-    job: amd64-centos8-container
+    job: amd64-centos9-container
   variables:
   variables:
-    IMAGE: centos8
+    IMAGE: centos9
     CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server
     CONFIGURE_ARGS: --disable-nettle --enable-gcrypt --enable-vfio-user-server
       --enable-modules --enable-trace-backends=dtrace --enable-docs
       --enable-modules --enable-trace-backends=dtrace --enable-docs
     TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
     TARGETS: ppc64-softmmu or1k-softmmu s390x-softmmu
-      x86_64-softmmu rx-softmmu sh4-softmmu nios2-softmmu
+      x86_64-softmmu rx-softmmu sh4-softmmu
     MAKE_CHECK_ARGS: check-build
     MAKE_CHECK_ARGS: check-build
 
 
+# Previous QEMU release. Used for cross-version migration tests.
+build-previous-qemu:
+  extends: .native_build_job_template
+  artifacts:
+    when: on_success
+    expire_in: 2 days
+    paths:
+      - build-previous
+    exclude:
+      - build-previous/**/*.p
+      - build-previous/**/*.a.p
+      - build-previous/**/*.c.o
+      - build-previous/**/*.c.o.d
+  needs:
+    job: amd64-opensuse-leap-container
+  variables:
+    IMAGE: opensuse-leap
+    TARGETS: x86_64-softmmu aarch64-softmmu
+    # Override the default flags as we need more to grab the old version
+    GIT_FETCH_EXTRA_FLAGS: --prune --quiet
+  before_script:
+    - export QEMU_PREV_VERSION="$(sed 's/\([0-9.]*\)\.[0-9]*/v\1.0/' VERSION)"
+    - git remote add upstream https://gitlab.com/qemu-project/qemu
+    - git fetch upstream refs/tags/$QEMU_PREV_VERSION:refs/tags/$QEMU_PREV_VERSION
+    - git checkout $QEMU_PREV_VERSION
+  after_script:
+    - mv build build-previous
+
+.migration-compat-common:
+  extends: .common_test_job_template
+  needs:
+    - job: build-previous-qemu
+    - job: build-system-opensuse
+  # The old QEMU could have bugs unrelated to migration that are
+  # already fixed in the current development branch, so this test
+  # might fail.
+  allow_failure: true
+  variables:
+    IMAGE: opensuse-leap
+    MAKE_CHECK_ARGS: check-build
+  script:
+    # Use the migration-tests from the older QEMU tree. This avoids
+    # testing an old QEMU against new features/tests that it is not
+    # compatible with.
+    - cd build-previous
+    # old to new
+    - QTEST_QEMU_BINARY_SRC=./qemu-system-${TARGET}
+          QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
+    # new to old
+    - QTEST_QEMU_BINARY_DST=./qemu-system-${TARGET}
+          QTEST_QEMU_BINARY=../build/qemu-system-${TARGET} ./tests/qtest/migration-test
+
+# This job needs to be disabled until we can have an aarch64 CPU model that
+# will both (1) support both KVM and TCG, and (2) provide a stable ABI.
+# Currently only "-cpu max" can provide (1), however it doesn't guarantee
+# (2).  Mark this test skipped until later.
+migration-compat-aarch64:
+  extends: .migration-compat-common
+  variables:
+    TARGET: aarch64
+    QEMU_JOB_SKIPPED: 1
+
+migration-compat-x86_64:
+  extends: .migration-compat-common
+  variables:
+    TARGET: x86_64
+
 check-system-centos:
 check-system-centos:
   extends: .native_test_job_template
   extends: .native_test_job_template
   needs:
   needs:
     - job: build-system-centos
     - job: build-system-centos
       artifacts: true
       artifacts: true
   variables:
   variables:
-    IMAGE: centos8
+    IMAGE: centos9
     MAKE_CHECK_ARGS: check
     MAKE_CHECK_ARGS: check
 
 
 avocado-system-centos:
 avocado-system-centos:
@@ -178,8 +249,10 @@ avocado-system-centos:
     - job: build-system-centos
     - job: build-system-centos
       artifacts: true
       artifacts: true
   variables:
   variables:
-    IMAGE: centos8
+    IMAGE: centos9
     MAKE_CHECK_ARGS: check-avocado
     MAKE_CHECK_ARGS: check-avocado
+    AVOCADO_TAGS: arch:ppc64 arch:or1k arch:s390x arch:x86_64 arch:rx
+      arch:sh4
 
 
 build-system-opensuse:
 build-system-opensuse:
   extends:
   extends:
@@ -209,7 +282,38 @@ avocado-system-opensuse:
   variables:
   variables:
     IMAGE: opensuse-leap
     IMAGE: opensuse-leap
     MAKE_CHECK_ARGS: check-avocado
     MAKE_CHECK_ARGS: check-avocado
+    AVOCADO_TAGS: arch:s390x arch:x86_64 arch:aarch64
+
+#
+# Flaky tests. We don't run these by default and they are allow fail
+# but often the CI system is the only way to trigger the failures.
+#
+
+build-system-flaky:
+  extends:
+    - .native_build_job_template
+    - .native_build_artifact_template
+  needs:
+    job: amd64-debian-container
+  variables:
+    IMAGE: debian
+    QEMU_JOB_OPTIONAL: 1
+    TARGETS: aarch64-softmmu arm-softmmu mips64el-softmmu
+      ppc64-softmmu rx-softmmu s390x-softmmu sh4-softmmu x86_64-softmmu
+    MAKE_CHECK_ARGS: check-build
 
 
+avocado-system-flaky:
+  extends: .avocado_test_job_template
+  needs:
+    - job: build-system-flaky
+      artifacts: true
+  allow_failure: true
+  variables:
+    IMAGE: debian
+    MAKE_CHECK_ARGS: check-avocado
+    QEMU_JOB_OPTIONAL: 1
+    QEMU_TEST_FLAKY_TESTS: 1
+    AVOCADO_TAGS: flaky
 
 
 # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
 # This jobs explicitly disable TCG (--disable-tcg), KVM is detected by
 # the configure script. The container doesn't contain Xen headers so
 # the configure script. The container doesn't contain Xen headers so
@@ -221,9 +325,9 @@ avocado-system-opensuse:
 build-tcg-disabled:
 build-tcg-disabled:
   extends: .native_build_job_template
   extends: .native_build_job_template
   needs:
   needs:
-    job: amd64-centos8-container
+    job: amd64-centos9-container
   variables:
   variables:
-    IMAGE: centos8
+    IMAGE: centos9
   script:
   script:
     - mkdir build
     - mkdir build
     - cd build
     - cd build
@@ -236,7 +340,7 @@ build-tcg-disabled:
     - cd tests/qemu-iotests/
     - cd tests/qemu-iotests/
     - ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
     - ./check -raw 001 002 003 004 005 008 009 010 011 012 021 025 032 033 048
             052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
             052 063 077 086 101 104 106 113 148 150 151 152 157 159 160 163
-            170 171 183 184 192 194 208 221 226 227 236 253 277 image-fleecing
+            170 171 184 192 194 208 221 226 227 236 253 277 image-fleecing
     - ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
     - ./check -qcow2 028 051 056 057 058 065 068 082 085 091 095 096 102 122
             124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
             124 132 139 142 144 145 151 152 155 157 165 194 196 200 202
             208 209 216 218 227 234 246 247 248 250 254 255 257 258
             208 209 216 218 227 234 246 247 248 250 254 255 257 258
@@ -249,6 +353,7 @@ build-user:
   variables:
   variables:
     IMAGE: debian-all-test-cross
     IMAGE: debian-all-test-cross
     CONFIGURE_ARGS: --disable-tools --disable-system
     CONFIGURE_ARGS: --disable-tools --disable-system
+      --target-list-exclude=alpha-linux-user,sh4-linux-user
     MAKE_CHECK_ARGS: check-tcg
     MAKE_CHECK_ARGS: check-tcg
 
 
 build-user-static:
 build-user-static:
@@ -258,6 +363,18 @@ build-user-static:
   variables:
   variables:
     IMAGE: debian-all-test-cross
     IMAGE: debian-all-test-cross
     CONFIGURE_ARGS: --disable-tools --disable-system --static
     CONFIGURE_ARGS: --disable-tools --disable-system --static
+      --target-list-exclude=alpha-linux-user,sh4-linux-user
+    MAKE_CHECK_ARGS: check-tcg
+
+# targets stuck on older compilers
+build-legacy:
+  extends: .native_build_job_template
+  needs:
+    job: amd64-debian-legacy-cross-container
+  variables:
+    IMAGE: debian-legacy-test-cross
+    TARGETS: alpha-linux-user alpha-softmmu sh4-linux-user
+    CONFIGURE_ARGS: --disable-tools
     MAKE_CHECK_ARGS: check-tcg
     MAKE_CHECK_ARGS: check-tcg
 
 
 build-user-hexagon:
 build-user-hexagon:
@@ -270,7 +387,9 @@ build-user-hexagon:
     CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
     CONFIGURE_ARGS: --disable-tools --disable-docs --enable-debug-tcg
     MAKE_CHECK_ARGS: check-tcg
     MAKE_CHECK_ARGS: check-tcg
 
 
-# Only build the softmmu targets we have check-tcg tests for
+# Build the softmmu targets we have check-tcg tests and compilers in
+# our omnibus all-test-cross container. Those targets that haven't got
+# Debian cross compiler support need to use special containers.
 build-some-softmmu:
 build-some-softmmu:
   extends: .native_build_job_template
   extends: .native_build_job_template
   needs:
   needs:
@@ -278,7 +397,18 @@ build-some-softmmu:
   variables:
   variables:
     IMAGE: debian-all-test-cross
     IMAGE: debian-all-test-cross
     CONFIGURE_ARGS: --disable-tools --enable-debug
     CONFIGURE_ARGS: --disable-tools --enable-debug
-    TARGETS: xtensa-softmmu arm-softmmu aarch64-softmmu alpha-softmmu
+    TARGETS: arm-softmmu aarch64-softmmu i386-softmmu riscv64-softmmu
+      s390x-softmmu x86_64-softmmu
+    MAKE_CHECK_ARGS: check-tcg
+
+build-loongarch64:
+  extends: .native_build_job_template
+  needs:
+    job: loongarch-debian-cross-container
+  variables:
+    IMAGE: debian-loongarch-cross
+    CONFIGURE_ARGS: --disable-tools --enable-debug
+    TARGETS: loongarch64-linux-user loongarch64-softmmu
     MAKE_CHECK_ARGS: check-tcg
     MAKE_CHECK_ARGS: check-tcg
 
 
 # We build tricore in a very minimal tricore only container
 # We build tricore in a very minimal tricore only container
@@ -300,6 +430,7 @@ clang-system:
     IMAGE: fedora
     IMAGE: fedora
     CONFIGURE_ARGS: --cc=clang --cxx=clang++
     CONFIGURE_ARGS: --cc=clang --cxx=clang++
       --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
       --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
+      --extra-cflags=-fno-sanitize=function
     TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu s390x-softmmu
     TARGETS: alpha-softmmu arm-softmmu m68k-softmmu mips64-softmmu s390x-softmmu
     MAKE_CHECK_ARGS: check-qtest check-tcg
     MAKE_CHECK_ARGS: check-qtest check-tcg
 
 
@@ -311,8 +442,9 @@ clang-user:
   variables:
   variables:
     IMAGE: debian-all-test-cross
     IMAGE: debian-all-test-cross
     CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
     CONFIGURE_ARGS: --cc=clang --cxx=clang++ --disable-system
-      --target-list-exclude=microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
+      --target-list-exclude=alpha-linux-user,microblazeel-linux-user,aarch64_be-linux-user,i386-linux-user,m68k-linux-user,mipsn32el-linux-user,xtensaeb-linux-user
       --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
       --extra-cflags=-fsanitize=undefined --extra-cflags=-fno-sanitize-recover=undefined
+      --extra-cflags=-fno-sanitize=function
     MAKE_CHECK_ARGS: check-unit check-tcg
     MAKE_CHECK_ARGS: check-unit check-tcg
 
 
 # Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
 # Set LD_JOBS=1 because this requires LTO and ld consumes a large amount of memory.
@@ -443,6 +575,9 @@ tsan-build:
     CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
     CONFIGURE_ARGS: --enable-tsan --cc=clang --cxx=clang++
           --enable-trace-backends=ust --disable-slirp
           --enable-trace-backends=ust --disable-slirp
     TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
     TARGETS: x86_64-softmmu ppc64-softmmu riscv64-softmmu x86_64-linux-user
+    # Remove when we switch to a distro with clang >= 18
+    # https://github.com/google/sanitizers/issues/1716
+    MAKE: setarch -R make
 
 
 # gcov is a GCC features
 # gcov is a GCC features
 gcov:
 gcov:
@@ -454,7 +589,7 @@ gcov:
     IMAGE: ubuntu2204
     IMAGE: ubuntu2204
     CONFIGURE_ARGS: --enable-gcov
     CONFIGURE_ARGS: --enable-gcov
     TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
     TARGETS: aarch64-softmmu ppc64-softmmu s390x-softmmu x86_64-softmmu
-    MAKE_CHECK_ARGS: check
+    MAKE_CHECK_ARGS: check-unit check-softfloat
   after_script:
   after_script:
     - cd build
     - cd build
     - gcovr --xml-pretty --exclude-unreachable-branches --print-summary
     - gcovr --xml-pretty --exclude-unreachable-branches --print-summary
@@ -462,8 +597,12 @@ gcov:
   coverage: /^\s*lines:\s*\d+.\d+\%/
   coverage: /^\s*lines:\s*\d+.\d+\%/
   artifacts:
   artifacts:
     name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
     name: ${CI_JOB_NAME}-${CI_COMMIT_REF_NAME}-${CI_COMMIT_SHA}
+    when: always
     expire_in: 2 days
     expire_in: 2 days
+    paths:
+      - build/meson-logs/testlog.txt
     reports:
     reports:
+      junit: build/meson-logs/testlog.junit.xml
       coverage_report:
       coverage_report:
         coverage_format: cobertura
         coverage_format: cobertura
         path: build/coverage.xml
         path: build/coverage.xml
@@ -494,10 +633,10 @@ build-tci:
   variables:
   variables:
     IMAGE: debian-all-test-cross
     IMAGE: debian-all-test-cross
   script:
   script:
-    - TARGETS="aarch64 alpha arm hppa m68k microblaze ppc64 s390x x86_64"
+    - TARGETS="aarch64 arm hppa m68k microblaze ppc64 s390x x86_64"
     - mkdir build
     - mkdir build
     - cd build
     - cd build
-    - ../configure --enable-tcg-interpreter --disable-docs --disable-gtk --disable-vnc
+    - ../configure --enable-tcg-interpreter --disable-kvm --disable-docs --disable-gtk --disable-vnc
         --target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
         --target-list="$(for tg in $TARGETS; do echo -n ${tg}'-softmmu '; done)"
         || { cat config.log meson-logs/meson-log.txt && exit 1; }
         || { cat config.log meson-logs/meson-log.txt && exit 1; }
     - make -j"$JOBS"
     - make -j"$JOBS"
@@ -512,12 +651,15 @@ build-tci:
     - make check-tcg
     - make check-tcg
 
 
 # Check our reduced build configurations
 # Check our reduced build configurations
+# requires libfdt: aarch64, arm, loongarch64, microblaze, microblazeel,
+#   or1k, ppc64, riscv32, riscv64, rx
+# fails qtest without boards: i386, x86_64
 build-without-defaults:
 build-without-defaults:
   extends: .native_build_job_template
   extends: .native_build_job_template
   needs:
   needs:
-    job: amd64-centos8-container
+    job: amd64-centos9-container
   variables:
   variables:
-    IMAGE: centos8
+    IMAGE: centos9
     CONFIGURE_ARGS:
     CONFIGURE_ARGS:
       --without-default-devices
       --without-default-devices
       --without-default-features
       --without-default-features
@@ -525,14 +667,17 @@ build-without-defaults:
       --disable-pie
       --disable-pie
       --disable-qom-cast-debug
       --disable-qom-cast-debug
       --disable-strip
       --disable-strip
-    TARGETS: avr-softmmu mips64-softmmu s390x-softmmu sh4-softmmu
-      sparc64-softmmu hexagon-linux-user i386-linux-user s390x-linux-user
-    MAKE_CHECK_ARGS: check-unit check-qtest-avr check-qtest-mips64
+    TARGETS: alpha-softmmu avr-softmmu cris-softmmu hppa-softmmu m68k-softmmu
+      mips-softmmu mips64-softmmu mipsel-softmmu mips64el-softmmu
+      ppc-softmmu s390x-softmmu sh4-softmmu sh4eb-softmmu sparc-softmmu
+      sparc64-softmmu tricore-softmmu xtensa-softmmu xtensaeb-softmmu
+      hexagon-linux-user i386-linux-user s390x-linux-user
+    MAKE_CHECK_ARGS: check
 
 
 build-libvhost-user:
 build-libvhost-user:
   extends: .base_job_template
   extends: .base_job_template
   stage: build
   stage: build
-  image: $CI_REGISTRY_IMAGE/qemu/fedora:latest
+  image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
   needs:
   needs:
     job: amd64-fedora-container
     job: amd64-fedora-container
   script:
   script:
@@ -552,7 +697,7 @@ build-tools-and-docs-debian:
     # when running on 'master' we use pre-existing container
     # when running on 'master' we use pre-existing container
     optional: true
     optional: true
   variables:
   variables:
-    IMAGE: debian-amd64
+    IMAGE: debian
     MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
     MAKE_CHECK_ARGS: check-unit ctags TAGS cscope
     CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
     CONFIGURE_ARGS: --disable-system --disable-user --enable-docs --enable-tools
     QEMU_JOB_PUBLISH: 1
     QEMU_JOB_PUBLISH: 1
@@ -572,7 +717,7 @@ build-tools-and-docs-debian:
 # of what topic branch they're currently using
 # of what topic branch they're currently using
 pages:
 pages:
   extends: .base_job_template
   extends: .base_job_template
-  image: $CI_REGISTRY_IMAGE/qemu/debian-amd64:latest
+  image: $CI_REGISTRY_IMAGE/qemu/debian:$QEMU_CI_CONTAINER_TAG
   stage: test
   stage: test
   needs:
   needs:
     - job: build-tools-and-docs-debian
     - job: build-tools-and-docs-debian
@@ -580,14 +725,55 @@ pages:
     - mkdir -p public
     - mkdir -p public
     # HTML-ised source tree
     # HTML-ised source tree
     - make gtags
     - make gtags
-    - htags -anT --tree-view=filetree -m qemu_init
+    # We unset variables to work around a bug in some htags versions
+    # which causes it to fail when the environment is large
+    - CI_COMMIT_MESSAGE= CI_COMMIT_TAG_MESSAGE= htags
+        -anT --tree-view=filetree -m qemu_init
         -t "Welcome to the QEMU sourcecode"
         -t "Welcome to the QEMU sourcecode"
     - mv HTML public/src
     - mv HTML public/src
     # Project documentation
     # Project documentation
     - make -C build install DESTDIR=$(pwd)/temp-install
     - make -C build install DESTDIR=$(pwd)/temp-install
     - mv temp-install/usr/local/share/doc/qemu/* public/
     - mv temp-install/usr/local/share/doc/qemu/* public/
   artifacts:
   artifacts:
+    when: on_success
     paths:
     paths:
       - public
       - public
   variables:
   variables:
     QEMU_JOB_PUBLISH: 1
     QEMU_JOB_PUBLISH: 1
+
+coverity:
+  image: $CI_REGISTRY_IMAGE/qemu/fedora:$QEMU_CI_CONTAINER_TAG
+  stage: build
+  allow_failure: true
+  timeout: 3h
+  needs:
+    - job: amd64-fedora-container
+      optional: true
+  before_script:
+    - dnf install -y curl wget
+  script:
+    # would be nice to cancel the job if over quota (https://gitlab.com/gitlab-org/gitlab/-/issues/256089)
+    # for example:
+    #   curl --request POST --header "PRIVATE-TOKEN: $CI_JOB_TOKEN" "${CI_SERVER_URL}/api/v4/projects/${CI_PROJECT_ID}/jobs/${CI_JOB_ID}/cancel
+    - 'scripts/coverity-scan/run-coverity-scan --check-upload-only || { exitcode=$?; if test $exitcode = 1; then
+        exit 0;
+      else
+        exit $exitcode;
+      fi; };
+      scripts/coverity-scan/run-coverity-scan --update-tools-only > update-tools.log 2>&1 || { cat update-tools.log; exit 1; };
+      scripts/coverity-scan/run-coverity-scan --no-update-tools'
+  rules:
+    - if: '$COVERITY_TOKEN == null'
+      when: never
+    - if: '$COVERITY_EMAIL == null'
+      when: never
+    # Never included on upstream pipelines, except for schedules
+    - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
+      when: on_success
+    - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM'
+      when: never
+    # Forks don't get any pipeline unless QEMU_CI=1 or QEMU_CI=2 is set
+    - if: '$QEMU_CI != "1" && $QEMU_CI != "2"'
+      when: never
+    # Always manual on forks even if $QEMU_CI == "2"
+    - when: manual

+ 23 - 53
.gitlab-ci.d/cirrus.yml

@@ -13,10 +13,12 @@
 .cirrus_build_job:
 .cirrus_build_job:
   extends: .base_job_template
   extends: .base_job_template
   stage: build
   stage: build
-  image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
+  image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:latest
   needs: []
   needs: []
+  # 20 mins larger than "timeout_in" in cirrus/build.yml
+  # as there's often a 5-10 minute delay before Cirrus CI
+  # actually starts the task
   timeout: 80m
   timeout: 80m
-  allow_failure: true
   script:
   script:
     - source .gitlab-ci.d/cirrus/$NAME.vars
     - source .gitlab-ci.d/cirrus/$NAME.vars
     - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
     - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
@@ -44,80 +46,48 @@
   variables:
   variables:
     QEMU_JOB_CIRRUS: 1
     QEMU_JOB_CIRRUS: 1
 
 
-x64-freebsd-12-build:
-  extends: .cirrus_build_job
-  variables:
-    NAME: freebsd-12
-    CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
-    CIRRUS_VM_IMAGE_SELECTOR: image_family
-    CIRRUS_VM_IMAGE_NAME: freebsd-12-4
-    CIRRUS_VM_CPUS: 8
-    CIRRUS_VM_RAM: 8G
-    UPDATE_COMMAND: pkg update; pkg upgrade -y
-    INSTALL_COMMAND: pkg install -y
-    TEST_TARGETS: check
-
 x64-freebsd-13-build:
 x64-freebsd-13-build:
   extends: .cirrus_build_job
   extends: .cirrus_build_job
   variables:
   variables:
     NAME: freebsd-13
     NAME: freebsd-13
     CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
     CIRRUS_VM_INSTANCE_TYPE: freebsd_instance
     CIRRUS_VM_IMAGE_SELECTOR: image_family
     CIRRUS_VM_IMAGE_SELECTOR: image_family
-    CIRRUS_VM_IMAGE_NAME: freebsd-13-1
+    CIRRUS_VM_IMAGE_NAME: freebsd-13-3
     CIRRUS_VM_CPUS: 8
     CIRRUS_VM_CPUS: 8
     CIRRUS_VM_RAM: 8G
     CIRRUS_VM_RAM: 8G
     UPDATE_COMMAND: pkg update; pkg upgrade -y
     UPDATE_COMMAND: pkg update; pkg upgrade -y
     INSTALL_COMMAND: pkg install -y
     INSTALL_COMMAND: pkg install -y
+    CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblaze-softmmu,mips64el-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4eb-softmmu,xtensa-softmmu
     TEST_TARGETS: check
     TEST_TARGETS: check
 
 
-aarch64-macos-12-base-build:
+aarch64-macos-13-base-build:
   extends: .cirrus_build_job
   extends: .cirrus_build_job
   variables:
   variables:
-    NAME: macos-12
+    NAME: macos-13
     CIRRUS_VM_INSTANCE_TYPE: macos_instance
     CIRRUS_VM_INSTANCE_TYPE: macos_instance
     CIRRUS_VM_IMAGE_SELECTOR: image
     CIRRUS_VM_IMAGE_SELECTOR: image
-    CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-monterey-base:latest
+    CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-ventura-base:latest
     CIRRUS_VM_CPUS: 12
     CIRRUS_VM_CPUS: 12
     CIRRUS_VM_RAM: 24G
     CIRRUS_VM_RAM: 24G
     UPDATE_COMMAND: brew update
     UPDATE_COMMAND: brew update
     INSTALL_COMMAND: brew install
     INSTALL_COMMAND: brew install
     PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
     PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
     PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
     PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
+    CONFIGURE_ARGS: --target-list-exclude=arm-softmmu,i386-softmmu,microblazeel-softmmu,mips64-softmmu,mipsel-softmmu,mips-softmmu,ppc-softmmu,sh4-softmmu,xtensaeb-softmmu
     TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
     TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
 
 
-
-# The following jobs run VM-based tests via KVM on a Linux-based Cirrus-CI job
-.cirrus_kvm_job:
-  extends: .base_job_template
-  stage: build
-  image: registry.gitlab.com/libvirt/libvirt-ci/cirrus-run:master
-  needs: []
-  timeout: 80m
-  script:
-    - sed -e "s|[@]CI_REPOSITORY_URL@|$CI_REPOSITORY_URL|g"
-          -e "s|[@]CI_COMMIT_REF_NAME@|$CI_COMMIT_REF_NAME|g"
-          -e "s|[@]CI_COMMIT_SHA@|$CI_COMMIT_SHA|g"
-          -e "s|[@]NAME@|$NAME|g"
-          -e "s|[@]CONFIGURE_ARGS@|$CONFIGURE_ARGS|g"
-          -e "s|[@]TEST_TARGETS@|$TEST_TARGETS|g"
-      <.gitlab-ci.d/cirrus/kvm-build.yml >.gitlab-ci.d/cirrus/$NAME.yml
-    - cat .gitlab-ci.d/cirrus/$NAME.yml
-    - cirrus-run -v --show-build-log always .gitlab-ci.d/cirrus/$NAME.yml
+aarch64-macos-14-base-build:
+  extends: .cirrus_build_job
   variables:
   variables:
-    QEMU_JOB_CIRRUS: 1
+    NAME: macos-14
+    CIRRUS_VM_INSTANCE_TYPE: macos_instance
+    CIRRUS_VM_IMAGE_SELECTOR: image
+    CIRRUS_VM_IMAGE_NAME: ghcr.io/cirruslabs/macos-sonoma-base:latest
+    CIRRUS_VM_CPUS: 12
+    CIRRUS_VM_RAM: 24G
+    UPDATE_COMMAND: brew update
+    INSTALL_COMMAND: brew install
+    PATH_EXTRA: /opt/homebrew/ccache/libexec:/opt/homebrew/gettext/bin
+    PKG_CONFIG_PATH: /opt/homebrew/curl/lib/pkgconfig:/opt/homebrew/ncurses/lib/pkgconfig:/opt/homebrew/readline/lib/pkgconfig
+    TEST_TARGETS: check-unit check-block check-qapi-schema check-softfloat check-qtest-x86_64
     QEMU_JOB_OPTIONAL: 1
     QEMU_JOB_OPTIONAL: 1
-
-
-x86-netbsd:
-  extends: .cirrus_kvm_job
-  variables:
-    NAME: netbsd
-    CONFIGURE_ARGS: --target-list=x86_64-softmmu,ppc64-softmmu,aarch64-softmmu
-    TEST_TARGETS: check
-
-x86-openbsd:
-  extends: .cirrus_kvm_job
-  variables:
-    NAME: openbsd
-    CONFIGURE_ARGS: --target-list=i386-softmmu,riscv64-softmmu,mips64-softmmu
-    TEST_TARGETS: check

+ 4 - 2
.gitlab-ci.d/cirrus/build.yml

@@ -16,15 +16,17 @@ env:
   TEST_TARGETS: "@TEST_TARGETS@"
   TEST_TARGETS: "@TEST_TARGETS@"
 
 
 build_task:
 build_task:
+  # A little shorter than GitLab timeout in ../cirrus.yml
+  timeout_in: 60m
   install_script:
   install_script:
     - @UPDATE_COMMAND@
     - @UPDATE_COMMAND@
     - @INSTALL_COMMAND@ @PKGS@
     - @INSTALL_COMMAND@ @PKGS@
-    - if test -n "@PYPI_PKGS@" ; then @PIP3@ install @PYPI_PKGS@ ; fi
+    - if test -n "@PYPI_PKGS@" ; then PYLIB=$(@PYTHON@ -c 'import sysconfig; print(sysconfig.get_path("stdlib"))'); rm -f $PYLIB/EXTERNALLY-MANAGED; @PIP3@ install @PYPI_PKGS@ ; fi
   clone_script:
   clone_script:
     - git clone --depth 100 "$CI_REPOSITORY_URL" .
     - git clone --depth 100 "$CI_REPOSITORY_URL" .
     - git fetch origin "$CI_COMMIT_REF_NAME"
     - git fetch origin "$CI_COMMIT_REF_NAME"
     - git reset --hard "$CI_COMMIT_SHA"
     - git reset --hard "$CI_COMMIT_SHA"
-  build_script:
+  step_script:
     - mkdir build
     - mkdir build
     - cd build
     - cd build
     - ../configure --enable-werror $CONFIGURE_ARGS
     - ../configure --enable-werror $CONFIGURE_ARGS

+ 0 - 16
.gitlab-ci.d/cirrus/freebsd-12.vars

@@ -1,16 +0,0 @@
-# THIS FILE WAS AUTO-GENERATED
-#
-#  $ lcitool variables freebsd-12 qemu
-#
-# https://gitlab.com/libvirt/libvirt-ci
-
-CCACHE='/usr/local/bin/ccache'
-CPAN_PKGS=''
-CROSS_PKGS=''
-MAKE='/usr/local/bin/gmake'
-NINJA='/usr/local/bin/ninja'
-PACKAGING_COMMAND='pkg'
-PIP3='/usr/local/bin/pip-3.8'
-PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 zstd'
-PYPI_PKGS=''
-PYTHON='/usr/local/bin/python3'

+ 1 - 1
.gitlab-ci.d/cirrus/freebsd-13.vars

@@ -11,6 +11,6 @@ MAKE='/usr/local/bin/gmake'
 NINJA='/usr/local/bin/ninja'
 NINJA='/usr/local/bin/ninja'
 PACKAGING_COMMAND='pkg'
 PACKAGING_COMMAND='pkg'
 PIP3='/usr/local/bin/pip-3.8'
 PIP3='/usr/local/bin/pip-3.8'
-PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cdrkit-genisoimage cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson ncurses nettle ninja opencv pixman pkgconf png py39-numpy py39-pillow py39-pip py39-sphinx py39-sphinx_rtd_theme py39-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 zstd'
+PKGS='alsa-lib bash bison bzip2 ca_root_nss capstone4 ccache cmocka ctags curl cyrus-sasl dbus diffutils dtc flex fusefs-libs3 gettext git glib gmake gnutls gsed gtk-vnc gtk3 json-c libepoxy libffi libgcrypt libjpeg-turbo libnfs libslirp libspice-server libssh libtasn1 llvm lzo2 meson mtools ncurses nettle ninja opencv pixman pkgconf png py311-numpy py311-pillow py311-pip py311-sphinx py311-sphinx_rtd_theme py311-tomli py311-yaml python3 rpm2cpio sdl2 sdl2_image snappy sndio socat spice-protocol tesseract usbredir virglrenderer vte3 xorriso zstd'
 PYPI_PKGS=''
 PYPI_PKGS=''
 PYTHON='/usr/local/bin/python3'
 PYTHON='/usr/local/bin/python3'

+ 0 - 31
.gitlab-ci.d/cirrus/kvm-build.yml

@@ -1,31 +0,0 @@
-container:
-  image: fedora:35
-  cpu: 4
-  memory: 8Gb
-  kvm: true
-
-env:
-  CIRRUS_CLONE_DEPTH: 1
-  CI_REPOSITORY_URL: "@CI_REPOSITORY_URL@"
-  CI_COMMIT_REF_NAME: "@CI_COMMIT_REF_NAME@"
-  CI_COMMIT_SHA: "@CI_COMMIT_SHA@"
-
-@NAME@_task:
-  @NAME@_vm_cache:
-    folder: $HOME/.cache/qemu-vm
-  install_script:
-    - dnf update -y
-    - dnf install -y git make openssh-clients qemu-img qemu-system-x86 wget
-  clone_script:
-    - git clone --depth 100 "$CI_REPOSITORY_URL" .
-    - git fetch origin "$CI_COMMIT_REF_NAME"
-    - git reset --hard "$CI_COMMIT_SHA"
-  build_script:
-    - if [ -f $HOME/.cache/qemu-vm/images/@NAME@.img ]; then
-        make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN)
-          EXTRA_CONFIGURE_OPTS="@CONFIGURE_ARGS@"
-          BUILD_TARGET="@TEST_TARGETS@" ;
-      else
-        make vm-build-@NAME@ J=$(getconf _NPROCESSORS_ONLN) BUILD_TARGET=help
-          EXTRA_CONFIGURE_OPTS="--disable-system --disable-user --disable-tools" ;
-      fi

+ 3 - 3
.gitlab-ci.d/cirrus/macos-12.vars → .gitlab-ci.d/cirrus/macos-13.vars

@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTO-GENERATED
 # THIS FILE WAS AUTO-GENERATED
 #
 #
-#  $ lcitool variables macos-12 qemu
+#  $ lcitool variables macos-13 qemu
 #
 #
 # https://gitlab.com/libvirt/libvirt-ci
 # https://gitlab.com/libvirt/libvirt-ci
 
 
@@ -11,6 +11,6 @@ MAKE='/opt/homebrew/bin/gmake'
 NINJA='/opt/homebrew/bin/ninja'
 NINJA='/opt/homebrew/bin/ninja'
 PACKAGING_COMMAND='brew'
 PACKAGING_COMMAND='brew'
 PIP3='/opt/homebrew/bin/pip3'
 PIP3='/opt/homebrew/bin/pip3'
-PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol tesseract usbredir vde vte3 zlib zstd'
-PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme'
+PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 gtk-vnc jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
+PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
 PYTHON='/opt/homebrew/bin/python3'
 PYTHON='/opt/homebrew/bin/python3'

+ 16 - 0
.gitlab-ci.d/cirrus/macos-14.vars

@@ -0,0 +1,16 @@
+# THIS FILE WAS AUTO-GENERATED
+#
+#  $ lcitool variables macos-14 qemu
+#
+# https://gitlab.com/libvirt/libvirt-ci
+
+CCACHE='/opt/homebrew/bin/ccache'
+CPAN_PKGS=''
+CROSS_PKGS=''
+MAKE='/opt/homebrew/bin/gmake'
+NINJA='/opt/homebrew/bin/ninja'
+PACKAGING_COMMAND='brew'
+PIP3='/opt/homebrew/bin/pip3'
+PKGS='bash bc bison bzip2 capstone ccache cmocka ctags curl dbus diffutils dtc flex gcovr gettext git glib gnu-sed gnutls gtk+3 gtk-vnc jemalloc jpeg-turbo json-c libepoxy libffi libgcrypt libiscsi libnfs libpng libslirp libssh libtasn1 libusb llvm lzo make meson mtools ncurses nettle ninja pixman pkg-config python3 rpm2cpio sdl2 sdl2_image snappy socat sparse spice-protocol swtpm tesseract usbredir vde vte3 xorriso zlib zstd'
+PYPI_PKGS='PyYAML numpy pillow sphinx sphinx-rtd-theme tomli'
+PYTHON='/opt/homebrew/bin/python3'

+ 2 - 2
.gitlab-ci.d/container-core.yml

@@ -1,10 +1,10 @@
 include:
 include:
   - local: '/.gitlab-ci.d/container-template.yml'
   - local: '/.gitlab-ci.d/container-template.yml'
 
 
-amd64-centos8-container:
+amd64-centos9-container:
   extends: .container_job_template
   extends: .container_job_template
   variables:
   variables:
-    NAME: centos8
+    NAME: centos9
 
 
 amd64-fedora-container:
 amd64-fedora-container:
   extends: .container_job_template
   extends: .container_job_template

+ 11 - 57
.gitlab-ci.d/container-cross.yml

@@ -1,9 +1,3 @@
-alpha-debian-cross-container:
-  extends: .container_job_template
-  stage: containers
-  variables:
-    NAME: debian-alpha-cross
-
 amd64-debian-cross-container:
   extends: .container_job_template
   stage: containers
@@ -16,6 +10,12 @@ amd64-debian-user-cross-container:
   variables:
     NAME: debian-all-test-cross
 
+amd64-debian-legacy-cross-container:
+  extends: .container_job_template
+  stage: containers
+  variables:
+    NAME: debian-legacy-test-cross
+
 arm64-debian-cross-container:
   extends: .container_job_template
   stage: containers
@@ -40,23 +40,17 @@ hexagon-cross-container:
   variables:
     NAME: debian-hexagon-cross
 
-hppa-debian-cross-container:
+loongarch-debian-cross-container:
   extends: .container_job_template
   stage: containers
   variables:
-    NAME: debian-hppa-cross
+    NAME: debian-loongarch-cross
 
-m68k-debian-cross-container:
+i686-debian-cross-container:
   extends: .container_job_template
   stage: containers
   variables:
-    NAME: debian-m68k-cross
-
-mips64-debian-cross-container:
-  extends: .container_job_template
-  stage: containers
-  variables:
-    NAME: debian-mips64-cross
+    NAME: debian-i686-cross
 
 mips64el-debian-cross-container:
   extends: .container_job_template
@@ -64,24 +58,12 @@ mips64el-debian-cross-container:
   variables:
     NAME: debian-mips64el-cross
 
-mips-debian-cross-container:
-  extends: .container_job_template
-  stage: containers
-  variables:
-    NAME: debian-mips-cross
-
 mipsel-debian-cross-container:
   extends: .container_job_template
   stage: containers
   variables:
     NAME: debian-mipsel-cross
 
-powerpc-test-cross-container:
-  extends: .container_job_template
-  stage: containers
-  variables:
-    NAME: debian-powerpc-test-cross
-
 ppc64el-debian-cross-container:
   extends: .container_job_template
   stage: containers
@@ -95,13 +77,7 @@ riscv64-debian-cross-container:
   allow_failure: true
   variables:
     NAME: debian-riscv64-cross
-
-# we can however build TCG tests using a non-sid base
-riscv64-debian-test-cross-container:
-  extends: .container_job_template
-  stage: containers
-  variables:
-    NAME: debian-riscv64-test-cross
+    QEMU_JOB_OPTIONAL: 1
 
 s390x-debian-cross-container:
   extends: .container_job_template
@@ -109,18 +85,6 @@ s390x-debian-cross-container:
   variables:
     NAME: debian-s390x-cross
 
-sh4-debian-cross-container:
-  extends: .container_job_template
-  stage: containers
-  variables:
-    NAME: debian-sh4-cross
-
-sparc64-debian-cross-container:
-  extends: .container_job_template
-  stage: containers
-  variables:
-    NAME: debian-sparc64-cross
-
 tricore-debian-cross-container:
   extends: .container_job_template
   stage: containers
@@ -137,16 +101,6 @@ cris-fedora-cross-container:
   variables:
     NAME: fedora-cris-cross
 
-i386-fedora-cross-container:
-  extends: .container_job_template
-  variables:
-    NAME: fedora-i386-cross
-
-win32-fedora-cross-container:
-  extends: .container_job_template
-  variables:
-    NAME: fedora-win32-cross
-
 win64-fedora-cross-container:
   extends: .container_job_template
   variables:
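The QEMU_JOB_OPTIONAL flag set on the riscv64 container above is interpreted by the shared rules in base.yml; a simplified sketch of that pattern (an assumption about the rule shape, not the exact upstream rules) is:

.base_job_template:
  rules:
    # optional jobs stay visible in the pipeline but must be started by hand
    - if: '$QEMU_JOB_OPTIONAL'
      when: manual
      allow_failure: true
    - when: on_success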

+ 4 - 4
.gitlab-ci.d/container-template.yml

@@ -1,15 +1,15 @@
 .container_job_template:
   extends: .base_job_template
-  image: docker:stable
+  image: docker:latest
   stage: containers
   services:
     - docker:dind
   before_script:
-    - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:latest"
+    - export TAG="$CI_REGISTRY_IMAGE/qemu/$NAME:$QEMU_CI_CONTAINER_TAG"
+    # Always ':latest' because we always use upstream as a common cache source
     - export COMMON_TAG="$CI_REGISTRY/qemu-project/qemu/qemu/$NAME:latest"
-    - apk add python3
-    - docker info
     - docker login $CI_REGISTRY -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD"
+    - until docker info; do sleep 1; done
   script:
     - echo "TAG:$TAG"
     - echo "COMMON_TAG:$COMMON_TAG"

+ 1 - 1
.gitlab-ci.d/containers.yml

@@ -11,7 +11,7 @@ amd64-debian-container:
   extends: .container_job_template
   stage: containers
   variables:
-    NAME: debian-amd64
+    NAME: debian
 
 amd64-ubuntu2204-container:
   extends: .container_job_template

+ 33 - 4
.gitlab-ci.d/crossbuild-template.yml

@@ -1,11 +1,23 @@
 .cross_system_build_job:
   extends: .base_job_template
   stage: build
-  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
+  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
+  cache:
+    paths:
+      - ccache
+    key: "$CI_JOB_NAME"
+    when: always
   timeout: 80m
+  before_script:
+    - cat /packages.txt
   script:
+    - export CCACHE_BASEDIR="$(pwd)"
+    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
+    - export CCACHE_MAXSIZE="500M"
+    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
     - mkdir build
     - cd build
+    - ccache --zero-stats
     - ../configure --enable-werror --disable-docs --enable-fdt=system
         --disable-user $QEMU_CONFIGURE_OPTS $EXTRA_CONFIGURE_OPTS
         --target-list-exclude="arm-softmmu cris-softmmu
@@ -18,6 +30,7 @@
       version="$(git describe --match v[0-9]* 2>/dev/null || git rev-parse --short HEAD)";
       mv -v qemu-setup*.exe qemu-setup-${version}.exe;
       fi
+    - ccache --show-stats
 
 # Job to cross-build specific accelerators.
 #
@@ -27,9 +40,17 @@
 .cross_accel_build_job:
   extends: .base_job_template
   stage: build
-  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
+  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
   timeout: 30m
+  cache:
+    paths:
+      - ccache/
+    key: "$CI_JOB_NAME"
   script:
+    - export CCACHE_BASEDIR="$(pwd)"
+    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
+    - export CCACHE_MAXSIZE="500M"
+    - export PATH="$CCACHE_WRAPPERSDIR:$PATH"
     - mkdir build
     - cd build
     - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
@@ -39,14 +60,21 @@
 .cross_user_build_job:
   extends: .base_job_template
   stage: build
-  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:latest
+  image: $CI_REGISTRY_IMAGE/qemu/$IMAGE:$QEMU_CI_CONTAINER_TAG
+  cache:
+    paths:
+      - ccache/
+    key: "$CI_JOB_NAME"
   script:
+    - export CCACHE_BASEDIR="$(pwd)"
+    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
+    - export CCACHE_MAXSIZE="500M"
     - mkdir build
     - cd build
     - ../configure --enable-werror --disable-docs $QEMU_CONFIGURE_OPTS
         --disable-system --target-list-exclude="aarch64_be-linux-user
           alpha-linux-user cris-linux-user m68k-linux-user microblazeel-linux-user
-          nios2-linux-user or1k-linux-user ppc-linux-user sparc-linux-user
+          or1k-linux-user ppc-linux-user sparc-linux-user
           xtensa-linux-user $CROSS_SKIP_TARGETS"
     - make -j$(expr $(nproc) + 1) all check-build $MAKE_CHECK_ARGS
 
@@ -55,6 +83,7 @@
 .cross_test_artifacts:
   artifacts:
     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
+    when: always
     expire_in: 7 days
     paths:
       - build/meson-logs/testlog.txt
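The ccache wiring added above follows the usual GitLab pattern: keep the cache directory inside the checkout so the cache: keyword can archive it, and zero/print statistics around the build. Reduced to a standalone sketch (job name and build command are illustrative, not part of this change):

.ccache_build_job:
  cache:
    key: "$CI_JOB_NAME"        # one cache per job keeps hit rates predictable
    paths:
      - ccache/
  script:
    - export CCACHE_BASEDIR="$(pwd)"
    - export CCACHE_DIR="$CCACHE_BASEDIR/ccache"
    - export CCACHE_MAXSIZE="500M"
    - ccache --zero-stats
    - make -j"$(nproc)"
    - ccache --show-stats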

+ 29 - 22
.gitlab-ci.d/crossbuilds.yml

@@ -29,27 +29,46 @@ cross-arm64-user:
   variables:
     IMAGE: debian-arm64-cross
 
-cross-i386-user:
+cross-arm64-kvm-only:
+  extends: .cross_accel_build_job
+  needs:
+    job: arm64-debian-cross-container
+  variables:
+    IMAGE: debian-arm64-cross
+    EXTRA_CONFIGURE_OPTS: --disable-tcg --without-default-features
+
+cross-i686-system:
+  extends:
+    - .cross_system_build_job
+    - .cross_test_artifacts
+  needs:
+    job: i686-debian-cross-container
+  variables:
+    IMAGE: debian-i686-cross
+    EXTRA_CONFIGURE_OPTS: --disable-kvm
+    MAKE_CHECK_ARGS: check-qtest
+
+cross-i686-user:
   extends:
     - .cross_user_build_job
     - .cross_test_artifacts
   needs:
-    job: i386-fedora-cross-container
+    job: i686-debian-cross-container
   variables:
-    IMAGE: fedora-i386-cross
+    IMAGE: debian-i686-cross
     MAKE_CHECK_ARGS: check
 
-cross-i386-tci:
+cross-i686-tci:
   extends:
     - .cross_accel_build_job
     - .cross_test_artifacts
   timeout: 60m
   needs:
-    job: i386-fedora-cross-container
+    job: i686-debian-cross-container
   variables:
-    IMAGE: fedora-i386-cross
+    IMAGE: debian-i686-cross
     ACCEL: tcg-interpreter
-    EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user
+    EXTRA_CONFIGURE_OPTS: --target-list=i386-softmmu,i386-linux-user,aarch64-softmmu,aarch64-linux-user,ppc-softmmu,ppc-linux-user --disable-plugins --disable-kvm
     MAKE_CHECK_ARGS: check check-tcg
 
 cross-mipsel-system:
@@ -151,31 +170,19 @@ cross-mips64el-kvm-only:
     IMAGE: debian-mips64el-cross
     EXTRA_CONFIGURE_OPTS: --disable-tcg --target-list=mips64el-softmmu
 
-cross-win32-system:
-  extends: .cross_system_build_job
-  needs:
-    job: win32-fedora-cross-container
-  variables:
-    IMAGE: fedora-win32-cross
-    EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
-    CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu m68k-softmmu
-                        microblazeel-softmmu mips64el-softmmu nios2-softmmu
-  artifacts:
-    paths:
-      - build/qemu-setup*.exe
-
 cross-win64-system:
   extends: .cross_system_build_job
   needs:
     job: win64-fedora-cross-container
   variables:
     IMAGE: fedora-win64-cross
-    EXTRA_CONFIGURE_OPTS: --enable-fdt=internal
+    EXTRA_CONFIGURE_OPTS: --enable-fdt=internal --disable-plugins
     CROSS_SKIP_TARGETS: alpha-softmmu avr-softmmu hppa-softmmu
-                        m68k-softmmu microblazeel-softmmu nios2-softmmu
+                        m68k-softmmu microblazeel-softmmu
                         or1k-softmmu rx-softmmu sh4eb-softmmu sparc64-softmmu
                         tricore-softmmu xtensaeb-softmmu
   artifacts:
+    when: on_success
     paths:
       - build/qemu-setup*.exe
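With the templates above in place, adding another cross-compiled target is mostly declarative; a hypothetical user-mode job (not part of this change) reusing the loongarch container added earlier would look like:

cross-loongarch64-user:            # hypothetical example
  extends:
    - .cross_user_build_job
    - .cross_test_artifacts
  needs:
    job: loongarch-debian-cross-container
  variables:
    IMAGE: debian-loongarch-cross
    MAKE_CHECK_ARGS: check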
 
 

+ 7 - 5
.gitlab-ci.d/custom-runners.yml

@@ -10,23 +10,25 @@
 # gitlab-runner.  To avoid problems that gitlab-runner can cause while
 # reusing the GIT repository, let's enable the clone strategy, which
 # guarantees a fresh repository on each job run.
-variables:
-  GIT_STRATEGY: clone
 
 # All custom runners can extend this template to upload the testlog
 # data as an artifact and also feed the junit report
 .custom_runner_template:
   extends: .base_job_template
+  variables:
+    GIT_STRATEGY: clone
+    GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
   artifacts:
     name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
     expire_in: 7 days
+    when: always
     paths:
-      - build/meson-logs/testlog.txt
+      - build/build.ninja
+      - build/meson-logs
     reports:
       junit: build/meson-logs/testlog.junit.xml
 
 include:
-  - local: '/.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml'
+  - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml'
   - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml'
   - local: '/.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml'
-  - local: '/.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml'
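The jobs in the included files all share one skeleton around .custom_runner_template; roughly (tags, rules and configure flags vary per runner, and this sketch is illustrative only):

example-custom-runner-job:
  extends: .custom_runner_template
  needs: []
  stage: build
  tags:
    - some_runner_tag              # placeholder; real jobs use e.g. ubuntu_22.04 plus an arch tag
  script:
    - mkdir build
    - cd build
    - ../configure || { cat config.log meson-logs/meson-log.txt; exit 1; }
    - make --output-sync -j`nproc`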

+ 0 - 24
.gitlab-ci.d/custom-runners/centos-stream-8-x86_64.yml

@@ -1,24 +0,0 @@
-# All centos-stream-8 jobs should run successfully in an environment
-# setup by the scripts/ci/setup/stream/8/build-environment.yml task
-# "Installation of extra packages to build QEMU"
-
-centos-stream-8-x86_64:
- extends: .custom_runner_template
- allow_failure: true
- needs: []
- stage: build
- tags:
- - centos_stream_8
- - x86_64
- rules:
- - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
- - if: "$CENTOS_STREAM_8_x86_64_RUNNER_AVAILABLE"
- before_script:
- - JOBS=$(expr $(nproc) + 1)
- script:
- - mkdir build
- - cd build
- - ../scripts/ci/org.centos/stream/8/x86_64/configure
-   || { cat config.log meson-logs/meson-log.txt; exit 1; }
- - make -j"$JOBS"
- - make NINJA=":" check check-avocado

+ 2 - 2
.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch32.yml

@@ -1,6 +1,6 @@
 # All ubuntu-22.04 jobs should run successfully in an environment
-# setup by the scripts/ci/setup/qemu/build-environment.yml task
-# "Install basic packages to build QEMU on Ubuntu 20.04"
+# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
+# "Install basic packages to build QEMU on Ubuntu 22.04"
 
 ubuntu-22.04-aarch32-all:
  extends: .custom_runner_template

+ 26 - 4
.gitlab-ci.d/custom-runners/ubuntu-22.04-aarch64.yml

@@ -1,6 +1,6 @@
-# All ubuntu-20.04 jobs should run successfully in an environment
-# setup by the scripts/ci/setup/qemu/build-environment.yml task
-# "Install basic packages to build QEMU on Ubuntu 20.04"
+# All ubuntu-22.04 jobs should run successfully in an environment
+# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
+# "Install basic packages to build QEMU on Ubuntu 22.04"
 
 ubuntu-22.04-aarch64-all-linux-static:
  extends: .custom_runner_template
@@ -45,6 +45,28 @@ ubuntu-22.04-aarch64-all:
 - make --output-sync -j`nproc --ignore=40`
 - make --output-sync -j`nproc --ignore=40` check
 
+ubuntu-22.04-aarch64-without-defaults:
+ extends: .custom_runner_template
+ needs: []
+ stage: build
+ tags:
+ - ubuntu_22.04
+ - aarch64
+ rules:
+ - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
+   when: manual
+   allow_failure: true
+ - if: "$AARCH64_RUNNER_AVAILABLE"
+   when: manual
+   allow_failure: true
+ script:
+ - mkdir build
+ - cd build
+ - ../configure --disable-user --without-default-devices --without-default-features
+   || { cat config.log meson-logs/meson-log.txt; exit 1; }
+ - make --output-sync -j`nproc --ignore=40`
+ - make --output-sync -j`nproc --ignore=40` check
+
 ubuntu-22.04-aarch64-alldbg:
  extends: .custom_runner_template
  needs: []
@@ -123,7 +145,7 @@ ubuntu-22.04-aarch64-notcg:
  script:
  - mkdir build
  - cd build
- - ../configure --disable-tcg
+ - ../configure --disable-tcg --with-devices-aarch64=minimal
    || { cat config.log meson-logs/meson-log.txt; exit 1; }
  - make --output-sync -j`nproc --ignore=40`
  - make --output-sync -j`nproc --ignore=40` check

+ 21 - 23
.gitlab-ci.d/custom-runners/ubuntu-20.04-s390x.yml → .gitlab-ci.d/custom-runners/ubuntu-22.04-s390x.yml

@@ -1,34 +1,32 @@
-# All ubuntu-20.04 jobs should run successfully in an environment
-# setup by the scripts/ci/setup/build-environment.yml task
-# "Install basic packages to build QEMU on Ubuntu 20.04/20.04"
+# All ubuntu-22.04 jobs should run successfully in an environment
+# setup by the scripts/ci/setup/ubuntu/build-environment.yml task
+# "Install basic packages to build QEMU on Ubuntu 22.04"
 
-ubuntu-20.04-s390x-all-linux-static:
+ubuntu-22.04-s390x-all-linux:
 extends: .custom_runner_template
 needs: []
 stage: build
 tags:
- - ubuntu_20.04
+ - ubuntu_22.04
 - s390x
 rules:
 - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
 - if: "$S390X_RUNNER_AVAILABLE"
 script:
- # --disable-libssh is needed because of https://bugs.launchpad.net/qemu/+bug/1838763
- # --disable-glusterfs is needed because there's no static version of those libs in distro supplied packages
 - mkdir build
 - cd build
- - ../configure --enable-debug --static --disable-system --disable-glusterfs --disable-libssh
+ - ../configure --enable-debug --disable-system --disable-tools --disable-docs
   || { cat config.log meson-logs/meson-log.txt; exit 1; }
 - make --output-sync -j`nproc`
 - make --output-sync check-tcg
 - make --output-sync -j`nproc` check
 
-ubuntu-20.04-s390x-all:
+ubuntu-22.04-s390x-all-system:
 extends: .custom_runner_template
 needs: []
 stage: build
 tags:
- - ubuntu_20.04
+ - ubuntu_22.04
 - s390x
 timeout: 75m
 rules:
@@ -37,17 +35,17 @@ ubuntu-20.04-s390x-all:
 script:
 - mkdir build
 - cd build
- - ../configure --disable-libssh
+ - ../configure --disable-user
   || { cat config.log meson-logs/meson-log.txt; exit 1; }
 - make --output-sync -j`nproc`
 - make --output-sync -j`nproc` check
 
-ubuntu-20.04-s390x-alldbg:
+ubuntu-22.04-s390x-alldbg:
 extends: .custom_runner_template
 needs: []
 stage: build
 tags:
- - ubuntu_20.04
+ - ubuntu_22.04
 - s390x
 rules:
 - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@@ -59,18 +57,18 @@ ubuntu-20.04-s390x-alldbg:
 script:
 - mkdir build
 - cd build
- - ../configure --enable-debug --disable-libssh
+ - ../configure --enable-debug
   || { cat config.log meson-logs/meson-log.txt; exit 1; }
 - make clean
 - make --output-sync -j`nproc`
 - make --output-sync -j`nproc` check
 
-ubuntu-20.04-s390x-clang:
+ubuntu-22.04-s390x-clang:
 extends: .custom_runner_template
 needs: []
 stage: build
 tags:
- - ubuntu_20.04
+ - ubuntu_22.04
 - s390x
 rules:
 - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@@ -82,16 +80,16 @@ ubuntu-20.04-s390x-clang:
 script:
 - mkdir build
 - cd build
- - ../configure --disable-libssh --cc=clang --cxx=clang++ --enable-sanitizers
+ - ../configure --cc=clang --cxx=clang++ --enable-sanitizers
   || { cat config.log meson-logs/meson-log.txt; exit 1; }
 - make --output-sync -j`nproc`
 - make --output-sync -j`nproc` check
 
-ubuntu-20.04-s390x-tci:
+ubuntu-22.04-s390x-tci:
 needs: []
 stage: build
 tags:
- - ubuntu_20.04
+ - ubuntu_22.04
 - s390x
 rules:
 - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@@ -103,16 +101,16 @@ ubuntu-20.04-s390x-tci:
 script:
 - mkdir build
 - cd build
- - ../configure --disable-libssh --enable-tcg-interpreter
+ - ../configure --enable-tcg-interpreter
   || { cat config.log meson-logs/meson-log.txt; exit 1; }
 - make --output-sync -j`nproc`
 
-ubuntu-20.04-s390x-notcg:
+ubuntu-22.04-s390x-notcg:
 extends: .custom_runner_template
 needs: []
 stage: build
 tags:
- - ubuntu_20.04
+ - ubuntu_22.04
 - s390x
 rules:
 - if: '$CI_PROJECT_NAMESPACE == "qemu-project" && $CI_COMMIT_BRANCH =~ /^staging/'
@@ -124,7 +122,7 @@ ubuntu-20.04-s390x-notcg:
 script:
 - mkdir build
 - cd build
- - ../configure --disable-libssh --disable-tcg
+ - ../configure --disable-tcg
   || { cat config.log meson-logs/meson-log.txt; exit 1; }
 - make --output-sync -j`nproc`
 - make --output-sync -j`nproc` check

+ 8 - 5
.gitlab-ci.d/opensbi.yml

@@ -24,6 +24,10 @@
     - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
     - if: '$QEMU_CI == "1" && $CI_PROJECT_NAMESPACE != "qemu-project" && $CI_COMMIT_MESSAGE =~ /opensbi/i'
       when: manual
       when: manual
 
 
+    # Scheduled runs on mainline don't get pipelines except for the special Coverity job
+    - if: '$CI_PROJECT_NAMESPACE == $QEMU_CI_UPSTREAM && $CI_PIPELINE_SOURCE == "schedule"'
+      when: never
+
     # Run if any files affecting the build output are touched
     # Run if any files affecting the build output are touched
     - changes:
     - changes:
         - .gitlab-ci.d/opensbi.yml
         - .gitlab-ci.d/opensbi.yml
@@ -42,17 +46,15 @@
 docker-opensbi:
 docker-opensbi:
   extends: .opensbi_job_rules
   extends: .opensbi_job_rules
   stage: containers
   stage: containers
-  image: docker:stable
+  image: docker:latest
   services:
   services:
-    - docker:stable-dind
+    - docker:dind
   variables:
   variables:
     GIT_DEPTH: 3
     GIT_DEPTH: 3
     IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
     IMAGE_TAG: $CI_REGISTRY_IMAGE:opensbi-cross-build
-    # We don't use TLS
-    DOCKER_HOST: tcp://docker:2375
-    DOCKER_TLS_CERTDIR: ""
   before_script:
   before_script:
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
     - docker login -u $CI_REGISTRY_USER -p $CI_REGISTRY_PASSWORD $CI_REGISTRY
+    - until docker info; do sleep 1; done
   script:
   script:
     - docker pull $IMAGE_TAG || true
     - docker pull $IMAGE_TAG || true
     - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
     - docker build --cache-from $IMAGE_TAG --tag $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
@@ -65,6 +67,7 @@ build-opensbi:
   stage: build
   stage: build
   needs: ['docker-opensbi']
   needs: ['docker-opensbi']
   artifacts:
   artifacts:
+    when: on_success
     paths: # 'artifacts.zip' will contains the following files:
     paths: # 'artifacts.zip' will contains the following files:
       - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
       - pc-bios/opensbi-riscv32-generic-fw_dynamic.bin
       - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
       - pc-bios/opensbi-riscv64-generic-fw_dynamic.bin
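The `until docker info; do sleep 1; done` line is there because the docker:dind service can take a few seconds to accept connections; the same guard works in any dind-based job, e.g. this minimal sketch (job and image names are illustrative):

docker-example:
  image: docker:latest
  services:
    - docker:dind
  before_script:
    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
    - until docker info; do sleep 1; done   # wait for the dind daemon to come up
  script:
    - docker build --tag "$CI_REGISTRY_IMAGE:example" .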

+ 7 - 0
.gitlab-ci.d/qemu-project.yml

@@ -1,6 +1,13 @@
 # This file contains the set of jobs run by the QEMU project:
 # https://gitlab.com/qemu-project/qemu/-/pipelines
 
+variables:
+  RUNNER_TAG: ""
+
+default:
+  tags:
+    - $RUNNER_TAG
+
 include:
   - local: '/.gitlab-ci.d/base.yml'
   - local: '/.gitlab-ci.d/stages.yml'
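The empty RUNNER_TAG default is meant to leave jobs effectively untagged so shared runners still pick them up; a fork that wants to pin the whole pipeline to its own runners only needs to override the variable (the value below is a placeholder):

# e.g. set in the fork's CI/CD variables or as a pipeline-level override
variables:
  RUNNER_TAG: "my-private-runner"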

+ 2 - 2
.gitlab-ci.d/static_checks.yml

@@ -26,7 +26,7 @@ check-dco:
 check-python-minreqs:
   extends: .base_job_template
   stage: test
-  image: $CI_REGISTRY_IMAGE/qemu/python:latest
+  image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
   script:
     - make -C python check-minreqs
   variables:
@@ -37,7 +37,7 @@ check-python-minreqs:
 check-python-tox:
   extends: .base_job_template
   stage: test
-  image: $CI_REGISTRY_IMAGE/qemu/python:latest
+  image: $CI_REGISTRY_IMAGE/qemu/python:$QEMU_CI_CONTAINER_TAG
   script:
     - make -C python check-tox
   variables:

+ 70 - 89
.gitlab-ci.d/windows.yml

@@ -1,25 +1,67 @@
-.shared_msys2_builder:
+msys2-64bit:
   extends: .base_job_template
   tags:
-  - shared-windows
-  - windows
-  - windows-1809
+  - saas-windows-medium-amd64
   cache:
-    key: "${CI_JOB_NAME}-cache"
+    key: "$CI_JOB_NAME"
     paths:
-      - ${CI_PROJECT_DIR}/msys64/var/cache
+      - msys64/var/cache
+      - ccache
+    when: always
   needs: []
   stage: build
-  timeout: 80m
+  timeout: 100m
+  variables:
+    # Select the "64 bit, gcc and MSVCRT" MSYS2 environment
+    MSYSTEM: MINGW64
+    # This feature doesn't (currently) work with PowerShell, it stops
+    # the echo'ing of commands being run and doesn't show any timing
+    FF_SCRIPT_SECTIONS: 0
+    CONFIGURE_ARGS: --disable-system --enable-tools -Ddebug=false -Doptimization=0
+    # The Windows git is a bit older so override the default
+    GIT_FETCH_EXTRA_FLAGS: --no-tags --prune --quiet
+  artifacts:
+    name: "$CI_JOB_NAME-$CI_COMMIT_REF_SLUG"
+    expire_in: 7 days
+    paths:
+      - build/meson-logs/testlog.txt
+    reports:
+      junit: "build/meson-logs/testlog.junit.xml"
   before_script:
+  - Write-Output "Acquiring msys2.exe installer at $(Get-Date -Format u)"
   - If ( !(Test-Path -Path msys64\var\cache ) ) {
       mkdir msys64\var\cache
     }
-  - If ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
+  - Invoke-WebRequest
+    "https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe.sig"
+    -outfile "msys2.exe.sig"
+  - if ( Test-Path -Path msys64\var\cache\msys2.exe.sig ) {
+      Write-Output "Cached installer sig" ;
+      if ( ((Get-FileHash msys2.exe.sig).Hash -ne (Get-FileHash msys64\var\cache\msys2.exe.sig).Hash) ) {
+        Write-Output "Mis-matched installer sig, new installer download required" ;
+        Remove-Item -Path msys64\var\cache\msys2.exe.sig ;
+        if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
+          Remove-Item -Path msys64\var\cache\msys2.exe
+        }
+      } else {
+        Write-Output "Matched installer sig, cached installer still valid"
+      }
+    } else {
+      Write-Output "No cached installer sig, new installer download required" ;
+      if ( Test-Path -Path msys64\var\cache\msys2.exe ) {
+        Remove-Item -Path msys64\var\cache\msys2.exe
+      }
+    }
+  - if ( !(Test-Path -Path msys64\var\cache\msys2.exe ) ) {
+      Write-Output "Fetching latest installer" ;
       Invoke-WebRequest
-      "https://github.com/msys2/msys2-installer/releases/download/2022-06-03/msys2-base-x86_64-20220603.sfx.exe"
-      -outfile "msys64\var\cache\msys2.exe"
+      "https://repo.msys2.org/distrib/msys2-x86_64-latest.sfx.exe"
+      -outfile "msys64\var\cache\msys2.exe" ;
+      Copy-Item -Path msys2.exe.sig -Destination msys64\var\cache\msys2.exe.sig
+    } else {
+      Write-Output "Using cached installer"
     }
+  - Write-Output "Invoking msys2.exe installer at $(Get-Date -Format u)"
   - msys64\var\cache\msys2.exe -y
   - ((Get-Content -path .\msys64\etc\\post-install\\07-pacman-key.post -Raw)
       -replace '--refresh-keys', '--version') |
@@ -28,97 +70,36 @@
   - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu'  # Core update
   - .\msys64\usr\bin\bash -lc 'pacman --noconfirm -Syuu'  # Normal update
   - taskkill /F /FI "MODULES eq msys-2.0.dll"
-
-msys2-64bit:
-  extends: .shared_msys2_builder
   script:
+  - Write-Output "Installing mingw packages at $(Get-Date -Format u)"
   - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
   - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
       bison diffutils flex
       bison diffutils flex
       git grep make sed
       git grep make sed
-      mingw-w64-x86_64-capstone
+      mingw-w64-x86_64-binutils
+      mingw-w64-x86_64-ccache
       mingw-w64-x86_64-curl
       mingw-w64-x86_64-curl
-      mingw-w64-x86_64-cyrus-sasl
-      mingw-w64-x86_64-dtc
       mingw-w64-x86_64-gcc
       mingw-w64-x86_64-gcc
       mingw-w64-x86_64-glib2
       mingw-w64-x86_64-glib2
-      mingw-w64-x86_64-gnutls
-      mingw-w64-x86_64-gtk3
-      mingw-w64-x86_64-libgcrypt
-      mingw-w64-x86_64-libjpeg-turbo
       mingw-w64-x86_64-libnfs
       mingw-w64-x86_64-libnfs
-      mingw-w64-x86_64-libpng
       mingw-w64-x86_64-libssh
       mingw-w64-x86_64-libssh
-      mingw-w64-x86_64-libtasn1
-      mingw-w64-x86_64-libusb
-      mingw-w64-x86_64-lzo2
-      mingw-w64-x86_64-nettle
       mingw-w64-x86_64-ninja
       mingw-w64-x86_64-ninja
       mingw-w64-x86_64-pixman
       mingw-w64-x86_64-pixman
       mingw-w64-x86_64-pkgconf
       mingw-w64-x86_64-pkgconf
       mingw-w64-x86_64-python
       mingw-w64-x86_64-python
-      mingw-w64-x86_64-SDL2
-      mingw-w64-x86_64-SDL2_image
-      mingw-w64-x86_64-snappy
-      mingw-w64-x86_64-spice
-      mingw-w64-x86_64-usbredir
-      mingw-w64-x86_64-zstd "
-  - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory
-  - $env:MSYSTEM = 'MINGW64'     # Start a 64-bit MinGW environment
-  - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
-  - mkdir output
-  - cd output
-  # Note: do not remove "--without-default-devices"!
-  # commit 9f8e6cad65a6 ("gitlab-ci: Speed up the msys2-64bit job by using --without-default-devices"
-  # changed to compile QEMU with the --without-default-devices switch
-  # for the msys2 64-bit job, due to the build could not complete within
-  # the project timeout.
-  - ..\msys64\usr\bin\bash -lc '../configure --target-list=x86_64-softmmu
-      --without-default-devices --enable-fdt=system'
-  - ..\msys64\usr\bin\bash -lc 'make'
-  # qTests don't run successfully with "--without-default-devices",
-  # so let's exclude the qtests from CI for now.
-  - ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" || { cat meson-logs/testlog.txt; exit 1; } ;'
-
-msys2-32bit:
-  extends: .shared_msys2_builder
-  script:
-  - .\msys64\usr\bin\bash -lc "pacman -Sy --noconfirm --needed
-      bison diffutils flex
-      git grep make sed
-      mingw-w64-i686-capstone
-      mingw-w64-i686-curl
-      mingw-w64-i686-cyrus-sasl
-      mingw-w64-i686-dtc
-      mingw-w64-i686-gcc
-      mingw-w64-i686-glib2
-      mingw-w64-i686-gnutls
-      mingw-w64-i686-gtk3
-      mingw-w64-i686-libgcrypt
-      mingw-w64-i686-libjpeg-turbo
-      mingw-w64-i686-libnfs
-      mingw-w64-i686-libpng
-      mingw-w64-i686-libssh
-      mingw-w64-i686-libtasn1
-      mingw-w64-i686-libusb
-      mingw-w64-i686-lzo2
-      mingw-w64-i686-nettle
-      mingw-w64-i686-ninja
-      mingw-w64-i686-pixman
-      mingw-w64-i686-pkgconf
-      mingw-w64-i686-python
-      mingw-w64-i686-SDL2
-      mingw-w64-i686-SDL2_image
-      mingw-w64-i686-snappy
-      mingw-w64-i686-spice
-      mingw-w64-i686-usbredir
-      mingw-w64-i686-zstd "
+      mingw-w64-x86_64-zstd"
+  - Write-Output "Running build at $(Get-Date -Format u)"
   - $env:CHERE_INVOKING = 'yes'  # Preserve the current working directory
-  - $env:MSYSTEM = 'MINGW32'     # Start a 32-bit MinGW environment
   - $env:MSYS = 'winsymlinks:native' # Enable native Windows symlink
-  - mkdir output
-  - cd output
-  - ..\msys64\usr\bin\bash -lc '../configure --target-list=ppc64-softmmu
-                                --enable-fdt=system'
-  - ..\msys64\usr\bin\bash -lc 'make'
-  - ..\msys64\usr\bin\bash -lc 'make check MTESTARGS=\"--no-suite qtest\" ||
-                                { cat meson-logs/testlog.txt; exit 1; }'
+  - $env:CCACHE_BASEDIR = "$env:CI_PROJECT_DIR"
+  - $env:CCACHE_DIR = "$env:CCACHE_BASEDIR/ccache"
+  - $env:CCACHE_MAXSIZE = "500M"
+  - $env:CCACHE_DEPEND = 1 # cache misses are too expensive with preprocessor mode
+  - $env:CC = "ccache gcc"
+  - mkdir build
+  - cd build
+  - ..\msys64\usr\bin\bash -lc "ccache --zero-stats"
+  - ..\msys64\usr\bin\bash -lc "../configure $CONFIGURE_ARGS"
+  - ..\msys64\usr\bin\bash -lc "make"
+  - ..\msys64\usr\bin\bash -lc "make check MTESTARGS='$TEST_ARGS' || { cat meson-logs/testlog.txt; exit 1; } ;"
+  - ..\msys64\usr\bin\bash -lc "ccache --show-stats"
+  - Write-Output "Finished build at $(Get-Date -Format u)"

+ 0 - 21
.gitmodules

@@ -13,9 +13,6 @@
 [submodule "roms/qemu-palcode"]
 [submodule "roms/qemu-palcode"]
 	path = roms/qemu-palcode
 	path = roms/qemu-palcode
 	url = https://gitlab.com/qemu-project/qemu-palcode.git
 	url = https://gitlab.com/qemu-project/qemu-palcode.git
-[submodule "dtc"]
-	path = dtc
-	url = https://gitlab.com/qemu-project/dtc.git
 [submodule "roms/u-boot"]
 [submodule "roms/u-boot"]
 	path = roms/u-boot
 	path = roms/u-boot
 	url = https://gitlab.com/qemu-project/u-boot.git
 	url = https://gitlab.com/qemu-project/u-boot.git
@@ -25,21 +22,12 @@
 [submodule "roms/QemuMacDrivers"]
 [submodule "roms/QemuMacDrivers"]
 	path = roms/QemuMacDrivers
 	path = roms/QemuMacDrivers
 	url = https://gitlab.com/qemu-project/QemuMacDrivers.git
 	url = https://gitlab.com/qemu-project/QemuMacDrivers.git
-[submodule "ui/keycodemapdb"]
-	path = ui/keycodemapdb
-	url = https://gitlab.com/qemu-project/keycodemapdb.git
 [submodule "roms/seabios-hppa"]
 [submodule "roms/seabios-hppa"]
 	path = roms/seabios-hppa
 	path = roms/seabios-hppa
 	url = https://gitlab.com/qemu-project/seabios-hppa.git
 	url = https://gitlab.com/qemu-project/seabios-hppa.git
 [submodule "roms/u-boot-sam460ex"]
 [submodule "roms/u-boot-sam460ex"]
 	path = roms/u-boot-sam460ex
 	path = roms/u-boot-sam460ex
 	url = https://gitlab.com/qemu-project/u-boot-sam460ex.git
 	url = https://gitlab.com/qemu-project/u-boot-sam460ex.git
-[submodule "tests/fp/berkeley-testfloat-3"]
-	path = tests/fp/berkeley-testfloat-3
-	url = https://gitlab.com/qemu-project/berkeley-testfloat-3.git
-[submodule "tests/fp/berkeley-softfloat-3"]
-	path = tests/fp/berkeley-softfloat-3
-	url = https://gitlab.com/qemu-project/berkeley-softfloat-3.git
 [submodule "roms/edk2"]
 [submodule "roms/edk2"]
 	path = roms/edk2
 	path = roms/edk2
 	url = https://gitlab.com/qemu-project/edk2.git
 	url = https://gitlab.com/qemu-project/edk2.git
@@ -49,18 +37,9 @@
 [submodule "roms/qboot"]
 [submodule "roms/qboot"]
 	path = roms/qboot
 	path = roms/qboot
 	url = https://gitlab.com/qemu-project/qboot.git
 	url = https://gitlab.com/qemu-project/qboot.git
-[submodule "meson"]
-	path = meson
-	url = https://gitlab.com/qemu-project/meson.git
 [submodule "roms/vbootrom"]
 [submodule "roms/vbootrom"]
 	path = roms/vbootrom
 	path = roms/vbootrom
 	url = https://gitlab.com/qemu-project/vbootrom.git
 	url = https://gitlab.com/qemu-project/vbootrom.git
-[submodule "libucontext"]
-	path = subprojects/libucontext
-	url = https://github.com/utmapp/libucontext.git
 [submodule "tests/lcitool/libvirt-ci"]
 [submodule "tests/lcitool/libvirt-ci"]
 	path = tests/lcitool/libvirt-ci
 	path = tests/lcitool/libvirt-ci
 	url = https://gitlab.com/libvirt/libvirt-ci.git
 	url = https://gitlab.com/libvirt/libvirt-ci.git
-[submodule "subprojects/libvfio-user"]
-	path = subprojects/libvfio-user
-	url = https://gitlab.com/qemu-project/libvfio-user.git

+ 29 - 1
.mailmap

@@ -30,22 +30,41 @@ malc <av1474@comtv.ru> malc <malc@c046a42c-6fe2-441c-8c8c-71466251a162>
 # Corrupted Author fields
 Aaron Larson <alarson@ddci.com> alarson@ddci.com
 Andreas Färber <andreas.faerber@web.de> Andreas Färber <andreas.faerber>
+fanwenjie <fanwj@mail.ustc.edu.cn> fanwj@mail.ustc.edu.cn <fanwj@mail.ustc.edu.cn>
 Jason Wang <jasowang@redhat.com> Jason Wang <jasowang>
 Marek Dolata <mkdolata@us.ibm.com> mkdolata@us.ibm.com <mkdolata@us.ibm.com>
 Michael Ellerman <mpe@ellerman.id.au> michael@ozlabs.org <michael@ozlabs.org>
 Nick Hudson <hnick@vmware.com> hnick@vmware.com <hnick@vmware.com>
+Timothée Cocault <timothee.cocault@gmail.com> timothee.cocault@gmail.com <timothee.cocault@gmail.com>
+Stefan Weil <sw@weilnetz.de> <weil@mail.berlios.de>
+Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@kiwi.(none)>
 
 # There is also a:
 #    (no author) <(no author)@c046a42c-6fe2-441c-8c8c-71466251a162>
 # for the cvs2svn initialization commit e63c3dc74bf.
 
 # Next, translate a few commits where mailman rewrote the From: line due
-# to strict SPF, although we prefer to avoid adding more entries like that.
+# to strict SPF and DMARC.  Usually, our build process should be flagging
+# commits like these before maintainer merges; if you find the need to add
+# a line here, please also report a bug against the part of the build
+# process that let the mis-attribution slip through in the first place.
+#
+# If the mailing list munges your emails, use:
+#   git config sendemail.from '"Your Name" <your.email@example.com>'
+# the use of "" in that line will differ from the typically unquoted
+# 'git config user.name', which in turn is sufficient for 'git send-email'
+# to add an extra From: line in the body of your email that takes
+# precedence over any munged From: in the mail's headers.
+# See https://lists.openembedded.org/g/openembedded-core/message/166515
+# and https://lists.gnu.org/archive/html/qemu-devel/2023-09/msg06784.html
 Ed Swierk <eswierk@skyportsystems.com> Ed Swierk via Qemu-devel <qemu-devel@nongnu.org>
 Ian McKellar <ianloic@google.com> Ian McKellar via Qemu-devel <qemu-devel@nongnu.org>
 Julia Suvorova <jusual@mail.ru> Julia Suvorova via Qemu-devel <qemu-devel@nongnu.org>
 Justin Terry (VM) <juterry@microsoft.com> Justin Terry (VM) via Qemu-devel <qemu-devel@nongnu.org>
 Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-devel@nongnu.org>
+Stefan Weil <sw@weilnetz.de> Stefan Weil via <qemu-trivial@nongnu.org>
+Andrey Drobyshev <andrey.drobyshev@virtuozzo.com> Andrey Drobyshev via <qemu-block@nongnu.org>
+BALATON Zoltan <balaton@eik.bme.hu> BALATON Zoltan via <qemu-ppc@nongnu.org>
 
 # Next, replace old addresses by a more recent one.
 Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <aleksandar.markovic@mips.com>
@@ -54,6 +73,7 @@ Aleksandar Markovic <aleksandar.qemu.devel@gmail.com> <amarkovic@wavecomp.com>
 Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <arikalo@wavecomp.com>
 Aleksandar Rikalo <aleksandar.rikalo@syrmia.com> <aleksandar.rikalo@rt-rk.com>
 Alexander Graf <agraf@csgraf.de> <agraf@suse.de>
+Ani Sinha <anisinha@redhat.com> <ani@anisinha.ca>
 Anthony Liguori <anthony@codemonkey.ws> Anthony Liguori <aliguori@us.ibm.com>
 Christian Borntraeger <borntraeger@linux.ibm.com> <borntraeger@de.ibm.com>
 Damien Hedde <damien.hedde@dahe.fr> <damien.hedde@greensocs.com>
@@ -64,8 +84,12 @@ Greg Kurz <groug@kaod.org> <gkurz@linux.vnet.ibm.com>
 Huacai Chen <chenhuacai@kernel.org> <chenhc@lemote.com>
 Huacai Chen <chenhuacai@kernel.org> <chenhuacai@loongson.cn>
 James Hogan <jhogan@kernel.org> <james.hogan@imgtec.com>
+Juan Quintela <quintela@trasno.org> <quintela@redhat.com>
 Leif Lindholm <quic_llindhol@quicinc.com> <leif.lindholm@linaro.org>
 Leif Lindholm <quic_llindhol@quicinc.com> <leif@nuviainc.com>
+Luc Michel <luc@lmichel.fr> <luc.michel@git.antfield.fr>
+Luc Michel <luc@lmichel.fr> <luc.michel@greensocs.com>
+Luc Michel <luc@lmichel.fr> <lmichel@kalray.eu>
 Radoslaw Biernacki <rad@semihalf.com> <radoslaw.biernacki@linaro.org>
 Paul Brook <paul@nowt.org> <paul@codesourcery.com>
 Paul Burton <paulburton@kernel.org> <paul.burton@mips.com>
@@ -75,7 +99,11 @@ Paul Burton <paulburton@kernel.org> <pburton@wavecomp.com>
 Philippe Mathieu-Daudé <philmd@linaro.org> <f4bug@amsat.org>
 Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@redhat.com>
 Philippe Mathieu-Daudé <philmd@linaro.org> <philmd@fungible.com>
+Roman Bolshakov <rbolshakov@ddn.com> <r.bolshakov@yadro.com>
+Sriram Yagnaraman <sriram.yagnaraman@ericsson.com> <sriram.yagnaraman@est.tech>
 Stefan Brankovic <stefan.brankovic@syrmia.com> <stefan.brankovic@rt-rk.com.com>
+Stefan Weil <sw@weilnetz.de> Stefan Weil <stefan@weilnetz.de>
+Taylor Simpson <ltaylorsimpson@gmail.com> <tsimpson@quicinc.com>
 Yongbok Kim <yongbok.kim@mips.com> <yongbok.kim@imgtec.com>
 
 # Also list preferred name forms where people have changed their

+ 12 - 7
.readthedocs.yml

@@ -5,16 +5,21 @@
 # Required
 version: 2
 
+# Set the version of Python and other tools you might need
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3.11"
+
 # Build documentation in the docs/ directory with Sphinx
 sphinx:
   configuration: docs/conf.py
 
+# We recommend specifying your dependencies to enable reproducible builds:
+# https://docs.readthedocs.io/en/stable/guides/reproducible-builds.html
+python:
+  install:
+    - requirements: docs/requirements.txt
+
 # We want all the document formats
 formats: all
-
-# For consistency, we require that QEMU's Sphinx extensions
-# run with at least the same minimum version of Python that
-# we require for other Python in our codebase (our conf.py
-# enforces this, and some code needs it.)
-python:
-  version: 3.6

+ 22 - 23
.travis.yml

@@ -1,5 +1,5 @@
 os: linux
-dist: focal
+dist: jammy
 language: c
 compiler:
   - gcc
@@ -7,13 +7,11 @@ cache:
   # There is one cache per branch and compiler version.
   # characteristics of each job are used to identify the cache:
   # - OS name (currently only linux)
-  # - OS distribution (for Linux, bionic or focal)
+  # - OS distribution (e.g. "jammy" for Linux)
   # - Names and values of visible environment variables set in .travis.yml or Settings panel
   timeout: 1200
   ccache: true
   pip: true
-  directories:
-  - $HOME/avocado/data/cache
 
 
 # The channel name "irc.oftc.net#qemu" is encrypted against qemu/qemu
@@ -34,8 +32,8 @@ env:
     - BASE_CONFIG="--disable-docs --disable-tools"
     - TEST_BUILD_CMD=""
     - TEST_CMD="make check V=1"
-    # This is broadly a list of "mainline" softmmu targets which have support across the major distros
-    - MAIN_SOFTMMU_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
+    # This is broadly a list of "mainline" system targets which have support across the major distros
+    - MAIN_SYSTEM_TARGETS="aarch64-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
     - CCACHE_SLOPPINESS="include_file_ctime,include_file_mtime"
     - CCACHE_MAXSIZE=1G
     - G_MESSAGES_DEBUG=error
@@ -83,7 +81,6 @@ jobs:
 
     - name: "[aarch64] GCC check-tcg"
       arch: arm64
-      dist: focal
       addons:
         apt_packages:
           - libaio-dev
@@ -109,17 +106,17 @@ jobs:
           - libvdeplug-dev
           - libvte-2.91-dev
           - ninja-build
+          - python3-tomli
           # Tests dependencies
           - genisoimage
       env:
         - TEST_CMD="make check check-tcg V=1"
         - CONFIG="--disable-containers --enable-fdt=system
-                  --target-list=${MAIN_SOFTMMU_TARGETS} --cxx=/bin/false"
-        - UNRELIABLE=true
+                  --target-list=${MAIN_SYSTEM_TARGETS} --cxx=/bin/false"
 
-    - name: "[ppc64] GCC check-tcg"
+    - name: "[ppc64] Clang check-tcg"
       arch: ppc64le
       arch: ppc64le
-      dist: focal
+      compiler: clang
       addons:
       addons:
         apt_packages:
         apt_packages:
           - libaio-dev
           - libaio-dev
@@ -145,6 +142,7 @@ jobs:
           - libvdeplug-dev
           - libvte-2.91-dev
           - ninja-build
+          - python3-tomli
           # Tests dependencies
           - genisoimage
       env:
@@ -154,7 +152,6 @@ jobs:
 
     - name: "[s390x] GCC check-tcg"
       arch: s390x
-      dist: focal
       addons:
         apt_packages:
           - libaio-dev
@@ -180,13 +177,13 @@ jobs:
           - libvdeplug-dev
           - libvte-2.91-dev
           - ninja-build
+          - python3-tomli
           # Tests dependencies
           - genisoimage
       env:
         - TEST_CMD="make check check-tcg V=1"
-        - CONFIG="--disable-containers --enable-fdt=system
-                  --target-list=${MAIN_SOFTMMU_TARGETS},s390x-linux-user"
-        - UNRELIABLE=true
+        - CONFIG="--disable-containers
+            --target-list=hppa-softmmu,mips64-softmmu,ppc64-softmmu,riscv64-softmmu,s390x-softmmu,x86_64-softmmu"
       script:
         - BUILD_RC=0 && make -j${JOBS} || BUILD_RC=$?
         - |
@@ -197,9 +194,9 @@ jobs:
               $(exit $BUILD_RC);
           fi
 
-    - name: "[s390x] GCC (other-softmmu)"
+    - name: "[s390x] Clang (other-system)"
       arch: s390x
-      dist: focal
+      compiler: clang
       addons:
         apt_packages:
           - libaio-dev
@@ -220,29 +217,31 @@ jobs:
           - libsnappy-dev
           - libzstd-dev
           - nettle-dev
-          - xfslibs-dev
           - ninja-build
+          - python3-tomli
           # Tests dependencies
           - genisoimage
       env:
-        - CONFIG="--disable-containers --enable-fdt=system --audio-drv-list=sdl
-                  --disable-user --target-list-exclude=${MAIN_SOFTMMU_TARGETS}"
+        - CONFIG="--disable-containers --audio-drv-list=sdl --disable-user
+            --target-list=arm-softmmu,avr-softmmu,microblaze-softmmu,sh4eb-softmmu,sparc64-softmmu,xtensaeb-softmmu"
 
     - name: "[s390x] GCC (user)"
       arch: s390x
-      dist: focal
       addons:
         apt_packages:
           - libgcrypt20-dev
           - libglib2.0-dev
           - libgnutls28-dev
           - ninja-build
+          - flex
+          - bison
+          - python3-tomli
       env:
+        - TEST_CMD="make check check-tcg V=1"
         - CONFIG="--disable-containers --disable-system"
 
     - name: "[s390x] Clang (disable-tcg)"
     - name: "[s390x] Clang (disable-tcg)"
       arch: s390x
       arch: s390x
-      dist: focal
       compiler: clang
       compiler: clang
       addons:
       addons:
         apt_packages:
         apt_packages:
@@ -269,8 +268,8 @@ jobs:
           - libvdeplug-dev
           - libvte-2.91-dev
           - ninja-build
+          - python3-tomli
       env:
         - TEST_CMD="make check-unit"
         - CONFIG="--disable-containers --disable-tcg --enable-kvm --disable-tools
                   --enable-fdt=system --host-cc=clang --cxx=clang++"
-        - UNRELIABLE=true
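All of the entries above share the same shape, so an additional native-arch build is mostly an addons/env block; schematically (package list trimmed, values illustrative and not part of this change):

    - name: "[s390x] GCC (example)"
      arch: s390x
      addons:
        apt_packages:
          - libglib2.0-dev
          - ninja-build
          - python3-tomli
      env:
        - CONFIG="--disable-containers --target-list=${MAIN_SYSTEM_TARGETS}"
        - TEST_CMD="make check V=1"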

+ 9 - 3
Kconfig.host

@@ -11,6 +11,9 @@ config OPENGL
 config X11
     bool
 
+config PIXMAN
+    bool
+
 config SPICE
     bool
 
@@ -20,6 +23,9 @@ config IVSHMEM
 config TPM
     bool
 
+config FDT
+    bool
+
 config VHOST_USER
     bool
 
@@ -32,9 +38,6 @@ config VHOST_KERNEL
 config VIRTFS
     bool
 
-config PVRDMA
-    bool
-
 config MULTIPROCESS_ALLOWED
     bool
     imply MULTIPROCESS
@@ -46,3 +49,6 @@ config FUZZ
 config VFIO_USER_SERVER_ALLOWED
     bool
     imply VFIO_USER_SERVER
+
+config HV_BALLOON_POSSIBLE
+    bool

+ 236 - 88
MAINTAINERS

The diff for this file is too large, so it has been suppressed.

+ 51 - 42
Makefile

@@ -26,9 +26,9 @@ quiet-command-run = $(if $(V),,$(if $2,printf "  %-7s %s\n" $2 $3 && ))$1
 quiet-@ = $(if $(V),,@)
 quiet-command = $(quiet-@)$(call quiet-command-run,$1,$2,$3)
 
-UNCHECKED_GOALS := %clean TAGS cscope ctags dist \
+UNCHECKED_GOALS := TAGS gtags cscope ctags dist \
     help check-help print-% \
-    docker docker-% vm-help vm-test vm-build-%
+    docker docker-% lcitool-refresh vm-help vm-test vm-build-%
 
 all:
 .PHONY: all clean distclean recurse-all dist msi FORCE
@@ -45,18 +45,6 @@ include config-host.mak
 include Makefile.prereqs
 Makefile.prereqs: config-host.mak
 
-git-submodule-update:
-.git-submodule-status: git-submodule-update config-host.mak
-Makefile: .git-submodule-status
-
-.PHONY: git-submodule-update
-git-submodule-update:
-ifneq ($(GIT_SUBMODULES_ACTION),ignore)
-	$(call quiet-command, \
-		(GIT="$(GIT)" "$(SRC_PATH)/scripts/git-submodule.sh" $(GIT_SUBMODULES_ACTION) $(GIT_SUBMODULES)), \
-		"GIT","$(GIT_SUBMODULES)")
-endif
-
 # 0. ensure the build tree is okay
 
 # Check that we're not trying to do an out-of-tree build from
@@ -90,21 +78,23 @@ x := $(shell rm -rf meson-private meson-info meson-logs)
 endif
 
 # 1. ensure config-host.mak is up-to-date
-config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh $(SRC_PATH)/VERSION
+config-host.mak: $(SRC_PATH)/configure $(SRC_PATH)/scripts/meson-buildoptions.sh \
+		$(SRC_PATH)/pythondeps.toml $(SRC_PATH)/VERSION
 	@echo config-host.mak is out-of-date, running configure
 	@echo config-host.mak is out-of-date, running configure
 	@if test -f meson-private/coredata.dat; then \
 	@if test -f meson-private/coredata.dat; then \
 	  ./config.status --skip-meson; \
 	  ./config.status --skip-meson; \
 	else \
 	else \
-	  ./config.status && touch build.ninja.stamp; \
+	  ./config.status; \
 	fi
 	fi
 
 
 # 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
 # 2. meson.stamp exists if meson has run at least once (so ninja reconfigure
 # works), but otherwise never needs to be updated
 # works), but otherwise never needs to be updated
+
 meson-private/coredata.dat: meson.stamp
 meson-private/coredata.dat: meson.stamp
 meson.stamp: config-host.mak
 meson.stamp: config-host.mak
 	@touch meson.stamp
 	@touch meson.stamp
 
 
-# 3. ensure generated build files are up-to-date
+# 3. ensure meson-generated build files are up-to-date
 
 
 ifneq ($(NINJA),)
 ifneq ($(NINJA),)
 Makefile.ninja: build.ninja
 Makefile.ninja: build.ninja
@@ -115,15 +105,23 @@ Makefile.ninja: build.ninja
 	  $(NINJA) -t query build.ninja | sed -n '1,/^  input:/d; /^  outputs:/q; s/$$/ \\/p'; \
 	  $(NINJA) -t query build.ninja | sed -n '1,/^  input:/d; /^  outputs:/q; s/$$/ \\/p'; \
 	} > $@.tmp && mv $@.tmp $@
 	} > $@.tmp && mv $@.tmp $@
 -include Makefile.ninja
 -include Makefile.ninja
+endif
 
 
-# A separate rule is needed for Makefile dependencies to avoid -n
+ifneq ($(MESON),)
+# The path to meson always points to pyvenv/bin/meson, but the absolute
+# paths could change.  In that case, force a regeneration of build.ninja.
+# Note that this invocation of $(NINJA), just like when Make rebuilds
+# Makefiles, does not include -n.
 build.ninja: build.ninja.stamp
 build.ninja: build.ninja.stamp
 $(build-files):
 $(build-files):
 build.ninja.stamp: meson.stamp $(build-files)
 build.ninja.stamp: meson.stamp $(build-files)
-	$(NINJA) $(if $V,-v,) build.ninja && touch $@
-endif
+	@if test "$$(cat build.ninja.stamp)" = "$(MESON)" && test -n "$(NINJA)"; then \
+	  $(NINJA) build.ninja; \
+	else \
+	  echo "$(MESON) setup --reconfigure $(SRC_PATH)"; \
+	  $(MESON) setup --reconfigure $(SRC_PATH); \
+	fi && echo "$(MESON)" > $@
 
 
-ifneq ($(MESON),)
 Makefile.mtest: build.ninja scripts/mtest2make.py
 Makefile.mtest: build.ninja scripts/mtest2make.py
 	$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
 	$(MESON) introspect --targets --tests --benchmarks | $(PYTHON) scripts/mtest2make.py > $@
 -include Makefile.mtest
 -include Makefile.mtest
@@ -144,8 +142,13 @@ MAKE.n = $(findstring n,$(firstword $(filter-out --%,$(MAKEFLAGS))))
 MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
 MAKE.k = $(findstring k,$(firstword $(filter-out --%,$(MAKEFLAGS))))
 MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
 MAKE.q = $(findstring q,$(firstword $(filter-out --%,$(MAKEFLAGS))))
 MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
 MAKE.nq = $(if $(word 2, $(MAKE.n) $(MAKE.q)),nq)
-NINJAFLAGS = $(if $V,-v) $(if $(MAKE.n), -n) $(if $(MAKE.k), -k0) \
-        $(filter-out -j, $(lastword -j1 $(filter -l% -j%, $(MAKEFLAGS)))) \
+NINJAFLAGS = \
+        $(if $V,-v) \
+        $(if $(MAKE.n), -n) \
+        $(if $(MAKE.k), -k0) \
+        $(filter-out -j, \
+          $(or $(filter -l% -j%, $(MAKEFLAGS)), \
+               $(if $(filter --jobserver-auth=%, $(MAKEFLAGS)),, -j1))) \
         -d keepdepfile
         -d keepdepfile
 ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
 ninja-cmd-goals = $(or $(MAKECMDGOALS), all)
 ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
 ninja-cmd-goals += $(foreach g, $(MAKECMDGOALS), $(.ninja-goals.$g))
@@ -167,19 +170,9 @@ ifneq ($(filter $(ninja-targets), $(ninja-cmd-goals)),)
 endif
 endif
 endif
 endif
 
 
-ifeq ($(CONFIG_PLUGIN),y)
-.PHONY: plugins
-plugins:
-	$(call quiet-command,\
-		$(MAKE) $(SUBDIR_MAKEFLAGS) -C contrib/plugins V="$(V)", \
-		"BUILD", "example plugins")
-endif # $(CONFIG_PLUGIN)
-
 else # config-host.mak does not exist
 else # config-host.mak does not exist
-config-host.mak:
 ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
 ifneq ($(filter-out $(UNCHECKED_GOALS),$(MAKECMDGOALS)),$(if $(MAKECMDGOALS),,fail))
-	@echo "Please call configure before running make!"
-	@exit 1
+$(error Please call configure before running make)
 endif
 endif
 endif # config-host.mak does not exist
 endif # config-host.mak does not exist
 
 
@@ -189,15 +182,20 @@ include $(SRC_PATH)/tests/Makefile.include
 
 
 all: recurse-all
 all: recurse-all
 
 
-ROMS_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(ROMS)))
-.PHONY: $(ROMS_RULES)
-$(ROMS_RULES):
+SUBDIR_RULES=$(foreach t, all clean distclean, $(addsuffix /$(t), $(SUBDIRS)))
+.PHONY: $(SUBDIR_RULES)
+$(SUBDIR_RULES):
 	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
 	$(call quiet-command,$(MAKE) $(SUBDIR_MAKEFLAGS) -C $(dir $@) V="$(V)" TARGET_DIR="$(dir $@)" $(notdir $@),)
 
 
+ifneq ($(filter contrib/plugins, $(SUBDIRS)),)
+.PHONY: plugins
+plugins: contrib/plugins/all
+endif
+
 .PHONY: recurse-all recurse-clean
 .PHONY: recurse-all recurse-clean
-recurse-all: $(addsuffix /all, $(ROMS))
-recurse-clean: $(addsuffix /clean, $(ROMS))
-recurse-distclean: $(addsuffix /distclean, $(ROMS))
+recurse-all: $(addsuffix /all, $(SUBDIRS))
+recurse-clean: $(addsuffix /clean, $(SUBDIRS))
+recurse-distclean: $(addsuffix /distclean, $(SUBDIRS))
 
 
 ######################################################################
 ######################################################################
 
 
@@ -210,6 +208,7 @@ clean: recurse-clean
 		! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
 		! -path ./roms/edk2/ArmPkg/Library/GccLto/liblto-arm.a \
 		-exec rm {} +
 		-exec rm {} +
 	rm -f TAGS cscope.* *~ */*~
 	rm -f TAGS cscope.* *~ */*~
+	@$(MAKE) -Ctests/qemu-iotests clean
 
 
 VERSION = $(shell cat $(SRC_PATH)/VERSION)
 VERSION = $(shell cat $(SRC_PATH)/VERSION)
 
 
@@ -291,6 +290,13 @@ include $(SRC_PATH)/tests/vm/Makefile.include
 print-help-run = printf "  %-30s - %s\\n" "$1" "$2"
 print-help-run = printf "  %-30s - %s\\n" "$1" "$2"
 print-help = @$(call print-help-run,$1,$2)
 print-help = @$(call print-help-run,$1,$2)
 
 
+.PHONY: update-linux-vdso
+update-linux-vdso:
+	@for m in $(SRC_PATH)/linux-user/*/Makefile.vdso; do \
+	  $(MAKE) $(SUBDIR_MAKEFLAGS) -C $$(dirname $$m) -f Makefile.vdso \
+		SRC_PATH=$(SRC_PATH) BUILD_DIR=$(BUILD_DIR); \
+	done
+
 .PHONY: help
 .PHONY: help
 help:
 help:
 	@echo  'Generic targets:'
 	@echo  'Generic targets:'
@@ -301,7 +307,7 @@ help:
 	$(call print-help,cscope,Generate cscope index)
 	$(call print-help,cscope,Generate cscope index)
 	$(call print-help,sparse,Run sparse on the QEMU source)
 	$(call print-help,sparse,Run sparse on the QEMU source)
 	@echo  ''
 	@echo  ''
-ifeq ($(CONFIG_PLUGIN),y)
+ifneq ($(filter contrib/plugins, $(SUBDIRS)),)
 	@echo  'Plugin targets:'
 	@echo  'Plugin targets:'
 	$(call print-help,plugins,Build the example TCG plugins)
 	$(call print-help,plugins,Build the example TCG plugins)
 	@echo  ''
 	@echo  ''
@@ -311,6 +317,9 @@ endif
 	$(call print-help,distclean,Remove all generated files)
 	$(call print-help,distclean,Remove all generated files)
 	$(call print-help,dist,Build a distributable tarball)
 	$(call print-help,dist,Build a distributable tarball)
 	@echo  ''
 	@echo  ''
+	@echo  'Linux-user targets:'
+	$(call print-help,update-linux-vdso,Build linux-user vdso images)
+	@echo  ''
 	@echo  'Test targets:'
 	@echo  'Test targets:'
 	$(call print-help,check,Run all tests (check-help for details))
 	$(call print-help,check,Run all tests (check-help for details))
 	$(call print-help,bench,Run all benchmarks)
 	$(call print-help,bench,Run all benchmarks)
@@ -321,7 +330,7 @@ endif
 	@echo  'Documentation targets:'
 	@echo  'Documentation targets:'
 	$(call print-help,html man,Build documentation in specified format)
 	$(call print-help,html man,Build documentation in specified format)
 	@echo  ''
 	@echo  ''
-ifdef CONFIG_WIN32
+ifneq ($(filter msi, $(ninja-targets)),)
 	@echo  'Windows targets:'
 	@echo  'Windows targets:'
 	$(call print-help,installer,Build NSIS-based installer for QEMU)
 	$(call print-help,installer,Build NSIS-based installer for QEMU)
 	$(call print-help,msi,Build MSI-based installer for qemu-ga)
 	$(call print-help,msi,Build MSI-based installer for qemu-ga)

+ 3 - 3
README.rst

@@ -82,7 +82,7 @@ guidelines set out in the `style section
 the Developers Guide.

 Additional information on submitting patches can be found online via
-the QEMU website
+the QEMU website:

 * `<https://wiki.qemu.org/Contribute/SubmitAPatch>`_
 * `<https://wiki.qemu.org/Contribute/TrivialPatches>`_
@@ -102,7 +102,7 @@ requires a working 'git send-email' setup, and by default doesn't
 automate everything, so you may want to go through the above steps
 manually for once.

-For installation instructions, please go to
+For installation instructions, please go to:

 *  `<https://github.com/stefanha/git-publish>`_

@@ -159,7 +159,7 @@ Contact
 =======

 The QEMU community can be contacted in a number of ways, with the two
-main methods being email and IRC
+main methods being email and IRC:

 * `<mailto:qemu-devel@nongnu.org>`_
 * `<https://lists.nongnu.org/mailman/listinfo/qemu-devel>`_

+ 1 - 1
VERSION

@@ -1 +1 @@
-8.0.2
+9.1.0

+ 1 - 3
accel/Kconfig

@@ -4,9 +4,6 @@ config WHPX
 config NVMM
     bool

-config HAX
-    bool
-
 config HVF
     bool

@@ -19,3 +16,4 @@ config KVM
 config XEN
     bool
     select FSDEV_9P if VIRTFS
+    select XEN_BUS

+ 5 - 5
accel/accel-blocker.c

@@ -41,7 +41,7 @@ void accel_blocker_init(void)

 void accel_ioctl_begin(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }

@@ -51,7 +51,7 @@ void accel_ioctl_begin(void)

 void accel_ioctl_end(void)
 {
-    if (likely(qemu_mutex_iothread_locked())) {
+    if (likely(bql_locked())) {
         return;
     }

@@ -62,7 +62,7 @@ void accel_ioctl_end(void)

 void accel_cpu_ioctl_begin(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }

@@ -72,7 +72,7 @@ void accel_cpu_ioctl_begin(CPUState *cpu)

 void accel_cpu_ioctl_end(CPUState *cpu)
 {
-    if (unlikely(qemu_mutex_iothread_locked())) {
+    if (unlikely(bql_locked())) {
         return;
     }

@@ -105,7 +105,7 @@ void accel_ioctl_inhibit_begin(void)
      * We allow to inhibit only when holding the BQL, so we can identify
      * when an inhibitor wants to issue an ioctl easily.
      */
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());

     /* Block further invocations of the ioctls outside the BQL.  */
     CPU_FOREACH(cpu) {

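The accel-blocker.c and vCPU-thread hunks in this merge are part of a tree-wide rename of the Big QEMU Lock helpers. A minimal sketch of the new convention, assuming the declarations exported via "qemu/main-loop.h" in this release (bql_lock(), bql_unlock(), bql_locked()); the helper name ioctl_with_bql() is purely illustrative and not part of the diff:

#include "qemu/osdep.h"
#include "qemu/main-loop.h"   /* bql_lock(), bql_unlock(), bql_locked() */

/* Illustrative helper: take the BQL only if the caller does not already hold it. */
static void ioctl_with_bql(void (*do_ioctl)(void))
{
    bool release = false;

    if (!bql_locked()) {      /* formerly qemu_mutex_iothread_locked() */
        bql_lock();           /* formerly qemu_mutex_lock_iothread()   */
        release = true;
    }
    do_ioctl();
    if (release) {
        bql_unlock();         /* formerly qemu_mutex_unlock_iothread() */
    }
}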
+ 4 - 4
accel/accel-softmmu.c → accel/accel-system.c

@@ -28,7 +28,7 @@
 #include "hw/boards.h"
 #include "sysemu/cpus.h"
 #include "qemu/error-report.h"
-#include "accel-softmmu.h"
+#include "accel-system.h"

 int accel_init_machine(AccelState *accel, MachineState *ms)
 {
@@ -62,7 +62,7 @@ void accel_setup_post(MachineState *ms)
 }

 /* initialize the arch-independent accel operation interfaces */
-void accel_init_ops_interfaces(AccelClass *ac)
+void accel_system_init_ops_interfaces(AccelClass *ac)
 {
     const char *ac_name;
     char *ops_name;
@@ -99,8 +99,8 @@ static const TypeInfo accel_ops_type_info = {
     .class_size = sizeof(AccelOpsClass),
 };

-static void accel_softmmu_register_types(void)
+static void accel_system_register_types(void)
 {
     type_register_static(&accel_ops_type_info);
 }
-type_init(accel_softmmu_register_types);
+type_init(accel_system_register_types);

+ 4 - 4
accel/accel-softmmu.h → accel/accel-system.h

@@ -7,9 +7,9 @@
  * See the COPYING file in the top-level directory.
  */

-#ifndef ACCEL_SOFTMMU_H
-#define ACCEL_SOFTMMU_H
+#ifndef ACCEL_SYSTEM_H
+#define ACCEL_SYSTEM_H

-void accel_init_ops_interfaces(AccelClass *ac);
+void accel_system_init_ops_interfaces(AccelClass *ac);

-#endif /* ACCEL_SOFTMMU_H */
+#endif /* ACCEL_SYSTEM_H */

+ 26 - 5
accel/accel-common.c → accel/accel-target.c

@@ -30,7 +30,7 @@
 #include "hw/core/accel-cpu.h"

 #ifndef CONFIG_USER_ONLY
-#include "accel-softmmu.h"
+#include "accel-system.h"
 #endif /* !CONFIG_USER_ONLY */

 static const TypeInfo accel_type = {
@@ -104,7 +104,7 @@ static void accel_init_cpu_interfaces(AccelClass *ac)
 void accel_init_interfaces(AccelClass *ac)
 {
 #ifndef CONFIG_USER_ONLY
-    accel_init_ops_interfaces(ac);
+    accel_system_init_ops_interfaces(ac);
 #endif /* !CONFIG_USER_ONLY */

     accel_init_cpu_interfaces(ac);
@@ -119,16 +119,37 @@ void accel_cpu_instance_init(CPUState *cpu)
     }
 }

-bool accel_cpu_realizefn(CPUState *cpu, Error **errp)
+bool accel_cpu_common_realize(CPUState *cpu, Error **errp)
 {
     CPUClass *cc = CPU_GET_CLASS(cpu);
+    AccelState *accel = current_accel();
+    AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+    /* target specific realization */
+    if (cc->accel_cpu && cc->accel_cpu->cpu_target_realize
+        && !cc->accel_cpu->cpu_target_realize(cpu, errp)) {
+        return false;
+    }

-    if (cc->accel_cpu && cc->accel_cpu->cpu_realizefn) {
-        return cc->accel_cpu->cpu_realizefn(cpu, errp);
+    /* generic realization */
+    if (acc->cpu_common_realize && !acc->cpu_common_realize(cpu, errp)) {
+        return false;
     }
+
     return true;
 }

+void accel_cpu_common_unrealize(CPUState *cpu)
+{
+    AccelState *accel = current_accel();
+    AccelClass *acc = ACCEL_GET_CLASS(accel);
+
+    /* generic unrealization */
+    if (acc->cpu_common_unrealize) {
+        acc->cpu_common_unrealize(cpu);
+    }
+}
+
 int accel_supported_gdbstub_sstep_flags(void)
 {
     AccelState *accel = current_accel();

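For context on the cpu_common_realize/cpu_common_unrealize hooks introduced above, a hypothetical accelerator class might wire them up as sketched below. The type name "demo" and both callback bodies are invented for illustration; only the AccelClass field names come from the diff.

#include "qemu/osdep.h"
#include "qemu/accel.h"      /* AccelClass, ACCEL_CLASS() */
#include "hw/core/cpu.h"
#include "qapi/error.h"

/* Hypothetical accelerator filling in the per-CPU realize/unrealize hooks. */
static bool demo_accel_cpu_realize(CPUState *cpu, Error **errp)
{
    /* allocate per-vCPU accelerator state here */
    return true;
}

static void demo_accel_cpu_unrealize(CPUState *cpu)
{
    /* free per-vCPU accelerator state here */
}

static void demo_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);

    ac->name = "demo";
    /* invoked from accel_cpu_common_realize()/accel_cpu_common_unrealize() */
    ac->cpu_common_realize = demo_accel_cpu_realize;
    ac->cpu_common_unrealize = demo_accel_cpu_unrealize;
}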
+ 4 - 8
accel/dummy-cpus.c

@@ -24,10 +24,9 @@ static void *dummy_cpu_thread_fn(void *arg)

     rcu_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
     current_cpu = cpu;

 #ifndef _WIN32
@@ -43,7 +42,7 @@ static void *dummy_cpu_thread_fn(void *arg)
     qemu_guest_random_seed_thread_part2(cpu->random_seed);

     do {
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
 #ifndef _WIN32
         do {
             int sig;
@@ -56,11 +55,11 @@ static void *dummy_cpu_thread_fn(void *arg)
 #else
         qemu_sem_wait(&cpu->sem);
 #endif
-        qemu_mutex_lock_iothread();
+        bql_lock();
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug);

-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -69,9 +68,6 @@ void dummy_start_vcpu_thread(CPUState *cpu)
 {
     char thread_name[VCPU_THREAD_NAME_SIZE];

-    cpu->thread = g_malloc0(sizeof(QemuThread));
-    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
-    qemu_cond_init(cpu->halt_cond);
     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/DUMMY",
              cpu->cpu_index);
     qemu_thread_create(cpu->thread, thread_name, dummy_cpu_thread_fn, cpu,

+ 147 - 38
accel/hvf/hvf-accel-ops.c

@@ -52,6 +52,7 @@
 #include "qemu/main-loop.h"
 #include "exec/address-spaces.h"
 #include "exec/exec-all.h"
+#include "gdbstub/enums.h"
 #include "sysemu/cpus.h"
 #include "sysemu/hvf.h"
 #include "sysemu/hvf_int.h"
@@ -60,6 +61,7 @@
 #include "hw/boards.h"

 HVFState *hvf_state;
+bool hvf_tso_mode = 0;

 /* Memory slots */

@@ -200,15 +202,15 @@ static void hvf_set_phys_mem(MemoryRegionSection *section, bool add)

 static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
 {
-    if (!cpu->vcpu_dirty) {
+    if (!cpu->accel->dirty) {
         hvf_get_registers(cpu);
-        cpu->vcpu_dirty = true;
+        cpu->accel->dirty = true;
     }
 }

 static void hvf_cpu_synchronize_state(CPUState *cpu)
 {
-    if (!cpu->vcpu_dirty) {
+    if (!cpu->accel->dirty) {
         run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
     }
 }
@@ -217,7 +219,7 @@ static void do_hvf_cpu_synchronize_set_dirty(CPUState *cpu,
                                              run_on_cpu_data arg)
 {
     /* QEMU state is the reference, push it to HVF now and on next entry */
-    cpu->vcpu_dirty = true;
+    cpu->accel->dirty = true;
 }

 static void hvf_cpu_synchronize_post_reset(CPUState *cpu)
@@ -300,7 +302,7 @@ static void hvf_region_del(MemoryListener *listener,

 static MemoryListener hvf_memory_listener = {
     .name = "hvf",
-    .priority = 10,
+    .priority = MEMORY_LISTENER_PRIORITY_ACCEL,
     .region_add = hvf_region_add,
     .region_del = hvf_region_del,
     .log_start = hvf_log_start,
@@ -316,12 +318,23 @@ bool hvf_allowed;

 static int hvf_accel_init(MachineState *ms)
 {
+    int x;
     hv_return_t ret;
-    HVFState *s = HVF_STATE(ms->accelerator);
+    HVFState *s;

-    ret = hvf_arch_vm_create(s);
+    ret = hvf_arch_vm_create();
     assert_hvf_ok(ret);

+    s = g_new0(HVFState, 1);
+
+    s->num_slots = ARRAY_SIZE(s->slots);
+    for (x = 0; x < s->num_slots; ++x) {
+        s->slots[x].size = 0;
+        s->slots[x].slot_id = x;
+    }
+
+    QTAILQ_INIT(&s->hvf_sw_breakpoints);
+
     hvf_state = s;
     memory_listener_register(&hvf_memory_listener, &address_space_memory);

@@ -332,28 +345,19 @@ static int hvf_accel_init(MachineState *ms)

 static bool hvf_get_tso(Object *obj, Error **errp)
 {
-    HVFState *s = HVF_STATE(obj);
-    return s->tso_mode;
+    return hvf_tso_mode;
 }

 static void hvf_set_tso(Object *obj, bool value, Error **errp)
 {
-    HVFState *s = HVF_STATE(obj);
-    s->tso_mode = value;
+    hvf_tso_mode = value;
 }

 #endif

-static void hvf_accel_instance_init(Object *obj)
+static inline int hvf_gdbstub_sstep_flags(void)
 {
-    int x;
-    HVFState *s = HVF_STATE(obj);
-
-    s->num_slots = ARRAY_SIZE(s->slots);
-    for (x = 0; x < s->num_slots; ++x) {
-        s->slots[x].size = 0;
-        s->slots[x].slot_id = x;
-    }
+    return SSTEP_ENABLE | SSTEP_NOIRQ;
 }

 static void hvf_accel_class_init(ObjectClass *oc, void *data)
@@ -362,6 +366,7 @@ static void hvf_accel_class_init(ObjectClass *oc, void *data)
     ac->name = "HVF";
     ac->init_machine = hvf_accel_init;
     ac->allowed = &hvf_allowed;
+    ac->gdbstub_supported_sstep_flags = hvf_gdbstub_sstep_flags;

 #if defined(CONFIG_HVF_PRIVATE) && defined(__aarch64__)
     object_class_property_add_bool(oc, "tso",
@@ -374,9 +379,7 @@ static void hvf_accel_class_init(ObjectClass *oc, void *data)
 static const TypeInfo hvf_accel_type = {
     .name = TYPE_HVF_ACCEL,
     .parent = TYPE_ACCEL,
-    .instance_init = hvf_accel_instance_init,
     .class_init = hvf_accel_class_init,
-    .instance_size = sizeof(HVFState),
 };

 static void hvf_type_init(void)
@@ -388,19 +391,19 @@ type_init(hvf_type_init);

 static void hvf_vcpu_destroy(CPUState *cpu)
 {
-    hv_return_t ret = hv_vcpu_destroy(cpu->hvf->fd);
+    hv_return_t ret = hv_vcpu_destroy(cpu->accel->fd);
     assert_hvf_ok(ret);

     hvf_arch_vcpu_destroy(cpu);
-    g_free(cpu->hvf);
-    cpu->hvf = NULL;
+    g_free(cpu->accel);
+    cpu->accel = NULL;
 }

 static int hvf_init_vcpu(CPUState *cpu)
 {
     int r;

-    cpu->hvf = g_malloc0(sizeof(*cpu->hvf));
+    cpu->accel = g_new0(AccelCPUState, 1);

     /* init cpu signals */
     struct sigaction sigact;
@@ -409,17 +412,20 @@ static int hvf_init_vcpu(CPUState *cpu)
     sigact.sa_handler = dummy_signal;
     sigaction(SIG_IPI, &sigact, NULL);

-    pthread_sigmask(SIG_BLOCK, NULL, &cpu->hvf->unblock_ipi_mask);
-    sigdelset(&cpu->hvf->unblock_ipi_mask, SIG_IPI);
+    pthread_sigmask(SIG_BLOCK, NULL, &cpu->accel->unblock_ipi_mask);
+    sigdelset(&cpu->accel->unblock_ipi_mask, SIG_IPI);

 #ifdef __aarch64__
-    r = hv_vcpu_create(&cpu->hvf->fd, (hv_vcpu_exit_t **)&cpu->hvf->exit, NULL);
+    r = hv_vcpu_create(&cpu->accel->fd,
+                       (hv_vcpu_exit_t **)&cpu->accel->exit, NULL);
 #else
-    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf->fd, HV_VCPU_DEFAULT);
+    r = hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT);
 #endif
-    cpu->vcpu_dirty = 1;
+    cpu->accel->dirty = true;
     assert_hvf_ok(r);

+    cpu->accel->guest_debug_enabled = false;
+
     return hvf_arch_init_vcpu(cpu);
 }

@@ -437,11 +443,10 @@ static void *hvf_cpu_thread_fn(void *arg)

     rcu_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
     current_cpu = cpu;

     hvf_init_vcpu(cpu);
@@ -462,7 +467,7 @@ static void *hvf_cpu_thread_fn(void *arg)

     hvf_vcpu_destroy(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -477,16 +482,114 @@ static void hvf_start_vcpu_thread(CPUState *cpu)
      */
     assert(hvf_enabled());

-    cpu->thread = g_malloc0(sizeof(QemuThread));
-    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
-    qemu_cond_init(cpu->halt_cond);
-
     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/HVF",
              cpu->cpu_index);
    qemu_thread_create(cpu->thread, thread_name, hvf_cpu_thread_fn,
                       cpu, QEMU_THREAD_JOINABLE);
 }

+static int hvf_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
+{
+    struct hvf_sw_breakpoint *bp;
+    int err;
+
+    if (type == GDB_BREAKPOINT_SW) {
+        bp = hvf_find_sw_breakpoint(cpu, addr);
+        if (bp) {
+            bp->use_count++;
+            return 0;
+        }
+
+        bp = g_new(struct hvf_sw_breakpoint, 1);
+        bp->pc = addr;
+        bp->use_count = 1;
+        err = hvf_arch_insert_sw_breakpoint(cpu, bp);
+        if (err) {
+            g_free(bp);
+            return err;
+        }
+
+        QTAILQ_INSERT_HEAD(&hvf_state->hvf_sw_breakpoints, bp, entry);
+    } else {
+        err = hvf_arch_insert_hw_breakpoint(addr, len, type);
+        if (err) {
+            return err;
+        }
+    }
+
+    CPU_FOREACH(cpu) {
+        err = hvf_update_guest_debug(cpu);
+        if (err) {
+            return err;
+        }
+    }
+    return 0;
+}
+
+static int hvf_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len)
+{
+    struct hvf_sw_breakpoint *bp;
+    int err;
+
+    if (type == GDB_BREAKPOINT_SW) {
+        bp = hvf_find_sw_breakpoint(cpu, addr);
+        if (!bp) {
+            return -ENOENT;
+        }
+
+        if (bp->use_count > 1) {
+            bp->use_count--;
+            return 0;
+        }
+
+        err = hvf_arch_remove_sw_breakpoint(cpu, bp);
+        if (err) {
+            return err;
+        }
+
+        QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
+        g_free(bp);
+    } else {
+        err = hvf_arch_remove_hw_breakpoint(addr, len, type);
+        if (err) {
+            return err;
+        }
+    }
+
+    CPU_FOREACH(cpu) {
+        err = hvf_update_guest_debug(cpu);
+        if (err) {
+            return err;
+        }
+    }
+    return 0;
+}
+
+static void hvf_remove_all_breakpoints(CPUState *cpu)
+{
+    struct hvf_sw_breakpoint *bp, *next;
+    CPUState *tmpcpu;
+
+    QTAILQ_FOREACH_SAFE(bp, &hvf_state->hvf_sw_breakpoints, entry, next) {
+        if (hvf_arch_remove_sw_breakpoint(cpu, bp) != 0) {
+            /* Try harder to find a CPU that currently sees the breakpoint. */
+            CPU_FOREACH(tmpcpu)
+            {
+                if (hvf_arch_remove_sw_breakpoint(tmpcpu, bp) == 0) {
+                    break;
+                }
+            }
+        }
+        QTAILQ_REMOVE(&hvf_state->hvf_sw_breakpoints, bp, entry);
+        g_free(bp);
+    }
+    hvf_arch_remove_all_hw_breakpoints();
+
+    CPU_FOREACH(cpu) {
+        hvf_update_guest_debug(cpu);
+    }
+}
+
 static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
 {
     AccelOpsClass *ops = ACCEL_OPS_CLASS(oc);
@@ -498,6 +601,12 @@ static void hvf_accel_ops_class_init(ObjectClass *oc, void *data)
     ops->synchronize_post_init = hvf_cpu_synchronize_post_init;
     ops->synchronize_state = hvf_cpu_synchronize_state;
     ops->synchronize_pre_loadvm = hvf_cpu_synchronize_pre_loadvm;
+
+    ops->insert_breakpoint = hvf_insert_breakpoint;
+    ops->remove_breakpoint = hvf_remove_breakpoint;
+    ops->remove_all_breakpoints = hvf_remove_all_breakpoints;
+    ops->update_guest_debug = hvf_update_guest_debug;
+    ops->supports_guest_debug = hvf_arch_supports_guest_debug;
 };
 static const TypeInfo hvf_accel_ops_type = {
     .name = ACCEL_OPS_NAME("hvf"),

+ 42 - 23
accel/hvf/hvf-all.c

@@ -13,34 +13,53 @@
 #include "sysemu/hvf.h"
 #include "sysemu/hvf_int.h"

-void assert_hvf_ok(hv_return_t ret)
+const char *hvf_return_string(hv_return_t ret)
+{
+    switch (ret) {
+    case HV_SUCCESS:      return "HV_SUCCESS";
+    case HV_ERROR:        return "HV_ERROR";
+    case HV_BUSY:         return "HV_BUSY";
+    case HV_BAD_ARGUMENT: return "HV_BAD_ARGUMENT";
+    case HV_NO_RESOURCES: return "HV_NO_RESOURCES";
+    case HV_NO_DEVICE:    return "HV_NO_DEVICE";
+    case HV_UNSUPPORTED:  return "HV_UNSUPPORTED";
+    case HV_DENIED:       return "HV_DENIED";
+    default:              return "[unknown hv_return value]";
+    }
+}
+
+void assert_hvf_ok_impl(hv_return_t ret, const char *file, unsigned int line,
+                        const char *exp)
 {
     if (ret == HV_SUCCESS) {
         return;
     }

-    switch (ret) {
-    case HV_ERROR:
-        error_report("Error: HV_ERROR");
-        break;
-    case HV_BUSY:
-        error_report("Error: HV_BUSY");
-        break;
-    case HV_BAD_ARGUMENT:
-        error_report("Error: HV_BAD_ARGUMENT");
-        break;
-    case HV_NO_RESOURCES:
-        error_report("Error: HV_NO_RESOURCES");
-        break;
-    case HV_NO_DEVICE:
-        error_report("Error: HV_NO_DEVICE");
-        break;
-    case HV_UNSUPPORTED:
-        error_report("Error: HV_UNSUPPORTED");
-        break;
-    default:
-        error_report("Unknown Error");
-    }
+    error_report("Error: %s = %s (0x%x, at %s:%u)",
+        exp, hvf_return_string(ret), ret, file, line);

     abort();
 }
+
+struct hvf_sw_breakpoint *hvf_find_sw_breakpoint(CPUState *cpu, vaddr pc)
+{
+    struct hvf_sw_breakpoint *bp;
+
+    QTAILQ_FOREACH(bp, &hvf_state->hvf_sw_breakpoints, entry) {
+        if (bp->pc == pc) {
+            return bp;
+        }
+    }
+    return NULL;
+}
+
+int hvf_sw_breakpoints_active(CPUState *cpu)
+{
+    return !QTAILQ_EMPTY(&hvf_state->hvf_sw_breakpoints);
+}
+
+int hvf_update_guest_debug(CPUState *cpu)
+{
+    hvf_arch_update_guest_debug(cpu);
+    return 0;
+}

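The new assert_hvf_ok_impl() above is presumably reached through a wrapper macro that captures the call site and the checked expression. A sketch of such a wrapper under that assumption (the real macro lives in sysemu/hvf_int.h and may differ in detail):

/* Sketch of a wrapper matching assert_hvf_ok_impl()'s signature. */
#define assert_hvf_ok(EX) \
    assert_hvf_ok_impl((EX), __FILE__, __LINE__, #EX)

/*
 * With this, a failing call now reports the expression text, the symbolic
 * hv_return_t value and the file:line of the caller, e.g.:
 *   assert_hvf_ok(hv_vcpu_create(&cpu->accel->fd, HV_VCPU_DEFAULT));
 */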
+ 5 - 9
accel/kvm/kvm-accel-ops.c

@@ -33,10 +33,9 @@ static void *kvm_vcpu_thread_fn(void *arg)

     rcu_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);
     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
     current_cpu = cpu;

     r = kvm_init_vcpu(cpu, &error_fatal);
@@ -58,7 +57,7 @@

     kvm_destroy_vcpu(cpu);
     cpu_thread_signal_destroyed(cpu);
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     rcu_unregister_thread();
     return NULL;
 }
@@ -67,9 +66,6 @@ static void kvm_start_vcpu_thread(CPUState *cpu)
 {
     char thread_name[VCPU_THREAD_NAME_SIZE];

-    cpu->thread = g_malloc0(sizeof(QemuThread));
-    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
-    qemu_cond_init(cpu->halt_cond);
     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/KVM",
              cpu->cpu_index);
     qemu_thread_create(cpu->thread, thread_name, kvm_vcpu_thread_fn,
@@ -83,10 +79,10 @@ static bool kvm_vcpu_thread_is_idle(CPUState *cpu)

 static bool kvm_cpus_are_resettable(void)
 {
-    return !kvm_enabled() || kvm_cpu_check_are_resettable();
+    return !kvm_enabled() || !kvm_state->guest_state_protected;
 }

-#ifdef KVM_CAP_SET_GUEST_DEBUG
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
 static int kvm_update_guest_debug_ops(CPUState *cpu)
 {
     return kvm_update_guest_debug(cpu, 0);
@@ -105,7 +101,7 @@ static void kvm_accel_ops_class_init(ObjectClass *oc, void *data)
     ops->synchronize_state = kvm_cpu_synchronize_state;
     ops->synchronize_pre_loadvm = kvm_cpu_synchronize_pre_loadvm;

-#ifdef KVM_CAP_SET_GUEST_DEBUG
+#ifdef TARGET_KVM_HAVE_GUEST_DEBUG
     ops->update_guest_debug = kvm_update_guest_debug_ops;
     ops->supports_guest_debug = kvm_supports_guest_debug;
     ops->insert_breakpoint = kvm_insert_breakpoint;

The diff for this file has been suppressed because it is too large
+ 322 - 314
accel/kvm/kvm-all.c


+ 0 - 1
accel/kvm/kvm-cpus.h

@@ -22,5 +22,4 @@ bool kvm_supports_guest_debug(void);
 int kvm_insert_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
 int kvm_remove_breakpoint(CPUState *cpu, int type, vaddr addr, vaddr len);
 void kvm_remove_all_breakpoints(CPUState *cpu);
-
 #endif /* KVM_CPUS_H */

+ 12 - 2
accel/kvm/trace-events

@@ -9,13 +9,17 @@ kvm_device_ioctl(int fd, int type, void *arg) "dev fd %d, type 0x%x, arg %p"
 kvm_failed_reg_get(uint64_t id, const char *msg) "Warning: Unable to retrieve ONEREG %" PRIu64 " from KVM: %s"
 kvm_failed_reg_set(uint64_t id, const char *msg) "Warning: Unable to set ONEREG %" PRIu64 " to KVM: %s"
 kvm_init_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_create_vcpu(int cpu_index, unsigned long arch_cpu_id, int kvm_fd) "index: %d, id: %lu, kvm fd: %d"
+kvm_destroy_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_park_vcpu(int cpu_index, unsigned long arch_cpu_id) "index: %d id: %lu"
+kvm_unpark_vcpu(unsigned long arch_cpu_id, const char *msg) "id: %lu %s"
 kvm_irqchip_commit_routes(void) ""
 kvm_irqchip_add_msi_route(char *name, int vector, int virq) "dev %s vector %d virq %d"
 kvm_irqchip_update_msi_route(int virq) "Updating MSI route virq=%d"
 kvm_irqchip_release_virq(int virq) "virq %d"
 kvm_set_ioeventfd_mmio(int fd, uint64_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%" PRIx64 " val=0x%x assign: %d size: %d match: %d"
 kvm_set_ioeventfd_pio(int fd, uint16_t addr, uint32_t val, bool assign, uint32_t size, bool datamatch) "fd: %d @0x%x val=0x%x assign: %d size: %d match: %d"
-kvm_set_user_memory(uint32_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, int ret) "Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " ret=%d"
+kvm_set_user_memory(uint16_t as, uint16_t slot, uint32_t flags, uint64_t guest_phys_addr, uint64_t memory_size, uint64_t userspace_addr, uint32_t fd, uint64_t fd_offset, int ret) "AddrSpace#%d Slot#%d flags=0x%x gpa=0x%"PRIx64 " size=0x%"PRIx64 " ua=0x%"PRIx64 " guest_memfd=%d" " guest_memfd_offset=0x%" PRIx64 " ret=%d"
 kvm_clear_dirty_log(uint32_t slot, uint64_t start, uint32_t size) "slot#%"PRId32" start 0x%"PRIx64" size 0x%"PRIx32
 kvm_resample_fd_notify(int gsi) "gsi %d"
 kvm_dirty_ring_full(int id) "vcpu %d"
@@ -25,4 +29,10 @@ kvm_dirty_ring_reaper(const char *s) "%s"
 kvm_dirty_ring_reap(uint64_t count, int64_t t) "reaped %"PRIu64" pages (took %"PRIi64" us)"
 kvm_dirty_ring_reaper_kick(const char *reason) "%s"
 kvm_dirty_ring_flush(int finished) "%d"
-
+kvm_failed_get_vcpu_mmap_size(void) ""
+kvm_cpu_exec(void) ""
+kvm_interrupt_exit_request(void) ""
+kvm_io_window_exit(void) ""
+kvm_run_exit_system_event(int cpu_index, uint32_t event_type) "cpu_index %d, system_even_type %"PRIu32
+kvm_convert_memory(uint64_t start, uint64_t size, const char *msg) "start 0x%" PRIx64 " size 0x%" PRIx64 " %s"
+kvm_memory_fault(uint64_t start, uint64_t size, uint64_t flags) "start 0x%" PRIx64 " size 0x%" PRIx64 " flags 0x%" PRIx64

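Each line in a trace-events file generates a trace_<name>() helper with the declared argument types. A hedged usage sketch for the vCPU parking events added above; the surrounding function and the message string are illustrative only, not taken from the diff:

#include "qemu/osdep.h"
#include "hw/core/cpu.h"
#include "trace.h"   /* generated from accel/kvm/trace-events */

static void demo_park_and_unpark(CPUState *cpu, unsigned long arch_id)
{
    /* emitted when a vCPU fd is parked for later reuse */
    trace_kvm_park_vcpu(cpu->cpu_index, arch_id);

    /* emitted when a parked fd is looked up again (or not found) */
    trace_kvm_unpark_vcpu(arch_id, "found parked vCPU");
}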
+ 3 - 3
accel/meson.build

@@ -1,5 +1,5 @@
-specific_ss.add(files('accel-common.c', 'accel-blocker.c'))
-softmmu_ss.add(files('accel-softmmu.c'))
+specific_ss.add(files('accel-target.c'))
+system_ss.add(files('accel-system.c', 'accel-blocker.c'))
 user_ss.add(files('accel-user.c'))

 subdir('tcg')
@@ -12,4 +12,4 @@ if have_system
 endif

 # qtest
-softmmu_ss.add(files('dummy-cpus.c'))
+system_ss.add(files('dummy-cpus.c'))

+ 1 - 1
accel/qtest/meson.build

@@ -1 +1 @@
-qtest_module_ss.add(when: ['CONFIG_SOFTMMU'], if_true: files('qtest.c'))
+qtest_module_ss.add(when: ['CONFIG_SYSTEM_ONLY'], if_true: files('qtest.c'))

+ 13 - 0
accel/qtest/qtest.c

@@ -24,6 +24,18 @@
 #include "qemu/main-loop.h"
 #include "hw/core/cpu.h"

+static int64_t qtest_clock_counter;
+
+static int64_t qtest_get_virtual_clock(void)
+{
+    return qatomic_read_i64(&qtest_clock_counter);
+}
+
+static void qtest_set_virtual_clock(int64_t count)
+{
+    qatomic_set_i64(&qtest_clock_counter, count);
+}
+
 static int qtest_init_accel(MachineState *ms)
 {
     return 0;
@@ -52,6 +64,7 @@ static void qtest_accel_ops_class_init(ObjectClass *oc, void *data)

     ops->create_vcpu_thread = dummy_start_vcpu_thread;
     ops->get_virtual_clock = qtest_get_virtual_clock;
+    ops->set_virtual_clock = qtest_set_virtual_clock;
 };

 static const TypeInfo qtest_accel_ops_type = {

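The qtest change above pairs a getter with a setter for the purely software virtual clock, both built on one atomic 64-bit counter. A small sketch of the same pattern, independent of the qtest protocol plumbing; the demo_* names are illustrative:

#include "qemu/osdep.h"
#include "qemu/atomic.h"

static int64_t demo_clock_ns;   /* plays the role of qtest_clock_counter */

static int64_t demo_clock_get(void)
{
    /* readers never see a torn value, even on 32-bit hosts */
    return qatomic_read_i64(&demo_clock_ns);
}

static void demo_clock_set(int64_t ns)
{
    /* warping the clock is just an atomic store */
    qatomic_set_i64(&demo_clock_ns, ns);
}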
+ 0 - 24
accel/stubs/hax-stub.c

@@ -1,24 +0,0 @@
-/*
- * QEMU HAXM support
- *
- * Copyright (c) 2015, Intel Corporation
- *
- * Copyright 2016 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include "sysemu/hax.h"
-
-bool hax_allowed;
-
-int hax_sync_vcpus(void)
-{
-    return 0;
-}

+ 17 - 15
accel/stubs/kvm-stub.c

@@ -17,15 +17,12 @@
 KVMState *kvm_state;
 bool kvm_kernel_irqchip;
 bool kvm_async_interrupts_allowed;
-bool kvm_eventfds_allowed;
-bool kvm_irqfds_allowed;
 bool kvm_resamplefds_allowed;
 bool kvm_msi_via_irqfd_allowed;
 bool kvm_gsi_routing_allowed;
 bool kvm_gsi_direct_mapping;
 bool kvm_allowed;
 bool kvm_readonly_mem_allowed;
-bool kvm_ioeventfd_any_length_allowed;
 bool kvm_msi_use_devid;

 void kvm_flush_coalesced_mmio_buffer(void)
@@ -41,11 +38,6 @@ bool kvm_has_sync_mmu(void)
     return false;
 }

-int kvm_has_many_ioeventfds(void)
-{
-    return 0;
-}
-
 int kvm_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
 {
     return 1;
@@ -91,11 +83,6 @@ void kvm_irqchip_change_notify(void)
 {
 }

-int kvm_irqchip_add_adapter_route(KVMState *s, AdapterInfo *adapter)
-{
-    return -ENOSYS;
-}
-
 int kvm_irqchip_add_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
                                        EventNotifier *rn, int virq)
 {
@@ -108,9 +95,14 @@ int kvm_irqchip_remove_irqfd_notifier_gsi(KVMState *s, EventNotifier *n,
     return -ENOSYS;
 }

-bool kvm_has_free_slot(MachineState *ms)
+unsigned int kvm_get_max_memslots(void)
 {
-    return false;
+    return 0;
+}
+
+unsigned int kvm_get_free_memslots(void)
+{
+    return 0;
 }

 void kvm_init_cpu_signals(CPUState *cpu)
@@ -132,3 +124,13 @@ uint32_t kvm_dirty_ring_size(void)
 {
     return 0;
 }
+bool kvm_hwpoisoned_mem(void)
+{
+    return false;
+}
+
+int kvm_create_guest_memfd(uint64_t size, uint64_t flags, Error **errp)
+{
+    return -ENOSYS;
+}

+ 5 - 6
accel/stubs/meson.build

@@ -1,7 +1,6 @@
-sysemu_stubs_ss = ss.source_set()
-sysemu_stubs_ss.add(when: 'CONFIG_HAX', if_false: files('hax-stub.c'))
-sysemu_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
-sysemu_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
-sysemu_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
+system_stubs_ss = ss.source_set()
+system_stubs_ss.add(when: 'CONFIG_XEN', if_false: files('xen-stub.c'))
+system_stubs_ss.add(when: 'CONFIG_KVM', if_false: files('kvm-stub.c'))
+system_stubs_ss.add(when: 'CONFIG_TCG', if_false: files('tcg-stub.c'))
 
 
-specific_ss.add_all(when: ['CONFIG_SOFTMMU'], if_true: sysemu_stubs_ss)
+specific_ss.add_all(when: ['CONFIG_SYSTEM_ONLY'], if_true: system_stubs_ss)

+ 0 - 22
accel/stubs/tcg-stub.c

@@ -18,28 +18,6 @@ void tb_flush(CPUState *cpu)
 {
 }

-void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
-{
-}
-
-void tcg_flush_jmp_cache(CPUState *cpu)
-{
-}
-
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
-                       MMUAccessType access_type, int mmu_idx,
-                       bool nonfault, void **phost, uintptr_t retaddr)
-{
-     g_assert_not_reached();
-}
-
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
-                   MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
-{
-     /* Handled by hardware accelerator. */
-     g_assert_not_reached();
-}
-
 G_NORETURN void cpu_loop_exit(CPUState *cpu)
 {
     g_assert_not_reached();

+ 8 - 42
accel/tcg/atomic_common.c.inc

@@ -13,26 +13,12 @@
  * See the COPYING file in the top-level directory.
  */

-static void atomic_trace_rmw_post(CPUArchState *env, target_ulong addr,
+static void atomic_trace_rmw_post(CPUArchState *env, uint64_t addr,
                                   MemOpIdx oi)
 {
     qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
 }

-#if HAVE_ATOMIC128
-static void atomic_trace_ld_post(CPUArchState *env, target_ulong addr,
-                                 MemOpIdx oi)
-{
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-}
-
-static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
-                                 MemOpIdx oi)
-{
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-#endif
-
 /*
  * Atomic helpers callable from TCG.
  * These have a common interface and all defer to cpu_atomic_*
@@ -40,7 +26,7 @@ static void atomic_trace_st_post(CPUArchState *env, target_ulong addr,
  */

 #define CMPXCHG_HELPER(OP, TYPE) \
-    TYPE HELPER(atomic_##OP)(CPUArchState *env, target_ulong addr,  \
+    TYPE HELPER(atomic_##OP)(CPUArchState *env, uint64_t addr,      \
                              TYPE oldv, TYPE newv, uint32_t oi)     \
     { return cpu_atomic_##OP##_mmu(env, addr, oldv, newv, oi, GETPC()); }

@@ -55,43 +41,23 @@ CMPXCHG_HELPER(cmpxchgq_be, uint64_t)
 CMPXCHG_HELPER(cmpxchgq_le, uint64_t)
 #endif

-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
 CMPXCHG_HELPER(cmpxchgo_be, Int128)
 CMPXCHG_HELPER(cmpxchgo_le, Int128)
 #endif

 #undef CMPXCHG_HELPER

-Int128 HELPER(nonatomic_cmpxchgo_be)(CPUArchState *env, target_ulong addr,
-                                     Int128 cmpv, Int128 newv, uint32_t oi)
-{
-#if TCG_TARGET_REG_BITS == 32
-    uintptr_t ra = GETPC();
-    Int128 oldv;
-
-    oldv = cpu_ld16_be_mmu(env, addr, oi, ra);
-    if (int128_eq(oldv, cmpv)) {
-        cpu_st16_be_mmu(env, addr, newv, oi, ra);
-    } else {
-        /* Even with comparison failure, still need a write cycle. */
-        probe_write(env, addr, 16, get_mmuidx(oi), ra);
-    }
-    return oldv;
-#else
-    g_assert_not_reached();
-#endif
-}
-
-Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
-                                     Int128 cmpv, Int128 newv, uint32_t oi)
+Int128 HELPER(nonatomic_cmpxchgo)(CPUArchState *env, uint64_t addr,
+                                  Int128 cmpv, Int128 newv, uint32_t oi)
 {
 #if TCG_TARGET_REG_BITS == 32
     uintptr_t ra = GETPC();
     Int128 oldv;

-    oldv = cpu_ld16_le_mmu(env, addr, oi, ra);
+    oldv = cpu_ld16_mmu(env, addr, oi, ra);
     if (int128_eq(oldv, cmpv)) {
-        cpu_st16_le_mmu(env, addr, newv, oi, ra);
+        cpu_st16_mmu(env, addr, newv, oi, ra);
     } else {
         /* Even with comparison failure, still need a write cycle. */
         probe_write(env, addr, 16, get_mmuidx(oi), ra);
@@ -103,7 +69,7 @@ Int128 HELPER(nonatomic_cmpxchgo_le)(CPUArchState *env, target_ulong addr,
 }

 #define ATOMIC_HELPER(OP, TYPE) \
-    TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, target_ulong addr,  \
+    TYPE HELPER(glue(atomic_,OP))(CPUArchState *env, uint64_t addr,  \
                                   TYPE val, uint32_t oi)                 \
     { return glue(glue(cpu_atomic_,OP),_mmu)(env, addr, val, oi, GETPC()); }


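To make the macro change above concrete, this is roughly what one CMPXCHG_HELPER() instantiation expands to after the patch, using the usual HELPER(x) -> helper_x naming. Hand-expanded for illustration only; the real expansion is produced by the preprocessor:

/* CMPXCHG_HELPER(cmpxchgl_be, uint32_t) now expands to approximately: */
uint32_t helper_atomic_cmpxchgl_be(CPUArchState *env, uint64_t addr,
                                   uint32_t oldv, uint32_t newv, uint32_t oi)
{
    /* guest address is now a fixed-width uint64_t instead of target_ulong */
    return cpu_atomic_cmpxchgl_be_mmu(env, addr, oldv, newv, oi, GETPC());
}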
+ 28 - 85
accel/tcg/atomic_template.h

@@ -69,12 +69,12 @@
 # define END  _le
 #endif

-ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
+ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_READ | PAGE_WRITE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;

 #if DATA_SIZE == 16
@@ -87,38 +87,12 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
     return ret;
 }

-#if DATA_SIZE >= 16
-#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
-                         MemOpIdx oi, uintptr_t retaddr)
-{
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_READ, retaddr);
-    DATA_TYPE val;
-
-    val = atomic16_read(haddr);
-    ATOMIC_MMU_CLEANUP;
-    atomic_trace_ld_post(env, addr, oi);
-    return val;
-}
-
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
-                     MemOpIdx oi, uintptr_t retaddr)
-{
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_WRITE, retaddr);
-
-    atomic16_set(haddr, val);
-    ATOMIC_MMU_CLEANUP;
-    atomic_trace_st_post(env, addr, oi);
-}
-#endif
-#else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+#if DATA_SIZE < 16
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_READ | PAGE_WRITE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;

     ret = qatomic_xchg__nocheck(haddr, val);
@@ -128,12 +102,11 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
 }

 #define GEN_ATOMIC_HELPER(X)                                        \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
-                                         PAGE_READ | PAGE_WRITE, retaddr); \
-    DATA_TYPE ret;                                                  \
+    DATA_TYPE *haddr, ret;                                          \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \
     ret = qatomic_##X(haddr, val);                                  \
     ATOMIC_MMU_CLEANUP;                                             \
     atomic_trace_rmw_post(env, addr, oi);                           \
@@ -160,12 +133,11 @@ GEN_ATOMIC_HELPER(xor_fetch)
  * of CF_PARALLEL's value, we'll trace just a read and a write.
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
-                                          PAGE_READ | PAGE_WRITE, retaddr); \
-    XDATA_TYPE cmp, old, new, val = xval;                           \
+    XDATA_TYPE *haddr, cmp, old, new, val = xval;                   \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \
     smp_mb();                                                       \
     cmp = qatomic_read__nocheck(haddr);                             \
     do {                                                            \
@@ -188,7 +160,7 @@ GEN_ATOMIC_HELPER_FN(smax_fetch, MAX, SDATA_TYPE, new)
 GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
 GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
 
 #undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA SIZE >= 16 */
+#endif /* DATA SIZE < 16 */
 
 #undef END
 
@@ -202,12 +174,12 @@ GEN_ATOMIC_HELPER_FN(umax_fetch, MAX,  DATA_TYPE, new)
 # define END  _be
 #endif
 
-ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
+ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, abi_ptr addr,
                               ABI_TYPE cmpv, ABI_TYPE newv,
                               MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_READ | PAGE_WRITE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     DATA_TYPE ret;
 
 #if DATA_SIZE == 16
@@ -220,39 +192,12 @@ ABI_TYPE ATOMIC_NAME(cmpxchg)(CPUArchState *env, target_ulong addr,
     return BSWAP(ret);
 }
 
-#if DATA_SIZE >= 16
-#if HAVE_ATOMIC128
-ABI_TYPE ATOMIC_NAME(ld)(CPUArchState *env, target_ulong addr,
-                         MemOpIdx oi, uintptr_t retaddr)
-{
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_READ, retaddr);
-    DATA_TYPE val;
-
-    val = atomic16_read(haddr);
-    ATOMIC_MMU_CLEANUP;
-    atomic_trace_ld_post(env, addr, oi);
-    return BSWAP(val);
-}
-
-void ATOMIC_NAME(st)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
-                     MemOpIdx oi, uintptr_t retaddr)
-{
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_WRITE, retaddr);
-
-    val = BSWAP(val);
-    atomic16_set(haddr, val);
-    ATOMIC_MMU_CLEANUP;
-    atomic_trace_st_post(env, addr, oi);
-}
-#endif
-#else
-ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
+#if DATA_SIZE < 16
+ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, abi_ptr addr, ABI_TYPE val,
                            MemOpIdx oi, uintptr_t retaddr)
 {
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,
-                                         PAGE_READ | PAGE_WRITE, retaddr);
+    DATA_TYPE *haddr = atomic_mmu_lookup(env_cpu(env), addr, oi,
+                                         DATA_SIZE, retaddr);
     ABI_TYPE ret;
 
     ret = qatomic_xchg__nocheck(haddr, BSWAP(val));
@@ -262,12 +207,11 @@ ABI_TYPE ATOMIC_NAME(xchg)(CPUArchState *env, target_ulong addr, ABI_TYPE val,
 }
 
 #define GEN_ATOMIC_HELPER(X)                                        \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                         ABI_TYPE val, MemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    DATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE,  \
-                                         PAGE_READ | PAGE_WRITE, retaddr); \
-    DATA_TYPE ret;                                                  \
+    DATA_TYPE *haddr, ret;                                          \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \
     ret = qatomic_##X(haddr, BSWAP(val));                           \
     ATOMIC_MMU_CLEANUP;                                             \
     atomic_trace_rmw_post(env, addr, oi);                           \
@@ -291,12 +235,11 @@ GEN_ATOMIC_HELPER(xor_fetch)
  * of CF_PARALLEL's value, we'll trace just a read and a write.
  */
 #define GEN_ATOMIC_HELPER_FN(X, FN, XDATA_TYPE, RET)                \
-ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, target_ulong addr,       \
+ABI_TYPE ATOMIC_NAME(X)(CPUArchState *env, abi_ptr addr,            \
                         ABI_TYPE xval, MemOpIdx oi, uintptr_t retaddr) \
 {                                                                   \
-    XDATA_TYPE *haddr = atomic_mmu_lookup(env, addr, oi, DATA_SIZE, \
-                                          PAGE_READ | PAGE_WRITE, retaddr); \
-    XDATA_TYPE ldo, ldn, old, new, val = xval;                      \
+    XDATA_TYPE *haddr, ldo, ldn, old, new, val = xval;              \
+    haddr = atomic_mmu_lookup(env_cpu(env), addr, oi, DATA_SIZE, retaddr);   \
     smp_mb();                                                       \
     ldn = qatomic_read__nocheck(haddr);                             \
     do {                                                            \
@@ -326,7 +269,7 @@ GEN_ATOMIC_HELPER_FN(add_fetch, ADD, DATA_TYPE, new)
 #undef ADD
 
 #undef GEN_ATOMIC_HELPER_FN
-#endif /* DATA_SIZE >= 16 */
+#endif /* DATA_SIZE < 16 */
 
 #undef END
 #endif /* DATA_SIZE > 1 */

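The GEN_ATOMIC_HELPER_FN expansions in the hunks above build the fetch-and-op atomics (add, smin/smax, umin/umax) out of a compare-and-swap retry loop over the host pointer returned by atomic_mmu_lookup(). A minimal standalone sketch of that loop, written with the GCC/Clang __atomic builtins rather than QEMU's qatomic wrappers; the function name umax_fetch_u32 is invented for illustration and is not part of QEMU:

    #include <stdint.h>
    #include <stdbool.h>

    /* Sketch only: one expansion of the cmpxchg retry loop, shown for a
     * 32-bit unsigned "umax_fetch" operation on a host pointer. */
    static uint32_t umax_fetch_u32(uint32_t *haddr, uint32_t val)
    {
        uint32_t cmp = __atomic_load_n(haddr, __ATOMIC_RELAXED);
        uint32_t old, new;

        do {
            old = cmp;                      /* value we expect to find */
            new = old > val ? old : val;    /* FN(old, val), here MAX */
            /* On failure, cmp is refreshed with the current contents. */
        } while (!__atomic_compare_exchange_n(haddr, &cmp, new, false,
                                              __ATOMIC_SEQ_CST,
                                              __ATOMIC_SEQ_CST));
        return new;                         /* the *_fetch forms return new */
    }
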
+ 4 - 32
accel/tcg/cpu-exec-common.c

@@ -20,8 +20,8 @@
 #include "qemu/osdep.h"
 #include "sysemu/cpus.h"
 #include "sysemu/tcg.h"
-#include "exec/exec-all.h"
 #include "qemu/plugin.h"
+#include "internal-common.h"
 
 bool tcg_allowed;
 
@@ -32,40 +32,10 @@ void cpu_loop_exit_noexc(CPUState *cpu)
     cpu_loop_exit(cpu);
 }
 
-#if defined(CONFIG_SOFTMMU)
-void cpu_reloading_memory_map(void)
-{
-    if (qemu_in_vcpu_thread() && current_cpu->running) {
-        /* The guest can in theory prolong the RCU critical section as long
-         * as it feels like. The major problem with this is that because it
-         * can do multiple reconfigurations of the memory map within the
-         * critical section, we could potentially accumulate an unbounded
-         * collection of memory data structures awaiting reclamation.
-         *
-         * Because the only thing we're currently protecting with RCU is the
-         * memory data structures, it's sufficient to break the critical section
-         * in this callback, which we know will get called every time the
-         * memory map is rearranged.
-         *
-         * (If we add anything else in the system that uses RCU to protect
-         * its data structures, we will need to implement some other mechanism
-         * to force TCG CPUs to exit the critical section, at which point this
-         * part of this callback might become unnecessary.)
-         *
-         * This pair matches cpu_exec's rcu_read_lock()/rcu_read_unlock(), which
-         * only protects cpu->as->dispatch. Since we know our caller is about
-         * to reload it, it's safe to split the critical section.
-         */
-        rcu_read_unlock();
-        rcu_read_lock();
-    }
-}
-#endif
-
 void cpu_loop_exit(CPUState *cpu)
 {
     /* Undo the setting in cpu_tb_exec.  */
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     /* Undo any setting in generated code.  */
     qemu_plugin_disable_mem_helpers(cpu);
     siglongjmp(cpu->jmp_env, 1);
@@ -81,6 +51,8 @@ void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc)
 
 void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc)
 {
+    /* Prevent looping if already executing in a serial context. */
+    g_assert(!cpu_in_serial_context(cpu));
     cpu->exception_index = EXCP_ATOMIC;
     cpu_loop_exit_restore(cpu, pc);
 }

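cpu_loop_exit() in the hunk above unwinds back to the sigsetjmp() point in the execution loop via siglongjmp(cpu->jmp_env, 1). A small self-contained sketch of that non-local-exit pattern; the fake_* and run_guest_code names are invented for illustration and this is not QEMU code:

    #include <setjmp.h>
    #include <stdio.h>

    static sigjmp_buf jmp_env;          /* stands in for cpu->jmp_env */

    static void fake_cpu_loop_exit(int code)
    {
        siglongjmp(jmp_env, code);      /* unwind back to the setjmp point */
    }

    static int run_guest_code(void)
    {
        fake_cpu_loop_exit(2);          /* e.g. an exception was raised */
        return 0;                       /* never reached */
    }

    int main(void)
    {
        int ret = sigsetjmp(jmp_env, 0);
        if (ret != 0) {
            /* Landed here via siglongjmp: do cleanup, then leave the loop. */
            printf("unwound with code %d\n", ret);
            return 0;
        }
        return run_guest_code();
    }
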
+ 208 - 208
accel/tcg/cpu-exec.c

@@ -31,19 +31,17 @@
 #include "qemu/rcu.h"
 #include "exec/log.h"
 #include "qemu/main-loop.h"
-#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
-#include "hw/i386/apic.h"
-#endif
 #include "sysemu/cpus.h"
 #include "exec/cpu-all.h"
 #include "sysemu/cpu-timers.h"
 #include "exec/replay-core.h"
 #include "sysemu/tcg.h"
-#include "exec/helper-proto.h"
+#include "exec/helper-proto-common.h"
 #include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
 
 /* -icount align implementation. */
 
@@ -74,7 +72,7 @@ static void align_clocks(SyncClocks *sc, CPUState *cpu)
         return;
     }
 
-    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+    cpu_icount = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
     sc->last_cpu_icount = cpu_icount;
 
@@ -125,7 +123,7 @@ static void init_delay_params(SyncClocks *sc, CPUState *cpu)
     sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
     sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
     sc->last_cpu_icount
-        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
+        = cpu->icount_extra + cpu->neg.icount_decr.u16.low;
     if (sc->diff_clk < max_delay) {
         max_delay = sc->diff_clk;
     }
@@ -147,6 +145,16 @@ static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
 }
 #endif /* CONFIG USER ONLY */
 
+bool tcg_cflags_has(CPUState *cpu, uint32_t flags)
+{
+    return cpu->tcg_cflags & flags;
+}
+
+void tcg_cflags_set(CPUState *cpu, uint32_t flags)
+{
+    cpu->tcg_cflags |= flags;
+}
+
 uint32_t curr_cflags(CPUState *cpu)
 {
     uint32_t cflags = cpu->tcg_cflags;
@@ -160,7 +168,7 @@ uint32_t curr_cflags(CPUState *cpu)
      */
     if (unlikely(cpu->singlestep_enabled)) {
         cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
-    } else if (singlestep) {
+    } else if (qatomic_read(&one_insn_per_tb)) {
         cflags |= CF_NO_GOTO_TB | 1;
     } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
         cflags |= CF_NO_GOTO_TB;
@@ -170,13 +178,12 @@ uint32_t curr_cflags(CPUState *cpu)
 }
 
 struct tb_desc {
-    target_ulong pc;
-    target_ulong cs_base;
+    vaddr pc;
+    uint64_t cs_base;
     CPUArchState *env;
     tb_page_addr_t page_addr0;
     uint32_t flags;
     uint32_t cflags;
-    uint32_t trace_vcpu_dstate;
 };
 
 static bool tb_lookup_cmp(const void *p, const void *d)
@@ -188,7 +195,6 @@ static bool tb_lookup_cmp(const void *p, const void *d)
         tb_page_addr0(tb) == desc->page_addr0 &&
         tb->cs_base == desc->cs_base &&
         tb->flags == desc->flags &&
-        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
         tb_cflags(tb) == desc->cflags) {
         /* check next page if needed */
         tb_page_addr_t tb_phys_page1 = tb_page_addr1(tb);
@@ -196,7 +202,7 @@ static bool tb_lookup_cmp(const void *p, const void *d)
             return true;
         } else {
             tb_page_addr_t phys_page1;
-            target_ulong virt_page1;
+            vaddr virt_page1;
 
             /*
              * We know that the first page matched, and an otherwise valid TB
@@ -217,19 +223,18 @@ static bool tb_lookup_cmp(const void *p, const void *d)
     return false;
 }
 
-static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
-                                          target_ulong cs_base, uint32_t flags,
+static TranslationBlock *tb_htable_lookup(CPUState *cpu, vaddr pc,
+                                          uint64_t cs_base, uint32_t flags,
                                           uint32_t cflags)
 {
     tb_page_addr_t phys_pc;
     struct tb_desc desc;
     uint32_t h;
 
-    desc.env = cpu->env_ptr;
+    desc.env = cpu_env(cpu);
     desc.cs_base = cs_base;
     desc.flags = flags;
     desc.cflags = cflags;
-    desc.trace_vcpu_dstate = *cpu->trace_dstate;
     desc.pc = pc;
     phys_pc = get_page_addr_code(desc.env, pc);
     if (phys_pc == -1) {
@@ -237,14 +242,14 @@ static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
     }
     desc.page_addr0 = phys_pc;
     h = tb_hash_func(phys_pc, (cflags & CF_PCREL ? 0 : pc),
-                     flags, cflags, *cpu->trace_dstate);
+                     flags, cs_base, cflags);
     return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
 }
 
 /* Might cause an exception, so have a longjmp destination ready */
-static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
-                                          target_ulong cs_base,
-                                          uint32_t flags, uint32_t cflags)
+static inline TranslationBlock *tb_lookup(CPUState *cpu, vaddr pc,
+                                          uint64_t cs_base, uint32_t flags,
+                                          uint32_t cflags)
 {
     TranslationBlock *tb;
     CPUJumpCache *jc;
@@ -256,59 +261,42 @@ static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
     hash = tb_jmp_cache_hash_func(pc);
     jc = cpu->tb_jmp_cache;
 
-    if (cflags & CF_PCREL) {
-        /* Use acquire to ensure current load of pc from jc. */
-        tb = qatomic_load_acquire(&jc->array[hash].tb);
-
-        if (likely(tb &&
-                   jc->array[hash].pc == pc &&
-                   tb->cs_base == cs_base &&
-                   tb->flags == flags &&
-                   tb->trace_vcpu_dstate == *cpu->trace_dstate &&
-                   tb_cflags(tb) == cflags)) {
-            return tb;
-        }
-        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
-        if (tb == NULL) {
-            return NULL;
-        }
-        jc->array[hash].pc = pc;
-        /* Ensure pc is written first. */
-        qatomic_store_release(&jc->array[hash].tb, tb);
-    } else {
-        /* Use rcu_read to ensure current load of pc from *tb. */
-        tb = qatomic_rcu_read(&jc->array[hash].tb);
-
-        if (likely(tb &&
-                   tb->pc == pc &&
-                   tb->cs_base == cs_base &&
-                   tb->flags == flags &&
-                   tb->trace_vcpu_dstate == *cpu->trace_dstate &&
-                   tb_cflags(tb) == cflags)) {
-            return tb;
-        }
-        tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
-        if (tb == NULL) {
-            return NULL;
-        }
-        /* Use the pc value already stored in tb->pc. */
-        qatomic_set(&jc->array[hash].tb, tb);
+    tb = qatomic_read(&jc->array[hash].tb);
+    if (likely(tb &&
+               jc->array[hash].pc == pc &&
+               tb->cs_base == cs_base &&
+               tb->flags == flags &&
+               tb_cflags(tb) == cflags)) {
+        goto hit;
+    }
+
+    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
+    if (tb == NULL) {
+        return NULL;
     }
 
+    jc->array[hash].pc = pc;
+    qatomic_set(&jc->array[hash].tb, tb);
+
+hit:
+    /*
+     * As long as tb is not NULL, the contents are consistent.  Therefore,
+     * the virtual PC has to match for non-CF_PCREL translations.
+     */
+    assert((tb_cflags(tb) & CF_PCREL) || tb->pc == pc);
     return tb;
 }
 
-static void log_cpu_exec(target_ulong pc, CPUState *cpu,
+static void log_cpu_exec(vaddr pc, CPUState *cpu,
                          const TranslationBlock *tb)
 {
     if (qemu_log_in_addr_range(pc)) {
         qemu_log_mask(CPU_LOG_EXEC,
-                      "Trace %d: %p [" TARGET_FMT_lx
-                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
+                      "Trace %d: %p [%08" PRIx64
+                      "/%016" VADDR_PRIx "/%08x/%08x] %s\n",
                       cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                       tb->flags, tb->cflags, lookup_symbol(pc));
 
-#if defined(DEBUG_DISAS)
         if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
             FILE *logfile = qemu_log_trylock();
             if (logfile) {
@@ -320,15 +308,17 @@ static void log_cpu_exec(target_ulong pc, CPUState *cpu,
 #if defined(TARGET_I386)
                 flags |= CPU_DUMP_CCOP;
 #endif
+                if (qemu_loglevel_mask(CPU_LOG_TB_VPU)) {
+                    flags |= CPU_DUMP_VPU;
+                }
                 cpu_dump_state(cpu, logfile, flags);
                 qemu_log_unlock(logfile);
             }
         }
-#endif /* DEBUG_DISAS */
     }
 }
 
-static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
+static bool check_for_breakpoints_slow(CPUState *cpu, vaddr pc,
                                        uint32_t *cflags)
 {
     CPUBreakpoint *bp;
@@ -361,9 +351,9 @@ static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
 #ifdef CONFIG_USER_ONLY
                 g_assert_not_reached();
 #else
-                CPUClass *cc = CPU_GET_CLASS(cpu);
-                assert(cc->tcg_ops->debug_check_breakpoint);
-                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
+                const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+                assert(tcg_ops->debug_check_breakpoint);
+                match_bp = tcg_ops->debug_check_breakpoint(cpu);
 #endif
             }
 
@@ -389,12 +379,12 @@ static bool check_for_breakpoints_slow(CPUState *cpu, target_ulong pc,
      * breakpoints are removed.
      */
     if (match_page) {
-        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
+        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | CF_BP_PAGE | 1;
     }
     return false;
 }
 
-static inline bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
+static inline bool check_for_breakpoints(CPUState *cpu, vaddr pc,
                                          uint32_t *cflags)
 {
     return unlikely(!QTAILQ_EMPTY(&cpu->breakpoints)) &&
@@ -413,9 +403,18 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 {
     CPUState *cpu = env_cpu(env);
     TranslationBlock *tb;
-    target_ulong cs_base, pc;
+    vaddr pc;
+    uint64_t cs_base;
     uint32_t flags, cflags;
 
+    /*
+     * By definition we've just finished a TB, so I/O is OK.
+     * Avoid the possibility of calling cpu_io_recompile() if
+     * a page table walk triggered by tb_lookup() calling
+     * probe_access_internal() happens to touch an MMIO device.
+     * The next TB, if we chain to it, will clear the flag again.
+     */
+    cpu->neg.can_do_io = true;
     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);
 
     cflags = curr_cflags(cpu);
@@ -448,7 +447,6 @@ const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
 static inline TranslationBlock * QEMU_DISABLE_CFI
 cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 {
-    CPUArchState *env = cpu->env_ptr;
     uintptr_t ret;
     TranslationBlock *last_tb;
     const void *tb_ptr = itb->tc.ptr;
@@ -458,8 +456,8 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
     }
 
     qemu_thread_jit_execute();
-    ret = tcg_qemu_tb_exec(env, tb_ptr);
-    cpu->can_do_io = 1;
+    ret = tcg_qemu_tb_exec(cpu_env(cpu), tb_ptr);
+    cpu->neg.can_do_io = true;
     qemu_plugin_disable_mem_helpers(cpu);
     /*
      * TODO: Delay swapping back to the read-write region of the TB
@@ -479,20 +477,21 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
          * counter hit zero); we must restore the guest PC to the address
          * of the start of the TB.
          */
-        CPUClass *cc = CPU_GET_CLASS(cpu);
+        CPUClass *cc = cpu->cc;
+        const TCGCPUOps *tcg_ops = cc->tcg_ops;
 
-        if (cc->tcg_ops->synchronize_from_tb) {
-            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
+        if (tcg_ops->synchronize_from_tb) {
+            tcg_ops->synchronize_from_tb(cpu, last_tb);
         } else {
             tcg_debug_assert(!(tb_cflags(last_tb) & CF_PCREL));
             assert(cc->set_pc);
             cc->set_pc(cpu, last_tb->pc);
         }
         if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
-            target_ulong pc = log_pc(cpu, last_tb);
+            vaddr pc = log_pc(cpu, last_tb);
             if (qemu_log_in_addr_range(pc)) {
-                qemu_log("Stopped execution of TB chain before %p ["
-                         TARGET_FMT_lx "] %s\n",
+                qemu_log("Stopped execution of TB chain before %p [%016"
+                         VADDR_PRIx "] %s\n",
                          last_tb->tc.ptr, pc, lookup_symbol(pc));
             }
         }
@@ -514,27 +513,65 @@ cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
 
 static void cpu_exec_enter(CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
+    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
 
-    if (cc->tcg_ops->cpu_exec_enter) {
-        cc->tcg_ops->cpu_exec_enter(cpu);
+    if (tcg_ops->cpu_exec_enter) {
+        tcg_ops->cpu_exec_enter(cpu);
     }
 }
 
 static void cpu_exec_exit(CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
+    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
 
-    if (cc->tcg_ops->cpu_exec_exit) {
-        cc->tcg_ops->cpu_exec_exit(cpu);
+    if (tcg_ops->cpu_exec_exit) {
+        tcg_ops->cpu_exec_exit(cpu);
     }
 }
 
+static void cpu_exec_longjmp_cleanup(CPUState *cpu)
+{
+    /* Non-buggy compilers preserve this; assert the correct value. */
+    g_assert(cpu == current_cpu);
+
+#ifdef CONFIG_USER_ONLY
+    clear_helper_retaddr();
+    if (have_mmap_lock()) {
+        mmap_unlock();
+    }
+#else
+    /*
+     * For softmmu, a tlb_fill fault during translation will land here,
+     * and we need to release any page locks held.  In system mode we
+     * have one tcg_ctx per thread, so we know it was this cpu doing
+     * the translation.
+     *
+     * Alternative 1: Install a cleanup to be called via an exception
+     * handling safe longjmp.  It seems plausible that all our hosts
+     * support such a thing.  We'd have to properly register unwind info
+     * for the JIT for EH, rather that just for GDB.
+     *
+     * Alternative 2: Set and restore cpu->jmp_env in tb_gen_code to
+     * capture the cpu_loop_exit longjmp, perform the cleanup, and
+     * jump again to arrive here.
+     */
+    if (tcg_ctx->gen_tb) {
+        tb_unlock_pages(tcg_ctx->gen_tb);
+        tcg_ctx->gen_tb = NULL;
+    }
+#endif
+    if (bql_locked()) {
+        bql_unlock();
+    }
+    assert_no_pages_locked();
+}
+
 void cpu_exec_step_atomic(CPUState *cpu)
 void cpu_exec_step_atomic(CPUState *cpu)
 {
 {
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     TranslationBlock *tb;
     TranslationBlock *tb;
-    target_ulong cs_base, pc;
+    vaddr pc;
+    uint64_t cs_base;
     uint32_t flags, cflags;
     uint32_t flags, cflags;
     int tb_exit;
     int tb_exit;
 
 
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_tb_exec(cpu, tb, &tb_exit);
         cpu_exec_exit(cpu);
         cpu_exec_exit(cpu);
     } else {
     } else {
-#ifndef CONFIG_SOFTMMU
-        clear_helper_retaddr();
-        if (have_mmap_lock()) {
-            mmap_unlock();
-        }
-#endif
-        if (qemu_mutex_iothread_locked()) {
-            qemu_mutex_unlock_iothread();
-        }
-        assert_no_pages_locked();
+        cpu_exec_longjmp_cleanup(cpu);
     }
     }
 
 
     /*
@@ -651,16 +679,10 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 {
 {
 #ifndef CONFIG_USER_ONLY
 #ifndef CONFIG_USER_ONLY
     if (cpu->halted) {
     if (cpu->halted) {
-#if defined(TARGET_I386)
-        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
-            X86CPU *x86_cpu = X86_CPU(cpu);
-            qemu_mutex_lock_iothread();
-            apic_poll_irq(x86_cpu->apic_state);
-            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
-            qemu_mutex_unlock_iothread();
-        }
-#endif /* TARGET_I386 */
-        if (!cpu_has_work(cpu)) {
+        const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+        bool leave_halt = tcg_ops->cpu_exec_halt(cpu);
+
+        if (!leave_halt) {
             return true;
         }
 
@@ -673,7 +695,7 @@ static inline bool cpu_handle_halt(CPUState *cpu)
 
 static inline void cpu_handle_debug_exception(CPUState *cpu)
 {
-    CPUClass *cc = CPU_GET_CLASS(cpu);
+    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
     CPUWatchpoint *wp;
 
     if (!cpu->watchpoint_hit) {
@@ -682,8 +704,8 @@ static inline void cpu_handle_debug_exception(CPUState *cpu)
         }
     }
 
-    if (cc->tcg_ops->debug_excp_handler) {
-        cc->tcg_ops->debug_excp_handler(cpu);
+    if (tcg_ops->debug_excp_handler) {
+        tcg_ops->debug_excp_handler(cpu);
     }
 }
 
@@ -692,7 +714,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
     if (cpu->exception_index < 0) {
 #ifndef CONFIG_USER_ONLY
         if (replay_has_exception()
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
+            && cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0) {
             /* Execute just one insn to trigger exception pending in the log */
             cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                 | CF_NOIRQ | 1;
@@ -700,6 +722,7 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
 #endif
         return false;
     }
+
     if (cpu->exception_index >= EXCP_INTERRUPT) {
         /* exit request from the cpu execution loop */
         *ret = cpu->exception_index;
@@ -708,62 +731,59 @@ static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
         }
         cpu->exception_index = -1;
         return true;
-    } else {
+    }
+
 #if defined(CONFIG_USER_ONLY)
-        /* if user mode only, we simulate a fake exception
-           which will be handled outside the cpu execution
-           loop */
+    /*
+     * If user mode only, we simulate a fake exception which will be
+     * handled outside the cpu execution loop.
+     */
 #if defined(TARGET_I386)
-        CPUClass *cc = CPU_GET_CLASS(cpu);
-        cc->tcg_ops->fake_user_interrupt(cpu);
+    const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+    tcg_ops->fake_user_interrupt(cpu);
 #endif /* TARGET_I386 */
-        *ret = cpu->exception_index;
-        cpu->exception_index = -1;
-        return true;
+    *ret = cpu->exception_index;
+    cpu->exception_index = -1;
+    return true;
 #else
-        if (replay_exception()) {
-            CPUClass *cc = CPU_GET_CLASS(cpu);
-            qemu_mutex_lock_iothread();
-            cc->tcg_ops->do_interrupt(cpu);
-            qemu_mutex_unlock_iothread();
-            cpu->exception_index = -1;
-
-            if (unlikely(cpu->singlestep_enabled)) {
-                /*
-                 * After processing the exception, ensure an EXCP_DEBUG is
-                 * raised when single-stepping so that GDB doesn't miss the
-                 * next instruction.
-                 */
-                *ret = EXCP_DEBUG;
-                cpu_handle_debug_exception(cpu);
-                return true;
-            }
-        } else if (!replay_has_interrupt()) {
-            /* give a chance to iothread in replay mode */
-            *ret = EXCP_INTERRUPT;
+    if (replay_exception()) {
+        const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
+
+        bql_lock();
+        tcg_ops->do_interrupt(cpu);
+        bql_unlock();
+        cpu->exception_index = -1;
+
+        if (unlikely(cpu->singlestep_enabled)) {
+            /*
+             * After processing the exception, ensure an EXCP_DEBUG is
+             * raised when single-stepping so that GDB doesn't miss the
+             * next instruction.
+             */
+            *ret = EXCP_DEBUG;
+            cpu_handle_debug_exception(cpu);
             return true;
         }
-#endif
+    } else if (!replay_has_interrupt()) {
+        /* give a chance to iothread in replay mode */
+        *ret = EXCP_INTERRUPT;
+        return true;
     }
+#endif
 
     return false;
 }
 
-#ifndef CONFIG_USER_ONLY
-/*
- * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
- * "real" interrupt event later. It does not need to be recorded for
- * replay purposes.
- */
-static inline bool need_replay_interrupt(int interrupt_request)
+static inline bool icount_exit_request(CPUState *cpu)
 {
-#if defined(TARGET_I386)
-    return !(interrupt_request & CPU_INTERRUPT_POLL);
-#else
-    return true;
-#endif
+    if (!icount_enabled()) {
+        return false;
+    }
+    if (cpu->cflags_next_tb != -1 && !(cpu->cflags_next_tb & CF_USE_ICOUNT)) {
+        return false;
+    }
+    return cpu->neg.icount_decr.u16.low + cpu->icount_extra == 0;
 }
-#endif /* !CONFIG_USER_ONLY */
 
 static inline bool cpu_handle_interrupt(CPUState *cpu,
                                         TranslationBlock **last_tb)
@@ -782,11 +802,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
      * Ensure zeroing happens before reading cpu->exit_request or
      * cpu->interrupt_request (see also smp_wmb in cpu_exit())
      */
-    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);
+    qatomic_set_mb(&cpu->neg.icount_decr.u16.high, 0);
 
     if (unlikely(qatomic_read(&cpu->interrupt_request))) {
         int interrupt_request;
-        qemu_mutex_lock_iothread();
+        bql_lock();
         interrupt_request = cpu->interrupt_request;
         if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
             /* Mask out external interrupts for this step. */
@@ -795,7 +815,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         if (interrupt_request & CPU_INTERRUPT_DEBUG) {
             cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
             cpu->exception_index = EXCP_DEBUG;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if !defined(CONFIG_USER_ONLY)
@@ -806,7 +826,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
             cpu->halted = 1;
             cpu->exception_index = EXCP_HLT;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #if defined(TARGET_I386)
@@ -817,14 +837,14 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
             cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
             do_cpu_init(x86_cpu);
             cpu->exception_index = EXCP_HALTED;
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #else
         else if (interrupt_request & CPU_INTERRUPT_RESET) {
             replay_interrupt();
             cpu_reset(cpu);
-            qemu_mutex_unlock_iothread();
+            bql_unlock();
             return true;
         }
 #endif /* !TARGET_I386 */
@@ -833,11 +853,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
            True when it is, and we should restart on a new TB,
            and via longjmp via cpu_loop_exit.  */
         else {
-            CPUClass *cc = CPU_GET_CLASS(cpu);
+            const TCGCPUOps *tcg_ops = cpu->cc->tcg_ops;
 
-            if (cc->tcg_ops->cpu_exec_interrupt &&
-                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
-                if (need_replay_interrupt(interrupt_request)) {
+            if (tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
+                if (!tcg_ops->need_replay_interrupt ||
+                    tcg_ops->need_replay_interrupt(interrupt_request)) {
                     replay_interrupt();
                 }
                 /*
@@ -847,7 +867,7 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
                  */
                 if (unlikely(cpu->singlestep_enabled)) {
                     cpu->exception_index = EXCP_DEBUG;
-                    qemu_mutex_unlock_iothread();
+                    bql_unlock();
                     return true;
                 }
                 cpu->exception_index = -1;
@@ -866,14 +886,11 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
         }
 
         /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 
     /* Finally, check if we need to exit to the main loop.  */
-    if (unlikely(qatomic_read(&cpu->exit_request))
-        || (icount_enabled()
-            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
-            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
+    if (unlikely(qatomic_read(&cpu->exit_request)) || icount_exit_request(cpu)) {
         qatomic_set(&cpu->exit_request, 0);
         if (cpu->exception_index == -1) {
             cpu->exception_index = EXCP_INTERRUPT;
@@ -885,11 +902,9 @@ static inline bool cpu_handle_interrupt(CPUState *cpu,
 }
 
 static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
-                                    target_ulong pc,
-                                    TranslationBlock **last_tb, int *tb_exit)
+                                    vaddr pc, TranslationBlock **last_tb,
+                                    int *tb_exit)
 {
-    int32_t insns_left;
-
     trace_exec_tb(tb, pc);
     tb = cpu_tb_exec(cpu, tb, tb_exit);
     if (*tb_exit != TB_EXIT_REQUESTED) {
@@ -898,8 +913,7 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     }
 
     *last_tb = NULL;
-    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
-    if (insns_left < 0) {
+    if (cpu_loop_exit_requested(cpu)) {
         /* Something asked us to stop executing chained TBs; just
          * continue round the main loop. Whatever requested the exit
          * will also have set something else (eg exit_request or
@@ -916,8 +930,8 @@ static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
     /* Ensure global icount has gone forward */
     icount_update(cpu);
     /* Refill decrementer and continue execution.  */
-    insns_left = MIN(0xffff, cpu->icount_budget);
-    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+    int32_t insns_left = MIN(0xffff, cpu->icount_budget);
+    cpu->neg.icount_decr.u16.low = insns_left;
     cpu->icount_extra = cpu->icount_budget - insns_left;
 
     /*
@@ -947,10 +961,11 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
 
         while (!cpu_handle_interrupt(cpu, &last_tb)) {
             TranslationBlock *tb;
-            target_ulong cs_base, pc;
+            vaddr pc;
+            uint64_t cs_base;
             uint32_t flags, cflags;
 
-            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);
+            cpu_get_tb_cpu_state(cpu_env(cpu), &pc, &cs_base, &flags);
 
             /*
              * When requested, use an exact setting for cflags for the next
@@ -985,14 +1000,8 @@ cpu_exec_loop(CPUState *cpu, SyncClocks *sc)
                  */
                 h = tb_jmp_cache_hash_func(pc);
                 jc = cpu->tb_jmp_cache;
-                if (cflags & CF_PCREL) {
-                    jc->array[h].pc = pc;
-                    /* Ensure pc is written first. */
-                    qatomic_store_release(&jc->array[h].tb, tb);
-                } else {
-                    /* Use the pc value already stored in tb->pc. */
-                    qatomic_set(&jc->array[h].tb, tb);
-                }
+                jc->array[h].pc = pc;
+                qatomic_set(&jc->array[h].tb, tb);
             }
 
 #ifndef CONFIG_USER_ONLY
@@ -1025,20 +1034,7 @@ static int cpu_exec_setjmp(CPUState *cpu, SyncClocks *sc)
 {
     /* Prepare setjmp context for exception handling. */
     if (unlikely(sigsetjmp(cpu->jmp_env, 0) != 0)) {
-        /* Non-buggy compilers preserve this; assert the correct value. */
-        g_assert(cpu == current_cpu);
-
-#ifndef CONFIG_SOFTMMU
-        clear_helper_retaddr();
-        if (have_mmap_lock()) {
-            mmap_unlock();
-        }
-#endif
-        if (qemu_mutex_iothread_locked()) {
-            qemu_mutex_unlock_iothread();
-        }
-
-        assert_no_pages_locked();
+        cpu_exec_longjmp_cleanup(cpu);
     }
 
     return cpu_exec_loop(cpu, sc);
@@ -1056,7 +1052,7 @@ int cpu_exec(CPUState *cpu)
         return EXCP_HALTED;
     }
 
-    rcu_read_lock();
+    RCU_READ_LOCK_GUARD();
     cpu_exec_enter(cpu);
 
     /*
@@ -1070,18 +1066,20 @@ int cpu_exec(CPUState *cpu)
     ret = cpu_exec_setjmp(cpu, &sc);
 
     cpu_exec_exit(cpu);
-    rcu_read_unlock();
-
     return ret;
 }
 
-void tcg_exec_realizefn(CPUState *cpu, Error **errp)
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp)
 {
     static bool tcg_target_initialized;
-    CPUClass *cc = CPU_GET_CLASS(cpu);
 
     if (!tcg_target_initialized) {
-        cc->tcg_ops->initialize();
+        /* Check mandatory TCGCPUOps handlers */
+#ifndef CONFIG_USER_ONLY
+        assert(cpu->cc->tcg_ops->cpu_exec_halt);
+        assert(cpu->cc->tcg_ops->cpu_exec_interrupt);
+#endif /* !CONFIG_USER_ONLY */
+        cpu->cc->tcg_ops->initialize();
         tcg_target_initialized = true;
     }
 
@@ -1091,6 +1089,8 @@ void tcg_exec_realizefn(CPUState *cpu, Error **errp)
     tcg_iommu_init_notifier_list(cpu);
 #endif /* !CONFIG_USER_ONLY */
     /* qemu_plugin_vcpu_init_hook delayed until cpu_index assigned. */
+
+    return true;
 }
 
 /* undo the initializations in reverse order */

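The reworked tb_lookup() in the cpu-exec.c hunks above keeps a direct-mapped jump cache indexed by a hash of the guest PC, falls back to the global qht hash table on a miss, and then refills the slot. A rough standalone sketch of that lookup pattern; the types, cache size and slow_lookup() stub are assumptions for illustration, and the real code also matches cs_base, flags and cflags and uses atomic accessors:

    #include <stdint.h>
    #include <stddef.h>

    #define JMP_CACHE_BITS 12
    #define JMP_CACHE_SIZE (1u << JMP_CACHE_BITS)

    struct tb { uint64_t pc; };              /* stand-in translation block */

    static struct tb *slow_lookup(uint64_t pc)
    {
        (void)pc;                            /* stands in for the global hash-table lookup */
        return NULL;
    }

    static struct { uint64_t pc; struct tb *tb; } jmp_cache[JMP_CACHE_SIZE];

    static size_t jc_hash(uint64_t pc)
    {
        return (pc >> 2) & (JMP_CACHE_SIZE - 1);   /* direct-mapped index */
    }

    static struct tb *lookup_tb(uint64_t pc)
    {
        size_t h = jc_hash(pc);

        if (jmp_cache[h].tb && jmp_cache[h].pc == pc) {
            return jmp_cache[h].tb;          /* fast path: cache hit */
        }
        struct tb *tb = slow_lookup(pc);     /* slow path: global table */
        if (tb) {
            jmp_cache[h].pc = pc;            /* refill this slot */
            jmp_cache[h].tb = tb;
        }
        return tb;
    }
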
The file diff has been suppressed because it is too large
+ 258 - 345
accel/tcg/cputlb.c


+ 32 - 28
softmmu/icount.c → accel/tcg/icount-common.c

@@ -27,7 +27,6 @@
 #include "migration/vmstate.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
-#include "exec/exec-all.h"
 #include "sysemu/cpus.h"
 #include "sysemu/qtest.h"
 #include "qemu/main-loop.h"
@@ -38,7 +37,7 @@
 #include "hw/core/cpu.h"
 #include "sysemu/cpu-timers.h"
 #include "sysemu/cpu-throttle.h"
-#include "timers-state.h"
+#include "sysemu/cpu-timers-internal.h"
 
 /*
  * ICOUNT: Instruction Counter
@@ -50,21 +49,19 @@ static bool icount_sleep = true;
 /* Arbitrarily pick 1MIPS as the minimum allowable speed.  */
 #define MAX_ICOUNT_SHIFT 10
 
-/*
- * 0 = Do not count executed instructions.
- * 1 = Fixed conversion of insn to ns via "shift" option
- * 2 = Runtime adaptive algorithm to compute shift
- */
-int use_icount;
+/* Do not count executed instructions */
+ICountMode use_icount = ICOUNT_DISABLED;
 
 static void icount_enable_precise(void)
 {
-    use_icount = 1;
+    /* Fixed conversion of insn to ns via "shift" option */
+    use_icount = ICOUNT_PRECISE;
 }
 
 static void icount_enable_adaptive(void)
 {
-    use_icount = 2;
+    /* Runtime adaptive algorithm to compute shift */
+    use_icount = ICOUNT_ADAPTATIVE;
 }
 
 /*
@@ -75,7 +72,7 @@ static void icount_enable_adaptive(void)
 static int64_t icount_get_executed(CPUState *cpu)
 {
     return (cpu->icount_budget -
-            (cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra));
+            (cpu->neg.icount_decr.u16.low + cpu->icount_extra));
 }
 
 /*
@@ -111,7 +108,7 @@ static int64_t icount_get_raw_locked(void)
     CPUState *cpu = current_cpu;
 
     if (cpu && cpu->running) {
-        if (!cpu->can_do_io) {
+        if (!cpu->neg.can_do_io) {
             error_report("Bad icount read");
             exit(1);
         }
@@ -257,13 +254,18 @@ static void icount_warp_rt(void)
         int64_t warp_delta;
 
         warp_delta = clock - timers_state.vm_clock_warp_start;
-        if (icount_enabled() == 2) {
+        if (icount_enabled() == ICOUNT_ADAPTATIVE) {
             /*
-             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too
-             * far ahead of real time.
+             * In adaptive mode, do not let QEMU_CLOCK_VIRTUAL run too far
+             * ahead of real time (it might already be ahead so careful not
+             * to go backwards).
              */
             int64_t cur_icount = icount_get_locked();
             int64_t delta = clock - cur_icount;
+
+            if (delta < 0) {
+                delta = 0;
+            }
             warp_delta = MIN(warp_delta, delta);
         }
         qatomic_set_i64(&timers_state.qemu_icount_bias,
@@ -320,7 +322,7 @@ void icount_start_warp_timer(void)
              * vCPU is sleeping and warp can't be started.
              * It is probably a race condition: notification sent
              * to vCPU was processed in advance and vCPU went to sleep.
-             * Therefore we have to wake it up for doing someting.
+             * Therefore we have to wake it up for doing something.
              */
             if (replay_has_event()) {
                 qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
@@ -334,10 +336,8 @@ void icount_start_warp_timer(void)
     deadline = qemu_clock_deadline_ns_all(QEMU_CLOCK_VIRTUAL,
                                           ~QEMU_TIMER_ATTR_EXTERNAL);
     if (deadline < 0) {
-        static bool notified;
-        if (!icount_sleep && !notified) {
-            warn_report("icount sleep disabled and no active timers");
-            notified = true;
+        if (!icount_sleep) {
+            warn_report_once("icount sleep disabled and no active timers");
         }
         return;
     }
@@ -415,7 +415,7 @@ void icount_account_warp_timer(void)
     icount_warp_rt();
 }
 
-void icount_configure(QemuOpts *opts, Error **errp)
+bool icount_configure(QemuOpts *opts, Error **errp)
 {
     const char *option = qemu_opt_get(opts, "shift");
     bool sleep = qemu_opt_get_bool(opts, "sleep", true);
@@ -425,27 +425,28 @@ void icount_configure(QemuOpts *opts, Error **errp)
     if (!option) {
         if (qemu_opt_get(opts, "align") != NULL) {
             error_setg(errp, "Please specify shift option when using align");
+            return false;
         }
-        return;
+        return true;
     }
 
     if (align && !sleep) {
         error_setg(errp, "align=on and sleep=off are incompatible");
-        return;
+        return false;
     }
 
     if (strcmp(option, "auto") != 0) {
         if (qemu_strtol(option, NULL, 0, &time_shift) < 0
             || time_shift < 0 || time_shift > MAX_ICOUNT_SHIFT) {
             error_setg(errp, "icount: Invalid shift value");
-            return;
+            return false;
         }
     } else if (icount_align_option) {
         error_setg(errp, "shift=auto and align=on are incompatible");
-        return;
+        return false;
     } else if (!icount_sleep) {
         error_setg(errp, "shift=auto and sleep=off are incompatible");
-        return;
+        return false;
     }
 
     icount_sleep = sleep;
@@ -459,7 +460,7 @@ void icount_configure(QemuOpts *opts, Error **errp)
     if (time_shift >= 0) {
         timers_state.icount_time_shift = time_shift;
         icount_enable_precise();
-        return;
+        return true;
     }
 
     icount_enable_adaptive();
@@ -487,11 +488,14 @@ void icount_configure(QemuOpts *opts, Error **errp)
     timer_mod(timers_state.icount_vm_timer,
                    qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                    NANOSECONDS_PER_SECOND / 10);
+    return true;
 }
 
 void icount_notify_exit(void)
 {
-    if (icount_enabled() && current_cpu) {
+    assert(icount_enabled());
+
+    if (current_cpu) {
         qemu_cpu_kick(current_cpu);
         qemu_clock_notify(QEMU_CLOCK_VIRTUAL);
     }

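With the ICOUNT_PRECISE mode selected in the hunks above, the "shift" option fixes the cost of one executed instruction at 2^shift virtual nanoseconds, and MAX_ICOUNT_SHIFT of 10 corresponds to the roughly 1 MIPS floor mentioned in the source comment. A tiny illustrative calculation; insns_to_ns() is a made-up helper, not the QEMU API:

    #include <inttypes.h>
    #include <stdio.h>

    /* Sketch of the fixed insn-to-ns conversion used by ICOUNT_PRECISE. */
    static int64_t insns_to_ns(int64_t insns, int shift)
    {
        return insns << shift;
    }

    int main(void)
    {
        /* shift=10 (MAX_ICOUNT_SHIFT): 1 insn costs 1024 ns, i.e. about
         * one million instructions per simulated second. */
        printf("%" PRId64 " ns\n", insns_to_ns(1000000, 10)); /* 1024000000 */
        return 0;
    }
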
+ 59 - 0
accel/tcg/internal-common.h

@@ -0,0 +1,59 @@
+/*
+ * Internal execution defines for qemu (target agnostic)
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_INTERNAL_COMMON_H
+#define ACCEL_TCG_INTERNAL_COMMON_H
+
+#include "exec/cpu-common.h"
+#include "exec/translation-block.h"
+
+extern int64_t max_delay;
+extern int64_t max_advance;
+
+extern bool one_insn_per_tb;
+
+/*
+ * Return true if CS is not running in parallel with other cpus, either
+ * because there are no other cpus or we are within an exclusive context.
+ */
+static inline bool cpu_in_serial_context(CPUState *cs)
+{
+    return !tcg_cflags_has(cs, CF_PARALLEL) || cpu_in_exclusive_context(cs);
+}
+
+/**
+ * cpu_plugin_mem_cbs_enabled() - are plugin memory callbacks enabled?
+ * @cpu: CPUState pointer
+ *
+ * The memory callbacks are installed if a plugin has instrumented an
+ * instruction for memory. This can be useful to know if you want to
+ * force a slow path for a series of memory accesses.
+ */
+static inline bool cpu_plugin_mem_cbs_enabled(const CPUState *cpu)
+{
+#ifdef CONFIG_PLUGIN
+    return !!cpu->neg.plugin_mem_cbs;
+#else
+    return false;
+#endif
+}
+
+TranslationBlock *tb_gen_code(CPUState *cpu, vaddr pc,
+                              uint64_t cs_base, uint32_t flags,
+                              int cflags);
+void page_init(void);
+void tb_htable_init(void);
+void tb_reset_jump(TranslationBlock *tb, int n);
+TranslationBlock *tb_link_page(TranslationBlock *tb);
+void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
+                               uintptr_t host_pc);
+
+bool tcg_exec_realizefn(CPUState *cpu, Error **errp);
+void tcg_exec_unrealizefn(CPUState *cpu);
+
+#endif
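
cpu_in_serial_context() above is the target-agnostic predicate that later code in this merge (for example required_atomicity() in ldst_atomicity.c.inc) uses to drop extra host atomicity when no other vCPU can race. A hedged sketch of the pattern it enables; do_example_read() is a made-up name, not a QEMU function:

static uint64_t do_example_read(CPUState *cs, uint64_t *hostp)
{
    if (cpu_in_serial_context(cs)) {
        /* No other vCPU can observe a torn access; a plain load suffices. */
        return *hostp;
    }
    /* Otherwise use a host-atomic access (or take a slow-path fallback). */
    return qatomic_read__nocheck(hostp);
}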

+ 118 - 0
accel/tcg/internal-target.h

@@ -0,0 +1,118 @@
+/*
+ * Internal execution defines for qemu (target specific)
+ *
+ *  Copyright (c) 2003 Fabrice Bellard
+ *
+ * SPDX-License-Identifier: LGPL-2.1-or-later
+ */
+
+#ifndef ACCEL_TCG_INTERNAL_TARGET_H
+#define ACCEL_TCG_INTERNAL_TARGET_H
+
+#include "exec/exec-all.h"
+#include "exec/translate-all.h"
+
+/*
+ * Access to the various translation structures needs to be serialised
+ * via locks for consistency.  In user-mode emulation, access to the
+ * memory-related structures is protected with mmap_lock.
+ * In !user-mode we use per-page locks.
+ */
+#ifdef CONFIG_USER_ONLY
+#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
+#else
+#define assert_memory_lock()
+#endif
+
+#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
+void assert_no_pages_locked(void);
+#else
+static inline void assert_no_pages_locked(void) { }
+#endif
+
+#ifdef CONFIG_USER_ONLY
+static inline void page_table_config_init(void) { }
+#else
+void page_table_config_init(void);
+#endif
+
+#ifdef CONFIG_USER_ONLY
+/*
+ * For user-only, page_protect sets the page read-only.
+ * Since most execution is already on read-only pages, and we'd need to
+ * account for other TBs on the same page, defer undoing any page protection
+ * until we receive the write fault.
+ */
+static inline void tb_lock_page0(tb_page_addr_t p0)
+{
+    page_protect(p0);
+}
+
+static inline void tb_lock_page1(tb_page_addr_t p0, tb_page_addr_t p1)
+{
+    page_protect(p1);
+}
+
+static inline void tb_unlock_page1(tb_page_addr_t p0, tb_page_addr_t p1) { }
+static inline void tb_unlock_pages(TranslationBlock *tb) { }
+#else
+void tb_lock_page0(tb_page_addr_t);
+void tb_lock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_page1(tb_page_addr_t, tb_page_addr_t);
+void tb_unlock_pages(TranslationBlock *);
+#endif
+
+#ifdef CONFIG_SOFTMMU
+void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
+                                   unsigned size,
+                                   uintptr_t retaddr);
+G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
+#endif /* CONFIG_SOFTMMU */
+
+bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
+
+/* Return the current PC from CPU, which may be cached in TB. */
+static inline vaddr log_pc(CPUState *cpu, const TranslationBlock *tb)
+{
+    if (tb_cflags(tb) & CF_PCREL) {
+        return cpu->cc->get_pc(cpu);
+    } else {
+        return tb->pc;
+    }
+}
+
+/**
+ * tcg_req_mo:
+ * @type: TCGBar
+ *
+ * Filter @type to the barrier that is required for the guest
+ * memory ordering vs the host memory ordering.  A non-zero
+ * result indicates that some barrier is required.
+ *
+ * If TCG_GUEST_DEFAULT_MO is not defined, assume that the
+ * guest requires strict ordering.
+ *
+ * This is a macro so that it's constant even without optimization.
+ */
+#ifdef TCG_GUEST_DEFAULT_MO
+# define tcg_req_mo(type) \
+    ((type) & TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO)
+#else
+# define tcg_req_mo(type) ((type) & ~TCG_TARGET_DEFAULT_MO)
+#endif
+
+/**
+ * cpu_req_mo:
+ * @type: TCGBar
+ *
+ * If tcg_req_mo indicates a barrier for @type is required
+ * for the guest memory model, issue a host memory barrier.
+ */
+#define cpu_req_mo(type)          \
+    do {                          \
+        if (tcg_req_mo(type)) {   \
+            smp_mb();             \
+        }                         \
+    } while (0)
+
+#endif /* ACCEL_TCG_INTERNAL_TARGET_H */
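
To make the tcg_req_mo()/cpu_req_mo() filtering concrete, here is a self-contained sketch with locally defined stand-ins for the ordering bits and hypothetical guest/host defaults; the real constants live in tcg/tcg.h and the per-host backends, so treat the values below purely as assumptions for illustration:

#include <stdio.h>

/* Local stand-ins: one bit per load/store ordering pair. */
#define MO_LD_LD 0x01
#define MO_ST_LD 0x02
#define MO_LD_ST 0x04
#define MO_ST_ST 0x08
#define MO_ALL   0x0f

/* Hypothetical: strongly ordered guest on a host that only reorders ST->LD. */
#define GUEST_DEFAULT_MO  MO_ALL
#define HOST_DEFAULT_MO   (MO_ALL & ~MO_ST_LD)

#define req_mo(type)  ((type) & GUEST_DEFAULT_MO & ~HOST_DEFAULT_MO)

int main(void)
{
    /* Already ordered by the host: the barrier is elided. */
    printf("LD_LD barrier needed: %s\n", req_mo(MO_LD_LD) ? "yes" : "no");
    /* Not ordered by the host: cpu_req_mo() would issue smp_mb() here. */
    printf("ST_LD barrier needed: %s\n", req_mo(MO_ST_LD) ? "yes" : "no");
    return 0;
}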

+ 0 - 70
accel/tcg/internal.h

@@ -1,70 +0,0 @@
-/*
- * Internal execution defines for qemu
- *
- *  Copyright (c) 2003 Fabrice Bellard
- *
- * SPDX-License-Identifier: LGPL-2.1-or-later
- */
-
-#ifndef ACCEL_TCG_INTERNAL_H
-#define ACCEL_TCG_INTERNAL_H
-
-#include "exec/exec-all.h"
-
-/*
- * Access to the various translations structures need to be serialised
- * via locks for consistency.  In user-mode emulation access to the
- * memory related structures are protected with mmap_lock.
- * In !user-mode we use per-page locks.
- */
-#ifdef CONFIG_SOFTMMU
-#define assert_memory_lock()
-#else
-#define assert_memory_lock() tcg_debug_assert(have_mmap_lock())
-#endif
-
-#if defined(CONFIG_SOFTMMU) && defined(CONFIG_DEBUG_TCG)
-void assert_no_pages_locked(void);
-#else
-static inline void assert_no_pages_locked(void) { }
-#endif
-
-#ifdef CONFIG_USER_ONLY
-static inline void page_table_config_init(void) { }
-#else
-void page_table_config_init(void);
-#endif
-
-#ifdef CONFIG_SOFTMMU
-void tb_invalidate_phys_range_fast(ram_addr_t ram_addr,
-                                   unsigned size,
-                                   uintptr_t retaddr);
-G_NORETURN void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
-#endif /* CONFIG_SOFTMMU */
-
-TranslationBlock *tb_gen_code(CPUState *cpu, target_ulong pc,
-                              target_ulong cs_base, uint32_t flags,
-                              int cflags);
-void page_init(void);
-void tb_htable_init(void);
-void tb_reset_jump(TranslationBlock *tb, int n);
-TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
-                               tb_page_addr_t phys_page2);
-bool tb_invalidate_phys_page_unwind(tb_page_addr_t addr, uintptr_t pc);
-void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
-                               uintptr_t host_pc);
-
-/* Return the current PC from CPU, which may be cached in TB. */
-static inline target_ulong log_pc(CPUState *cpu, const TranslationBlock *tb)
-{
-    if (tb_cflags(tb) & CF_PCREL) {
-        return cpu->cc->get_pc(cpu);
-    } else {
-        return tb->pc;
-    }
-}
-
-extern int64_t max_delay;
-extern int64_t max_advance;
-
-#endif /* ACCEL_TCG_INTERNAL_H */

+ 1111 - 0
accel/tcg/ldst_atomicity.c.inc

@@ -0,0 +1,1111 @@
+/*
+ * Routines common to user and system emulation of load/store.
+ *
+ *  Copyright (c) 2022 Linaro, Ltd.
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or later.
+ * See the COPYING file in the top-level directory.
+ */
+
+#include "host/load-extract-al16-al8.h.inc"
+#include "host/store-insert-al16.h.inc"
+
+#ifdef CONFIG_ATOMIC64
+# define HAVE_al8          true
+#else
+# define HAVE_al8          false
+#endif
+#define HAVE_al8_fast      (ATOMIC_REG_SIZE >= 8)
+
+/**
+ * required_atomicity:
+ *
+ * Return the lg2 bytes of atomicity required by @memop for @p.
+ * If the operation must be split into two operations to be
+ * examined separately for atomicity, return -lg2.
+ */
+static int required_atomicity(CPUState *cpu, uintptr_t p, MemOp memop)
+{
+    MemOp atom = memop & MO_ATOM_MASK;
+    MemOp size = memop & MO_SIZE;
+    MemOp half = size ? size - 1 : 0;
+    unsigned tmp;
+    int atmax;
+
+    switch (atom) {
+    case MO_ATOM_NONE:
+        atmax = MO_8;
+        break;
+
+    case MO_ATOM_IFALIGN_PAIR:
+        size = half;
+        /* fall through */
+
+    case MO_ATOM_IFALIGN:
+        tmp = (1 << size) - 1;
+        atmax = p & tmp ? MO_8 : size;
+        break;
+
+    case MO_ATOM_WITHIN16:
+        tmp = p & 15;
+        atmax = (tmp + (1 << size) <= 16 ? size : MO_8);
+        break;
+
+    case MO_ATOM_WITHIN16_PAIR:
+        tmp = p & 15;
+        if (tmp + (1 << size) <= 16) {
+            atmax = size;
+        } else if (tmp + (1 << half) == 16) {
+            /*
+             * The pair exactly straddles the boundary.
+             * Both halves are naturally aligned and atomic.
+             */
+            atmax = half;
+        } else {
+            /*
+             * One of the pair crosses the boundary, and is non-atomic.
+             * The other of the pair does not cross, and is atomic.
+             */
+            atmax = -half;
+        }
+        break;
+
+    case MO_ATOM_SUBALIGN:
+        /*
+         * Examine the alignment of p to determine if there are subobjects
+         * that must be aligned.  Note that we only really need ctz4() --
+         * any more significant bits are discarded by the immediately
+         * following comparison.
+         */
+        tmp = ctz32(p);
+        atmax = MIN(size, tmp);
+        break;
+
+    default:
+        g_assert_not_reached();
+    }
+
+    /*
+     * Here we have the architectural atomicity of the operation.
+     * However, when executing in a serial context, we need no extra
+     * host atomicity in order to avoid racing.  This reduction
+     * avoids looping with cpu_loop_exit_atomic.
+     */
+    if (cpu_in_serial_context(cpu)) {
+        return MO_8;
+    }
+    return atmax;
+}
+
+/**
+ * load_atomic2:
+ * @pv: host address
+ *
+ * Atomically load 2 aligned bytes from @pv.
+ */
+static inline uint16_t load_atomic2(void *pv)
+{
+    uint16_t *p = __builtin_assume_aligned(pv, 2);
+    return qatomic_read(p);
+}
+
+/**
+ * load_atomic4:
+ * @pv: host address
+ *
+ * Atomically load 4 aligned bytes from @pv.
+ */
+static inline uint32_t load_atomic4(void *pv)
+{
+    uint32_t *p = __builtin_assume_aligned(pv, 4);
+    return qatomic_read(p);
+}
+
+/**
+ * load_atomic8:
+ * @pv: host address
+ *
+ * Atomically load 8 aligned bytes from @pv.
+ */
+static inline uint64_t load_atomic8(void *pv)
+{
+    uint64_t *p = __builtin_assume_aligned(pv, 8);
+
+    qemu_build_assert(HAVE_al8);
+    return qatomic_read__nocheck(p);
+}
+
+/**
+ * load_atomic8_or_exit:
+ * @cpu: generic cpu state
+ * @ra: host unwind address
+ * @pv: host address
+ *
+ * Atomically load 8 aligned bytes from @pv.
+ * If this is not possible, longjmp out to restart serially.
+ */
+static uint64_t load_atomic8_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
+{
+    if (HAVE_al8) {
+        return load_atomic8(pv);
+    }
+
+#ifdef CONFIG_USER_ONLY
+    /*
+     * If the page is not writable, then assume the value is immutable
+     * and requires no locking.  This ignores the case of MAP_SHARED with
+     * another process, because the fallback start_exclusive solution
+     * provides no protection across processes.
+     */
+    WITH_MMAP_LOCK_GUARD() {
+        if (!page_check_range(h2g(pv), 8, PAGE_WRITE_ORG)) {
+            uint64_t *p = __builtin_assume_aligned(pv, 8);
+            return *p;
+        }
+    }
+#endif
+
+    /* Ultimate fallback: re-execute in serial context. */
+    cpu_loop_exit_atomic(cpu, ra);
+}
+
+/**
+ * load_atomic16_or_exit:
+ * @cpu: generic cpu state
+ * @ra: host unwind address
+ * @pv: host address
+ *
+ * Atomically load 16 aligned bytes from @pv.
+ * If this is not possible, longjmp out to restart serially.
+ */
+static Int128 load_atomic16_or_exit(CPUState *cpu, uintptr_t ra, void *pv)
+{
+    Int128 *p = __builtin_assume_aligned(pv, 16);
+
+    if (HAVE_ATOMIC128_RO) {
+        return atomic16_read_ro(p);
+    }
+
+    /*
+     * We can only use cmpxchg to emulate a load if the page is writable.
+     * If the page is not writable, then assume the value is immutable
+     * and requires no locking.  This ignores the case of MAP_SHARED with
+     * another process, because the fallback start_exclusive solution
+     * provides no protection across processes.
+     *
+     * In system mode all guest pages are writable.  For user mode,
+     * we must take mmap_lock so that the query remains valid until
+     * the write is complete -- tests/tcg/multiarch/munmap-pthread.c
+     * is an example that can race.
+     */
+    WITH_MMAP_LOCK_GUARD() {
+#ifdef CONFIG_USER_ONLY
+        if (!page_check_range(h2g(p), 16, PAGE_WRITE_ORG)) {
+            return *p;
+        }
+#endif
+        if (HAVE_ATOMIC128_RW) {
+            return atomic16_read_rw(p);
+        }
+    }
+
+    /* Ultimate fallback: re-execute in serial context. */
+    cpu_loop_exit_atomic(cpu, ra);
+}
+
+/**
+ * load_atom_extract_al4x2:
+ * @pv: host address
+ *
+ * Load 4 bytes from @p, from two sequential atomic 4-byte loads.
+ */
+static uint32_t load_atom_extract_al4x2(void *pv)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int sh = (pi & 3) * 8;
+    uint32_t a, b;
+
+    pv = (void *)(pi & ~3);
+    a = load_atomic4(pv);
+    b = load_atomic4(pv + 4);
+
+    if (HOST_BIG_ENDIAN) {
+        return (a << sh) | (b >> (-sh & 31));
+    } else {
+        return (a >> sh) | (b << (-sh & 31));
+    }
+}
+
+/**
+ * load_atom_extract_al8x2:
+ * @pv: host address
+ *
+ * Load 8 bytes from @p, from two sequential atomic 8-byte loads.
+ */
+static uint64_t load_atom_extract_al8x2(void *pv)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int sh = (pi & 7) * 8;
+    uint64_t a, b;
+
+    pv = (void *)(pi & ~7);
+    a = load_atomic8(pv);
+    b = load_atomic8(pv + 8);
+
+    if (HOST_BIG_ENDIAN) {
+        return (a << sh) | (b >> (-sh & 63));
+    } else {
+        return (a >> sh) | (b << (-sh & 63));
+    }
+}
+
+/**
+ * load_atom_extract_al8_or_exit:
+ * @cpu: generic cpu state
+ * @ra: host unwind address
+ * @pv: host address
+ * @s: object size in bytes, @s <= 4.
+ *
+ * Atomically load @s bytes from @p, when p % s != 0, and [p, p+s-1] does
+ * not cross an 8-byte boundary.  This means that we can perform an atomic
+ * 8-byte load and extract.
+ * The value is returned in the low bits of a uint32_t.
+ */
+static uint32_t load_atom_extract_al8_or_exit(CPUState *cpu, uintptr_t ra,
+                                              void *pv, int s)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int o = pi & 7;
+    int shr = (HOST_BIG_ENDIAN ? 8 - s - o : o) * 8;
+
+    pv = (void *)(pi & ~7);
+    return load_atomic8_or_exit(cpu, ra, pv) >> shr;
+}
+
+/**
+ * load_atom_extract_al16_or_exit:
+ * @cpu: generic cpu state
+ * @ra: host unwind address
+ * @p: host address
+ * @s: object size in bytes, @s <= 8.
+ *
+ * Atomically load @s bytes from @p, when p % 16 < 8
+ * and p % 16 + s > 8.  I.e. does not cross a 16-byte
+ * boundary, but *does* cross an 8-byte boundary.
+ * This is the slow version, so we must have eliminated
+ * any faster load_atom_extract_al8_or_exit case.
+ *
+ * If this is not possible, longjmp out to restart serially.
+ */
+static uint64_t load_atom_extract_al16_or_exit(CPUState *cpu, uintptr_t ra,
+                                               void *pv, int s)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int o = pi & 7;
+    int shr = (HOST_BIG_ENDIAN ? 16 - s - o : o) * 8;
+    Int128 r;
+
+    /*
+     * Note constraints above: p & 8 must be clear.
+     * Provoke SIGBUS if possible otherwise.
+     */
+    pv = (void *)(pi & ~7);
+    r = load_atomic16_or_exit(cpu, ra, pv);
+
+    r = int128_urshift(r, shr);
+    return int128_getlo(r);
+}
+
+/**
+ * load_atom_4_by_2:
+ * @pv: host address
+ *
+ * Load 4 bytes from @pv, with two 2-byte atomic loads.
+ */
+static inline uint32_t load_atom_4_by_2(void *pv)
+{
+    uint32_t a = load_atomic2(pv);
+    uint32_t b = load_atomic2(pv + 2);
+
+    if (HOST_BIG_ENDIAN) {
+        return (a << 16) | b;
+    } else {
+        return (b << 16) | a;
+    }
+}
+
+/**
+ * load_atom_8_by_2:
+ * @pv: host address
+ *
+ * Load 8 bytes from @pv, with four 2-byte atomic loads.
+ */
+static inline uint64_t load_atom_8_by_2(void *pv)
+{
+    uint32_t a = load_atom_4_by_2(pv);
+    uint32_t b = load_atom_4_by_2(pv + 4);
+
+    if (HOST_BIG_ENDIAN) {
+        return ((uint64_t)a << 32) | b;
+    } else {
+        return ((uint64_t)b << 32) | a;
+    }
+}
+
+/**
+ * load_atom_8_by_4:
+ * @pv: host address
+ *
+ * Load 8 bytes from @pv, with two 4-byte atomic loads.
+ */
+static inline uint64_t load_atom_8_by_4(void *pv)
+{
+    uint32_t a = load_atomic4(pv);
+    uint32_t b = load_atomic4(pv + 4);
+
+    if (HOST_BIG_ENDIAN) {
+        return ((uint64_t)a << 32) | b;
+    } else {
+        return ((uint64_t)b << 32) | a;
+    }
+}
+
+/**
+ * load_atom_8_by_8_or_4:
+ * @pv: host address
+ *
+ * Load 8 bytes from aligned @pv, with at least 4-byte atomicity.
+ */
+static inline uint64_t load_atom_8_by_8_or_4(void *pv)
+{
+    if (HAVE_al8_fast) {
+        return load_atomic8(pv);
+    } else {
+        return load_atom_8_by_4(pv);
+    }
+}
+
+/**
+ * load_atom_2:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 2 bytes from @p, honoring the atomicity of @memop.
+ */
+static uint16_t load_atom_2(CPUState *cpu, uintptr_t ra,
+                            void *pv, MemOp memop)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int atmax;
+
+    if (likely((pi & 1) == 0)) {
+        return load_atomic2(pv);
+    }
+    if (HAVE_ATOMIC128_RO) {
+        intptr_t left_in_page = -(pi | TARGET_PAGE_MASK);
+        if (likely(left_in_page > 8)) {
+            return load_atom_extract_al16_or_al8(pv, 2);
+        }
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+    switch (atmax) {
+    case MO_8:
+        return lduw_he_p(pv);
+    case MO_16:
+        /* The only case remaining is MO_ATOM_WITHIN16. */
+        if (!HAVE_al8_fast && (pi & 3) == 1) {
+            /* Big or little endian, we want the middle two bytes. */
+            return load_atomic4(pv - 1) >> 8;
+        }
+        if ((pi & 15) != 7) {
+            return load_atom_extract_al8_or_exit(cpu, ra, pv, 2);
+        }
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 2);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/**
+ * load_atom_4:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 4 bytes from @p, honoring the atomicity of @memop.
+ */
+static uint32_t load_atom_4(CPUState *cpu, uintptr_t ra,
+                            void *pv, MemOp memop)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int atmax;
+
+    if (likely((pi & 3) == 0)) {
+        return load_atomic4(pv);
+    }
+    if (HAVE_ATOMIC128_RO) {
+        intptr_t left_in_page = -(pi | TARGET_PAGE_MASK);
+        if (likely(left_in_page > 8)) {
+            return load_atom_extract_al16_or_al8(pv, 4);
+        }
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+    switch (atmax) {
+    case MO_8:
+    case MO_16:
+    case -MO_16:
+        /*
+         * For MO_ATOM_IFALIGN, this is more atomicity than required,
+         * but it's trivially supported on all hosts, better than 4
+         * individual byte loads (when the host requires alignment),
+         * and overlaps with the MO_ATOM_SUBALIGN case of p % 2 == 0.
+         */
+        return load_atom_extract_al4x2(pv);
+    case MO_32:
+        if (!(pi & 4)) {
+            return load_atom_extract_al8_or_exit(cpu, ra, pv, 4);
+        }
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 4);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/**
+ * load_atom_8:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 8 bytes from @p, honoring the atomicity of @memop.
+ */
+static uint64_t load_atom_8(CPUState *cpu, uintptr_t ra,
+                            void *pv, MemOp memop)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int atmax;
+
+    /*
+     * If the host does not support 8-byte atomics, wait until we have
+     * examined the atomicity parameters below.
+     */
+    if (HAVE_al8 && likely((pi & 7) == 0)) {
+        return load_atomic8(pv);
+    }
+    if (HAVE_ATOMIC128_RO) {
+        return load_atom_extract_al16_or_al8(pv, 8);
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+    if (atmax == MO_64) {
+        if (!HAVE_al8 && (pi & 7) == 0) {
+            load_atomic8_or_exit(cpu, ra, pv);
+        }
+        return load_atom_extract_al16_or_exit(cpu, ra, pv, 8);
+    }
+    if (HAVE_al8_fast) {
+        return load_atom_extract_al8x2(pv);
+    }
+    switch (atmax) {
+    case MO_8:
+        return ldq_he_p(pv);
+    case MO_16:
+        return load_atom_8_by_2(pv);
+    case MO_32:
+        return load_atom_8_by_4(pv);
+    case -MO_32:
+        if (HAVE_al8) {
+            return load_atom_extract_al8x2(pv);
+        }
+        cpu_loop_exit_atomic(cpu, ra);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/**
+ * load_atom_16:
+ * @p: host address
+ * @memop: the full memory op
+ *
+ * Load 16 bytes from @p, honoring the atomicity of @memop.
+ */
+static Int128 load_atom_16(CPUState *cpu, uintptr_t ra,
+                           void *pv, MemOp memop)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int atmax;
+    Int128 r;
+    uint64_t a, b;
+
+    /*
+     * If the host does not support 16-byte atomics, wait until we have
+     * examined the atomicity parameters below.
+     */
+    if (HAVE_ATOMIC128_RO && likely((pi & 15) == 0)) {
+        return atomic16_read_ro(pv);
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+    switch (atmax) {
+    case MO_8:
+        memcpy(&r, pv, 16);
+        return r;
+    case MO_16:
+        a = load_atom_8_by_2(pv);
+        b = load_atom_8_by_2(pv + 8);
+        break;
+    case MO_32:
+        a = load_atom_8_by_4(pv);
+        b = load_atom_8_by_4(pv + 8);
+        break;
+    case MO_64:
+        if (!HAVE_al8) {
+            cpu_loop_exit_atomic(cpu, ra);
+        }
+        a = load_atomic8(pv);
+        b = load_atomic8(pv + 8);
+        break;
+    case -MO_64:
+        if (!HAVE_al8) {
+            cpu_loop_exit_atomic(cpu, ra);
+        }
+        a = load_atom_extract_al8x2(pv);
+        b = load_atom_extract_al8x2(pv + 8);
+        break;
+    case MO_128:
+        return load_atomic16_or_exit(cpu, ra, pv);
+    default:
+        g_assert_not_reached();
+    }
+    return int128_make128(HOST_BIG_ENDIAN ? b : a, HOST_BIG_ENDIAN ? a : b);
+}
+
+/**
+ * store_atomic2:
+ * @pv: host address
+ * @val: value to store
+ *
+ * Atomically store 2 aligned bytes to @pv.
+ */
+static inline void store_atomic2(void *pv, uint16_t val)
+{
+    uint16_t *p = __builtin_assume_aligned(pv, 2);
+    qatomic_set(p, val);
+}
+
+/**
+ * store_atomic4:
+ * @pv: host address
+ * @val: value to store
+ *
+ * Atomically store 4 aligned bytes to @pv.
+ */
+static inline void store_atomic4(void *pv, uint32_t val)
+{
+    uint32_t *p = __builtin_assume_aligned(pv, 4);
+    qatomic_set(p, val);
+}
+
+/**
+ * store_atomic8:
+ * @pv: host address
+ * @val: value to store
+ *
+ * Atomically store 8 aligned bytes to @pv.
+ */
+static inline void store_atomic8(void *pv, uint64_t val)
+{
+    uint64_t *p = __builtin_assume_aligned(pv, 8);
+
+    qemu_build_assert(HAVE_al8);
+    qatomic_set__nocheck(p, val);
+}
+
+/**
+ * store_atom_4_by_2
+ */
+static inline void store_atom_4_by_2(void *pv, uint32_t val)
+{
+    store_atomic2(pv, val >> (HOST_BIG_ENDIAN ? 16 : 0));
+    store_atomic2(pv + 2, val >> (HOST_BIG_ENDIAN ? 0 : 16));
+}
+
+/**
+ * store_atom_8_by_2
+ */
+static inline void store_atom_8_by_2(void *pv, uint64_t val)
+{
+    store_atom_4_by_2(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0));
+    store_atom_4_by_2(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32));
+}
+
+/**
+ * store_atom_8_by_4
+ */
+static inline void store_atom_8_by_4(void *pv, uint64_t val)
+{
+    store_atomic4(pv, val >> (HOST_BIG_ENDIAN ? 32 : 0));
+    store_atomic4(pv + 4, val >> (HOST_BIG_ENDIAN ? 0 : 32));
+}
+
+/**
+ * store_atom_insert_al4:
+ * @p: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @p, masked by @msk.
+ */
+static void store_atom_insert_al4(uint32_t *p, uint32_t val, uint32_t msk)
+{
+    uint32_t old, new;
+
+    p = __builtin_assume_aligned(p, 4);
+    old = qatomic_read(p);
+    do {
+        new = (old & ~msk) | val;
+    } while (!__atomic_compare_exchange_n(p, &old, new, true,
+                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+}
+
+/**
+ * store_atom_insert_al8:
+ * @p: host address
+ * @val: shifted value to store
+ * @msk: mask for value to store
+ *
+ * Atomically store @val to @p masked by @msk.
+ */
+static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
+{
+    uint64_t old, new;
+
+    qemu_build_assert(HAVE_al8);
+    p = __builtin_assume_aligned(p, 8);
+    old = qatomic_read__nocheck(p);
+    do {
+        new = (old & ~msk) | val;
+    } while (!__atomic_compare_exchange_n(p, &old, new, true,
+                                          __ATOMIC_RELAXED, __ATOMIC_RELAXED));
+}
+
+/**
+ * store_bytes_leN:
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * Store @size bytes at @p.  The bytes to store are extracted in little-endian order
+ * from @val_le; return the bytes of @val_le beyond @size that have not been stored.
+ */
+static uint64_t store_bytes_leN(void *pv, int size, uint64_t val_le)
+{
+    uint8_t *p = pv;
+    for (int i = 0; i < size; i++, val_le >>= 8) {
+        p[i] = val_le;
+    }
+    return val_le;
+}
+
+/**
+ * store_parts_leN
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically on each aligned part.
+ */
+G_GNUC_UNUSED
+static uint64_t store_parts_leN(void *pv, int size, uint64_t val_le)
+{
+    do {
+        int n;
+
+        /* Find minimum of alignment and size */
+        switch (((uintptr_t)pv | size) & 7) {
+        case 4:
+            store_atomic4(pv, le32_to_cpu(val_le));
+            val_le >>= 32;
+            n = 4;
+            break;
+        case 2:
+        case 6:
+            store_atomic2(pv, le16_to_cpu(val_le));
+            val_le >>= 16;
+            n = 2;
+            break;
+        default:
+            *(uint8_t *)pv = val_le;
+            val_le >>= 8;
+            n = 1;
+            break;
+        case 0:
+            g_assert_not_reached();
+        }
+        pv += n;
+        size -= n;
+    } while (size != 0);
+
+    return val_le;
+}
+
+/**
+ * store_whole_le4
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically as a whole.
+ * Four aligned bytes are guaranteed to cover the store.
+ */
+static uint64_t store_whole_le4(void *pv, int size, uint64_t val_le)
+{
+    int sz = size * 8;
+    int o = (uintptr_t)pv & 3;
+    int sh = o * 8;
+    uint32_t m = MAKE_64BIT_MASK(0, sz);
+    uint32_t v;
+
+    if (HOST_BIG_ENDIAN) {
+        v = bswap32(val_le) >> sh;
+        m = bswap32(m) >> sh;
+    } else {
+        v = val_le << sh;
+        m <<= sh;
+    }
+    store_atom_insert_al4(pv - o, v, m);
+    return val_le >> sz;
+}
+
+/**
+ * store_whole_le8
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically as a whole.
+ * Eight aligned bytes are guaranteed to cover the store.
+ */
+static uint64_t store_whole_le8(void *pv, int size, uint64_t val_le)
+{
+    int sz = size * 8;
+    int o = (uintptr_t)pv & 7;
+    int sh = o * 8;
+    uint64_t m = MAKE_64BIT_MASK(0, sz);
+    uint64_t v;
+
+    qemu_build_assert(HAVE_al8);
+    if (HOST_BIG_ENDIAN) {
+        v = bswap64(val_le) >> sh;
+        m = bswap64(m) >> sh;
+    } else {
+        v = val_le << sh;
+        m <<= sh;
+    }
+    store_atom_insert_al8(pv - o, v, m);
+    return val_le >> sz;
+}
+
+/**
+ * store_whole_le16
+ * @pv: host address
+ * @size: number of bytes to store
+ * @val_le: data to store
+ *
+ * As store_bytes_leN, but atomically as a whole.
+ * 16 aligned bytes are guaranteed to cover the store.
+ */
+static uint64_t store_whole_le16(void *pv, int size, Int128 val_le)
+{
+    int sz = size * 8;
+    int o = (uintptr_t)pv & 15;
+    int sh = o * 8;
+    Int128 m, v;
+
+    qemu_build_assert(HAVE_CMPXCHG128);
+
+    /* Like MAKE_64BIT_MASK(0, sz), but larger. */
+    if (sz <= 64) {
+        m = int128_make64(MAKE_64BIT_MASK(0, sz));
+    } else {
+        m = int128_make128(-1, MAKE_64BIT_MASK(0, sz - 64));
+    }
+
+    if (HOST_BIG_ENDIAN) {
+        v = int128_urshift(bswap128(val_le), sh);
+        m = int128_urshift(bswap128(m), sh);
+    } else {
+        v = int128_lshift(val_le, sh);
+        m = int128_lshift(m, sh);
+    }
+    store_atom_insert_al16(pv - o, v, m);
+
+    if (sz <= 64) {
+        return 0;
+    }
+    return int128_gethi(val_le) >> (sz - 64);
+}
+
+/**
+ * store_atom_2:
+ * @p: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 2 bytes to @p, honoring the atomicity of @memop.
+ */
+static void store_atom_2(CPUState *cpu, uintptr_t ra,
+                         void *pv, MemOp memop, uint16_t val)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int atmax;
+
+    if (likely((pi & 1) == 0)) {
+        store_atomic2(pv, val);
+        return;
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+    if (atmax == MO_8) {
+        stw_he_p(pv, val);
+        return;
+    }
+
+    /*
+     * The only case remaining is MO_ATOM_WITHIN16.
+     * Big or little endian, we want the middle two bytes in each test.
+     */
+    if ((pi & 3) == 1) {
+        store_atom_insert_al4(pv - 1, (uint32_t)val << 8, MAKE_64BIT_MASK(8, 16));
+        return;
+    } else if ((pi & 7) == 3) {
+        if (HAVE_al8) {
+            store_atom_insert_al8(pv - 3, (uint64_t)val << 24, MAKE_64BIT_MASK(24, 16));
+            return;
+        }
+    } else if ((pi & 15) == 7) {
+        if (HAVE_CMPXCHG128) {
+            Int128 v = int128_lshift(int128_make64(val), 56);
+            Int128 m = int128_lshift(int128_make64(0xffff), 56);
+            store_atom_insert_al16(pv - 7, v, m);
+            return;
+        }
+    } else {
+        g_assert_not_reached();
+    }
+
+    cpu_loop_exit_atomic(cpu, ra);
+}
+
+/**
+ * store_atom_4:
+ * @p: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 4 bytes to @p, honoring the atomicity of @memop.
+ */
+static void store_atom_4(CPUState *cpu, uintptr_t ra,
+                         void *pv, MemOp memop, uint32_t val)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int atmax;
+
+    if (likely((pi & 3) == 0)) {
+        store_atomic4(pv, val);
+        return;
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+    switch (atmax) {
+    case MO_8:
+        stl_he_p(pv, val);
+        return;
+    case MO_16:
+        store_atom_4_by_2(pv, val);
+        return;
+    case -MO_16:
+        {
+            uint32_t val_le = cpu_to_le32(val);
+            int s2 = pi & 3;
+            int s1 = 4 - s2;
+
+            switch (s2) {
+            case 1:
+                val_le = store_whole_le4(pv, s1, val_le);
+                *(uint8_t *)(pv + 3) = val_le;
+                break;
+            case 3:
+                *(uint8_t *)pv = val_le;
+                store_whole_le4(pv + 1, s2, val_le >> 8);
+                break;
+            case 0: /* aligned */
+            case 2: /* atmax MO_16 */
+            default:
+                g_assert_not_reached();
+            }
+        }
+        return;
+    case MO_32:
+        if ((pi & 7) < 4) {
+            if (HAVE_al8) {
+                store_whole_le8(pv, 4, cpu_to_le32(val));
+                return;
+            }
+        } else {
+            if (HAVE_CMPXCHG128) {
+                store_whole_le16(pv, 4, int128_make64(cpu_to_le32(val)));
+                return;
+            }
+        }
+        cpu_loop_exit_atomic(cpu, ra);
+    default:
+        g_assert_not_reached();
+    }
+}
+
+/**
+ * store_atom_8:
+ * @p: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 8 bytes to @p, honoring the atomicity of @memop.
+ */
+static void store_atom_8(CPUState *cpu, uintptr_t ra,
+                         void *pv, MemOp memop, uint64_t val)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    int atmax;
+
+    if (HAVE_al8 && likely((pi & 7) == 0)) {
+        store_atomic8(pv, val);
+        return;
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+    switch (atmax) {
+    case MO_8:
+        stq_he_p(pv, val);
+        return;
+    case MO_16:
+        store_atom_8_by_2(pv, val);
+        return;
+    case MO_32:
+        store_atom_8_by_4(pv, val);
+        return;
+    case -MO_32:
+        if (HAVE_al8) {
+            uint64_t val_le = cpu_to_le64(val);
+            int s2 = pi & 7;
+            int s1 = 8 - s2;
+
+            switch (s2) {
+            case 1 ... 3:
+                val_le = store_whole_le8(pv, s1, val_le);
+                store_bytes_leN(pv + s1, s2, val_le);
+                break;
+            case 5 ... 7:
+                val_le = store_bytes_leN(pv, s1, val_le);
+                store_whole_le8(pv + s1, s2, val_le);
+                break;
+            case 0: /* aligned */
+            case 4: /* atmax MO_32 */
+            default:
+                g_assert_not_reached();
+            }
+            return;
+        }
+        break;
+    case MO_64:
+        if (HAVE_CMPXCHG128) {
+            store_whole_le16(pv, 8, int128_make64(cpu_to_le64(val)));
+            return;
+        }
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    cpu_loop_exit_atomic(cpu, ra);
+}
+
+/**
+ * store_atom_16:
+ * @p: host address
+ * @val: the value to store
+ * @memop: the full memory op
+ *
+ * Store 16 bytes to @p, honoring the atomicity of @memop.
+ */
+static void store_atom_16(CPUState *cpu, uintptr_t ra,
+                          void *pv, MemOp memop, Int128 val)
+{
+    uintptr_t pi = (uintptr_t)pv;
+    uint64_t a, b;
+    int atmax;
+
+    if (HAVE_ATOMIC128_RW && likely((pi & 15) == 0)) {
+        atomic16_set(pv, val);
+        return;
+    }
+
+    atmax = required_atomicity(cpu, pi, memop);
+
+    a = HOST_BIG_ENDIAN ? int128_gethi(val) : int128_getlo(val);
+    b = HOST_BIG_ENDIAN ? int128_getlo(val) : int128_gethi(val);
+    switch (atmax) {
+    case MO_8:
+        memcpy(pv, &val, 16);
+        return;
+    case MO_16:
+        store_atom_8_by_2(pv, a);
+        store_atom_8_by_2(pv + 8, b);
+        return;
+    case MO_32:
+        store_atom_8_by_4(pv, a);
+        store_atom_8_by_4(pv + 8, b);
+        return;
+    case MO_64:
+        if (HAVE_al8) {
+            store_atomic8(pv, a);
+            store_atomic8(pv + 8, b);
+            return;
+        }
+        break;
+    case -MO_64:
+        if (HAVE_CMPXCHG128) {
+            uint64_t val_le;
+            int s2 = pi & 15;
+            int s1 = 16 - s2;
+
+            if (HOST_BIG_ENDIAN) {
+                val = bswap128(val);
+            }
+            switch (s2) {
+            case 1 ... 7:
+                val_le = store_whole_le16(pv, s1, val);
+                store_bytes_leN(pv + s1, s2, val_le);
+                break;
+            case 9 ... 15:
+                store_bytes_leN(pv, s1, int128_getlo(val));
+                val = int128_urshift(val, s1 * 8);
+                store_whole_le16(pv + s1, s2, val);
+                break;
+            case 0: /* aligned */
+            case 8: /* atmax MO_64 */
+            default:
+                g_assert_not_reached();
+            }
+            return;
+        }
+        break;
+    case MO_128:
+        break;
+    default:
+        g_assert_not_reached();
+    }
+    cpu_loop_exit_atomic(cpu, ra);
+}
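
As a quick sanity check of the required_atomicity() arithmetic above, the MO_ATOM_WITHIN16 case keeps full-size atomicity only while the access stays inside one 16-byte block. A stand-alone sketch (plain C, not QEMU code) with sizes in whole bytes rather than MemOp lg2 values:

#include <stdio.h>

/* Mirrors "tmp + (1 << size) <= 16 ? size : MO_8", expressed in bytes. */
static int within16_atomic_bytes(unsigned long host_addr, int size_bytes)
{
    unsigned off = host_addr & 15;
    return (off + size_bytes <= 16) ? size_bytes : 1;
}

int main(void)
{
    /* Offset 8: a 4-byte access fits in the block -> fully atomic (prints 4). */
    printf("%d\n", within16_atomic_bytes(0x1008, 4));
    /* Offset 14: the access crosses the block -> byte atomicity only (prints 1). */
    printf("%d\n", within16_atomic_bytes(0x100e, 4));
    return 0;
}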

+ 269 - 26
accel/tcg/ldst_common.c.inc

@@ -8,6 +8,235 @@
  * This work is licensed under the terms of the GNU GPL, version 2 or later.
  * See the COPYING file in the top-level directory.
  */
+/*
+ * Load helpers for tcg-ldst.h
+ */
+
+tcg_target_ulong helper_ldub_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+    return do_ld1_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_lduw_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    return do_ld2_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+tcg_target_ulong helper_ldul_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    return do_ld4_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+uint64_t helper_ldq_mmu(CPUArchState *env, uint64_t addr,
+                        MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    return do_ld8_mmu(env_cpu(env), addr, oi, retaddr, MMU_DATA_LOAD);
+}
+
+/*
+ * Provide signed versions of the load routines as well.  We can of course
+ * avoid this for 64-bit data, or for 32-bit data on 32-bit host.
+ */
+
+tcg_target_ulong helper_ldsb_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int8_t)helper_ldub_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsw_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int16_t)helper_lduw_mmu(env, addr, oi, retaddr);
+}
+
+tcg_target_ulong helper_ldsl_mmu(CPUArchState *env, uint64_t addr,
+                                 MemOpIdx oi, uintptr_t retaddr)
+{
+    return (int32_t)helper_ldul_mmu(env, addr, oi, retaddr);
+}
+
+Int128 helper_ld16_mmu(CPUArchState *env, uint64_t addr,
+                       MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    return do_ld16_mmu(env_cpu(env), addr, oi, retaddr);
+}
+
+Int128 helper_ld_i128(CPUArchState *env, uint64_t addr, uint32_t oi)
+{
+    return helper_ld16_mmu(env, addr, oi, GETPC());
+}
+
+/*
+ * Store helpers for tcg-ldst.h
+ */
+
+void helper_stb_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_8);
+    do_st1_mmu(env_cpu(env), addr, val, oi, ra);
+}
+
+void helper_stw_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stl_mmu(CPUArchState *env, uint64_t addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_stq_mmu(CPUArchState *env, uint64_t addr, uint64_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st16_mmu(CPUArchState *env, uint64_t addr, Int128 val,
+                     MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+}
+
+void helper_st_i128(CPUArchState *env, uint64_t addr, Int128 val, MemOpIdx oi)
+{
+    helper_st16_mmu(env, addr, val, oi, GETPC());
+}
+
+/*
+ * Load helpers for cpu_ldst.h
+ */
+
+static void plugin_load_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+    if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
+        qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    }
+}
+
+uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
+{
+    uint8_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_UB);
+    ret = do_ld1_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint16_t cpu_ldw_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint16_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    ret = do_ld2_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint32_t cpu_ldl_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint32_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    ret = do_ld4_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+uint64_t cpu_ldq_mmu(CPUArchState *env, abi_ptr addr,
+                     MemOpIdx oi, uintptr_t ra)
+{
+    uint64_t ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    ret = do_ld8_mmu(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+Int128 cpu_ld16_mmu(CPUArchState *env, abi_ptr addr,
+                    MemOpIdx oi, uintptr_t ra)
+{
+    Int128 ret;
+
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    ret = do_ld16_mmu(env_cpu(env), addr, oi, ra);
+    plugin_load_cb(env, addr, oi);
+    return ret;
+}
+
+/*
+ * Store helpers for cpu_ldst.h
+ */
+
+static void plugin_store_cb(CPUArchState *env, abi_ptr addr, MemOpIdx oi)
+{
+    if (cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
+        qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
+    }
+}
+
+void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    helper_stb_mmu(env, addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stw_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_16);
+    do_st2_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stl_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
+                    MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_32);
+    do_st4_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_stq_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
+                 MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_64);
+    do_st8_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+void cpu_st16_mmu(CPUArchState *env, abi_ptr addr, Int128 val,
+                  MemOpIdx oi, uintptr_t retaddr)
+{
+    tcg_debug_assert((get_memop(oi) & MO_SIZE) == MO_128);
+    do_st16_mmu(env_cpu(env), addr, val, oi, retaddr);
+    plugin_store_cb(env, addr, oi);
+}
+
+/*
+ * Wrappers of the above
+ */
 
 uint32_t cpu_ldub_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                             int mmu_idx, uintptr_t ra)
@@ -26,7 +255,7 @@ uint32_t cpu_lduw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
-    return cpu_ldw_be_mmu(env, addr, oi, ra);
+    return cpu_ldw_mmu(env, addr, oi, ra);
 }
 
 int cpu_ldsw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -39,21 +268,21 @@ uint32_t cpu_ldl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
-    return cpu_ldl_be_mmu(env, addr, oi, ra);
+    return cpu_ldl_mmu(env, addr, oi, ra);
 }
 
 uint64_t cpu_ldq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
-    return cpu_ldq_be_mmu(env, addr, oi, ra);
+    return cpu_ldq_mmu(env, addr, oi, ra);
 }
 
 uint32_t cpu_lduw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                                int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
-    return cpu_ldw_le_mmu(env, addr, oi, ra);
+    return cpu_ldw_mmu(env, addr, oi, ra);
 }
 
 int cpu_ldsw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
@@ -66,14 +295,14 @@ uint32_t cpu_ldl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
-    return cpu_ldl_le_mmu(env, addr, oi, ra);
+    return cpu_ldl_mmu(env, addr, oi, ra);
 }
 
 uint64_t cpu_ldq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr,
                               int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
-    return cpu_ldq_le_mmu(env, addr, oi, ra);
+    return cpu_ldq_mmu(env, addr, oi, ra);
 }
 
 void cpu_stb_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
@@ -87,49 +316,50 @@ void cpu_stw_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                           int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_BEUW | MO_UNALN, mmu_idx);
-    cpu_stw_be_mmu(env, addr, val, oi, ra);
+    cpu_stw_mmu(env, addr, val, oi, ra);
 }
 
 void cpu_stl_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                           int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_idx);
-    cpu_stl_be_mmu(env, addr, val, oi, ra);
+    cpu_stl_mmu(env, addr, val, oi, ra);
 }
 
 void cpu_stq_be_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                           int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_BEUQ | MO_UNALN, mmu_idx);
-    cpu_stq_be_mmu(env, addr, val, oi, ra);
+    cpu_stq_mmu(env, addr, val, oi, ra);
 }
 
 void cpu_stw_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                           int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_LEUW | MO_UNALN, mmu_idx);
-    cpu_stw_le_mmu(env, addr, val, oi, ra);
+    cpu_stw_mmu(env, addr, val, oi, ra);
 }
 
 void cpu_stl_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint32_t val,
                           int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_LEUL | MO_UNALN, mmu_idx);
-    cpu_stl_le_mmu(env, addr, val, oi, ra);
+    cpu_stl_mmu(env, addr, val, oi, ra);
 }
 
 void cpu_stq_le_mmuidx_ra(CPUArchState *env, abi_ptr addr, uint64_t val,
                           int mmu_idx, uintptr_t ra)
 {
     MemOpIdx oi = make_memop_idx(MO_LEUQ | MO_UNALN, mmu_idx);
-    cpu_stq_le_mmu(env, addr, val, oi, ra);
+    cpu_stq_mmu(env, addr, val, oi, ra);
 }
 
 /*--------------------------*/
 
 uint32_t cpu_ldub_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 {
-    return cpu_ldub_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    return cpu_ldub_mmuidx_ra(env, addr, mmu_index, ra);
 }
 
 int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
@@ -139,7 +369,8 @@ int cpu_ldsb_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 
 uint32_t cpu_lduw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 {
-    return cpu_lduw_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    return cpu_lduw_be_mmuidx_ra(env, addr, mmu_index, ra);
 }
 
 int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
@@ -149,17 +380,20 @@ int cpu_ldsw_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 
 uint32_t cpu_ldl_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 {
-    return cpu_ldl_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    return cpu_ldl_be_mmuidx_ra(env, addr, mmu_index, ra);
 }
 
 uint64_t cpu_ldq_be_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 {
-    return cpu_ldq_be_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    return cpu_ldq_be_mmuidx_ra(env, addr, mmu_index, ra);
 }
 
 uint32_t cpu_lduw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 {
-    return cpu_lduw_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    return cpu_lduw_le_mmuidx_ra(env, addr, mmu_index, ra);
 }
 
 int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
@@ -169,54 +403,63 @@ int cpu_ldsw_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 
 uint32_t cpu_ldl_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 {
-    return cpu_ldl_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    return cpu_ldl_le_mmuidx_ra(env, addr, mmu_index, ra);
 }
 
 uint64_t cpu_ldq_le_data_ra(CPUArchState *env, abi_ptr addr, uintptr_t ra)
 {
-    return cpu_ldq_le_mmuidx_ra(env, addr, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    return cpu_ldq_le_mmuidx_ra(env, addr, mmu_index, ra);
 }
 
 void cpu_stb_data_ra(CPUArchState *env, abi_ptr addr,
                      uint32_t val, uintptr_t ra)
 {
-    cpu_stb_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    cpu_stb_mmuidx_ra(env, addr, val, mmu_index, ra);
 }
 
 void cpu_stw_be_data_ra(CPUArchState *env, abi_ptr addr,
                         uint32_t val, uintptr_t ra)
 {
-    cpu_stw_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    cpu_stw_be_mmuidx_ra(env, addr, val, mmu_index, ra);
 }
 
 void cpu_stl_be_data_ra(CPUArchState *env, abi_ptr addr,
                         uint32_t val, uintptr_t ra)
 {
-    cpu_stl_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    cpu_stl_be_mmuidx_ra(env, addr, val, mmu_index, ra);
 }
 
 void cpu_stq_be_data_ra(CPUArchState *env, abi_ptr addr,
                         uint64_t val, uintptr_t ra)
 {
-    cpu_stq_be_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    cpu_stq_be_mmuidx_ra(env, addr, val, mmu_index, ra);
 }
 
 void cpu_stw_le_data_ra(CPUArchState *env, abi_ptr addr,
                         uint32_t val, uintptr_t ra)
 {
-    cpu_stw_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    cpu_stw_le_mmuidx_ra(env, addr, val, mmu_index, ra);
 }
 
 void cpu_stl_le_data_ra(CPUArchState *env, abi_ptr addr,
                         uint32_t val, uintptr_t ra)
 {
-    cpu_stl_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    cpu_stl_le_mmuidx_ra(env, addr, val, mmu_index, ra);
 }
 
 void cpu_stq_le_data_ra(CPUArchState *env, abi_ptr addr,
                         uint64_t val, uintptr_t ra)
 {
-    cpu_stq_le_mmuidx_ra(env, addr, val, cpu_mmu_index(env, false), ra);
+    int mmu_index = cpu_mmu_index(env_cpu(env), false);
+    cpu_stq_le_mmuidx_ra(env, addr, val, mmu_index, ra);
 }
 
 /*--------------------------*/
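
The wrappers above all follow one pattern: resolve the MMU index with cpu_mmu_index(env_cpu(env), false), fold the endianness and alignment into the MemOp, and dispatch to the endian-agnostic cpu_ld*/cpu_st*_mmu entry points. A hedged sketch of that pattern from a hypothetical caller; the function name is invented, the calls mirror the code above:

/* Illustrative only -- mirrors cpu_ldl_be_mmuidx_ra()/cpu_ldl_be_data_ra(). */
static uint32_t example_load_be32(CPUArchState *env, abi_ptr addr, uintptr_t ra)
{
    int mmu_index = cpu_mmu_index(env_cpu(env), false);
    MemOpIdx oi = make_memop_idx(MO_BEUL | MO_UNALN, mmu_index);

    return cpu_ldl_mmu(env, addr, oi, ra);
}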

+ 18 - 11
accel/tcg/meson.build

@@ -1,7 +1,9 @@
-tcg_ss = ss.source_set()
-tcg_ss.add(files(
-  'tcg-all.c',
+common_ss.add(when: 'CONFIG_TCG', if_true: files(
   'cpu-exec-common.c',
+))
+tcg_specific_ss = ss.source_set()
+tcg_specific_ss.add(files(
+  'tcg-all.c',
   'cpu-exec.c',
   'tb-maint.c',
   'tcg-runtime-gvec.c',
@@ -9,19 +11,24 @@ tcg_ss.add(files(
   'translate-all.c',
   'translator.c',
 ))
-tcg_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
-tcg_ss.add(when: 'CONFIG_SOFTMMU', if_false: files('user-exec-stub.c'))
-tcg_ss.add(when: 'CONFIG_PLUGIN', if_true: [files('plugin-gen.c')])
-tcg_ss.add(when: libdw, if_true: files('debuginfo.c'))
-tcg_ss.add(when: 'CONFIG_LINUX', if_true: files('perf.c'))
-specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_ss)
+tcg_specific_ss.add(when: 'CONFIG_USER_ONLY', if_true: files('user-exec.c'))
+tcg_specific_ss.add(when: 'CONFIG_SYSTEM_ONLY', if_false: files('user-exec-stub.c'))
+if get_option('plugins')
+  tcg_specific_ss.add(files('plugin-gen.c'))
+endif
+specific_ss.add_all(when: 'CONFIG_TCG', if_true: tcg_specific_ss)
 
-specific_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
+specific_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
   'cputlb.c',
+  'watchpoint.c',
+))
+
+system_ss.add(when: ['CONFIG_TCG'], if_true: files(
+  'icount-common.c',
   'monitor.c',
 ))
 
-tcg_module_ss.add(when: ['CONFIG_SOFTMMU', 'CONFIG_TCG'], if_true: files(
+tcg_module_ss.add(when: ['CONFIG_SYSTEM_ONLY', 'CONFIG_TCG'], if_true: files(
   'tcg-accel-ops.c',
   'tcg-accel-ops-mttcg.c',
   'tcg-accel-ops-icount.c',

+ 170 - 32
accel/tcg/monitor.c

@@ -7,6 +7,8 @@
  */

 #include "qemu/osdep.h"
+#include "qemu/accel.h"
+#include "qemu/qht.h"
 #include "qapi/error.h"
 #include "qapi/type-helpers.h"
 #include "qapi/qapi-commands-machine.h"
@@ -14,7 +16,9 @@
 #include "sysemu/cpus.h"
 #include "sysemu/cpu-timers.h"
 #include "sysemu/tcg.h"
-#include "internal.h"
+#include "tcg/tcg.h"
+#include "internal-common.h"
+#include "tb-context.h"


 static void dump_drift_info(GString *buf)
@@ -36,6 +40,165 @@ static void dump_drift_info(GString *buf)
     }
 }

+static void dump_accel_info(GString *buf)
+{
+    AccelState *accel = current_accel();
+    bool one_insn_per_tb = object_property_get_bool(OBJECT(accel),
+                                                    "one-insn-per-tb",
+                                                    &error_fatal);
+
+    g_string_append_printf(buf, "Accelerator settings:\n");
+    g_string_append_printf(buf, "one-insn-per-tb: %s\n\n",
+                           one_insn_per_tb ? "on" : "off");
+}
+
+static void print_qht_statistics(struct qht_stats hst, GString *buf)
+{
+    uint32_t hgram_opts;
+    size_t hgram_bins;
+    char *hgram;
+
+    if (!hst.head_buckets) {
+        return;
+    }
+    g_string_append_printf(buf, "TB hash buckets     %zu/%zu "
+                           "(%0.2f%% head buckets used)\n",
+                           hst.used_head_buckets, hst.head_buckets,
+                           (double)hst.used_head_buckets /
+                           hst.head_buckets * 100);
+
+    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
+    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
+    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
+        hgram_opts |= QDIST_PR_NODECIMAL;
+    }
+    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
+    g_string_append_printf(buf, "TB hash occupancy   %0.2f%% avg chain occ. "
+                           "Histogram: %s\n",
+                           qdist_avg(&hst.occupancy) * 100, hgram);
+    g_free(hgram);
+
+    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
+    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
+    if (hgram_bins > 10) {
+        hgram_bins = 10;
+    } else {
+        hgram_bins = 0;
+        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
+    }
+    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
+    g_string_append_printf(buf, "TB hash avg chain   %0.3f buckets. "
+                           "Histogram: %s\n",
+                           qdist_avg(&hst.chain), hgram);
+    g_free(hgram);
+}
+
+struct tb_tree_stats {
+    size_t nb_tbs;
+    size_t host_size;
+    size_t target_size;
+    size_t max_target_size;
+    size_t direct_jmp_count;
+    size_t direct_jmp2_count;
+    size_t cross_page;
+};
+
+static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
+{
+    const TranslationBlock *tb = value;
+    struct tb_tree_stats *tst = data;
+
+    tst->nb_tbs++;
+    tst->host_size += tb->tc.size;
+    tst->target_size += tb->size;
+    if (tb->size > tst->max_target_size) {
+        tst->max_target_size = tb->size;
+    }
+    if (tb->page_addr[1] != -1) {
+        tst->cross_page++;
+    }
+    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
+        tst->direct_jmp_count++;
+        if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
+            tst->direct_jmp2_count++;
+        }
+    }
+    return false;
+}
+
+static void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
+{
+    CPUState *cpu;
+    size_t full = 0, part = 0, elide = 0;
+
+    CPU_FOREACH(cpu) {
+        full += qatomic_read(&cpu->neg.tlb.c.full_flush_count);
+        part += qatomic_read(&cpu->neg.tlb.c.part_flush_count);
+        elide += qatomic_read(&cpu->neg.tlb.c.elide_flush_count);
+    }
+    *pfull = full;
+    *ppart = part;
+    *pelide = elide;
+}
+
+static void tcg_dump_info(GString *buf)
+{
+    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
+}
+
+static void dump_exec_info(GString *buf)
+{
+    struct tb_tree_stats tst = {};
+    struct qht_stats hst;
+    size_t nb_tbs, flush_full, flush_part, flush_elide;
+
+    tcg_tb_foreach(tb_tree_stats_iter, &tst);
+    nb_tbs = tst.nb_tbs;
+    /* XXX: avoid using doubles ? */
+    g_string_append_printf(buf, "Translation buffer state:\n");
+    /*
+     * Report total code size including the padding and TB structs;
+     * otherwise users might think "-accel tcg,tb-size" is not honoured.
+     * For avg host size we use the precise numbers from tb_tree_stats though.
+     */
+    g_string_append_printf(buf, "gen code size       %zu/%zu\n",
+                           tcg_code_size(), tcg_code_capacity());
+    g_string_append_printf(buf, "TB count            %zu\n", nb_tbs);
+    g_string_append_printf(buf, "TB avg target size  %zu max=%zu bytes\n",
+                           nb_tbs ? tst.target_size / nb_tbs : 0,
+                           tst.max_target_size);
+    g_string_append_printf(buf, "TB avg host size    %zu bytes "
+                           "(expansion ratio: %0.1f)\n",
+                           nb_tbs ? tst.host_size / nb_tbs : 0,
+                           tst.target_size ?
+                           (double)tst.host_size / tst.target_size : 0);
+    g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
+                           tst.cross_page,
+                           nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
+    g_string_append_printf(buf, "direct jump count   %zu (%zu%%) "
+                           "(2 jumps=%zu %zu%%)\n",
+                           tst.direct_jmp_count,
+                           nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
+                           tst.direct_jmp2_count,
+                           nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
+
+    qht_statistics_init(&tb_ctx.htable, &hst);
+    print_qht_statistics(hst, buf);
+    qht_statistics_destroy(&hst);
+
+    g_string_append_printf(buf, "\nStatistics:\n");
+    g_string_append_printf(buf, "TB flush count      %u\n",
+                           qatomic_read(&tb_ctx.tb_flush_count));
+    g_string_append_printf(buf, "TB invalidate count %u\n",
+                           qatomic_read(&tb_ctx.tb_phys_invalidate_count));
+
+    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
+    g_string_append_printf(buf, "TLB full flushes    %zu\n", flush_full);
+    g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
+    g_string_append_printf(buf, "TLB elided flushes  %zu\n", flush_elide);
+    tcg_dump_info(buf);
+}
+
 HumanReadableText *qmp_x_query_jit(Error **errp)
 {
     g_autoptr(GString) buf = g_string_new("");
@@ -45,12 +208,18 @@ HumanReadableText *qmp_x_query_jit(Error **errp)
         return NULL;
     }

+    dump_accel_info(buf);
     dump_exec_info(buf);
     dump_drift_info(buf);

     return human_readable_text_from_str(buf);
 }

+static void tcg_dump_op_count(GString *buf)
+{
+    g_string_append_printf(buf, "[TCG profiler not compiled]\n");
+}
+
 HumanReadableText *qmp_x_query_opcount(Error **errp)
 {
     g_autoptr(GString) buf = g_string_new("");
@@ -66,37 +235,6 @@ HumanReadableText *qmp_x_query_opcount(Error **errp)
     return human_readable_text_from_str(buf);
 }

-#ifdef CONFIG_PROFILER
-
-int64_t dev_time;
-
-HumanReadableText *qmp_x_query_profile(Error **errp)
-{
-    g_autoptr(GString) buf = g_string_new("");
-    static int64_t last_cpu_exec_time;
-    int64_t cpu_exec_time;
-    int64_t delta;
-
-    cpu_exec_time = tcg_cpu_exec_time();
-    delta = cpu_exec_time - last_cpu_exec_time;
-
-    g_string_append_printf(buf, "async time  %" PRId64 " (%0.3f)\n",
-                           dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
-    g_string_append_printf(buf, "qemu time   %" PRId64 " (%0.3f)\n",
-                           delta, delta / (double)NANOSECONDS_PER_SECOND);
-    last_cpu_exec_time = cpu_exec_time;
-    dev_time = 0;
-
-    return human_readable_text_from_str(buf);
-}
-#else
-HumanReadableText *qmp_x_query_profile(Error **errp)
-{
-    error_setg(errp, "Internal profiler not compiled");
-    return NULL;
-}
-#endif
-
 static void hmp_tcg_register(void)
 {
     monitor_register_hmp_info_hrt("jit", qmp_x_query_jit);

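The new dump_exec_info() above reports plain ratios over the totals gathered by tb_tree_stats_iter(). The short program below recomputes those summary lines outside QEMU; the field names mirror struct tb_tree_stats, while the input numbers are invented purely for illustration.

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    /* hypothetical totals, as tb_tree_stats_iter() would accumulate them */
    size_t nb_tbs = 2000, target_size = 120000, host_size = 480000;
    size_t max_target_size = 512, cross_page = 40, direct_jmp_count = 1500;

    printf("TB avg target size  %zu max=%zu bytes\n",
           nb_tbs ? target_size / nb_tbs : 0, max_target_size);
    printf("TB avg host size    %zu bytes (expansion ratio: %0.1f)\n",
           nb_tbs ? host_size / nb_tbs : 0,
           target_size ? (double)host_size / target_size : 0);
    printf("cross page TB count %zu (%zu%%)\n",
           cross_page, nb_tbs ? (cross_page * 100) / nb_tbs : 0);
    printf("direct jump count   %zu (%zu%%)\n",
           direct_jmp_count, nb_tbs ? (direct_jmp_count * 100) / nb_tbs : 0);
    return 0;
}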
+ 308 - 781
accel/tcg/plugin-gen.c

File diff suppressed because it is too large


+ 0 - 4
accel/tcg/plugin-helpers.h

@@ -1,4 +0,0 @@
-#ifdef CONFIG_PLUGIN
-DEF_HELPER_FLAGS_2(plugin_vcpu_udata_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, ptr)
-DEF_HELPER_FLAGS_4(plugin_vcpu_mem_cb, TCG_CALL_NO_RWG | TCG_CALL_PLUGIN, void, i32, i32, i64, ptr)
-#endif

+ 8 - 8
accel/tcg/tb-hash.h

@@ -35,16 +35,16 @@
 #define TB_JMP_ADDR_MASK (TB_JMP_PAGE_SIZE - 1)
 #define TB_JMP_PAGE_MASK (TB_JMP_CACHE_SIZE - TB_JMP_PAGE_SIZE)

-static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_page(vaddr pc)
 {
-    target_ulong tmp;
+    vaddr tmp;
     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
     return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
 }

-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
 {
-    target_ulong tmp;
+    vaddr tmp;
     tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
     return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
            | (tmp & TB_JMP_ADDR_MASK));
@@ -53,7 +53,7 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 #else

 /* In user-mode we can get better hashing because we do not have a TLB */
-static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
+static inline unsigned int tb_jmp_cache_hash_func(vaddr pc)
 {
     return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
 }
@@ -61,10 +61,10 @@ static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
 #endif /* CONFIG_SOFTMMU */

 static inline
-uint32_t tb_hash_func(tb_page_addr_t phys_pc, target_ulong pc, uint32_t flags,
-                      uint32_t cf_mask, uint32_t trace_vcpu_dstate)
+uint32_t tb_hash_func(tb_page_addr_t phys_pc, vaddr pc,
+                      uint32_t flags, uint64_t flags2, uint32_t cf_mask)
 {
-    return qemu_xxhash7(phys_pc, pc, flags, cf_mask, trace_vcpu_dstate);
+    return qemu_xxhash8(phys_pc, pc, flags2, flags, cf_mask);
 }

 #endif

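The hunk above widens the jump-cache hash helpers from target_ulong to vaddr and switches tb_hash_func() from qemu_xxhash7() to qemu_xxhash8(), mixing in cs_base (as flags2) where the removed trace_vcpu_dstate used to go. Below is a minimal, self-contained sketch of the user-mode slot computation; the vaddr typedef and the sample PC values are stand-ins, only the hash formula and the TB_JMP_CACHE_BITS constant come from the patch.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define TB_JMP_CACHE_BITS 12
#define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

typedef uint64_t vaddr;                   /* stand-in for QEMU's vaddr */

/* user-mode variant: no TLB, so a plain xor-fold picks the cache slot */
static unsigned int tb_jmp_cache_hash_func(vaddr pc)
{
    return (pc ^ (pc >> TB_JMP_CACHE_BITS)) & (TB_JMP_CACHE_SIZE - 1);
}

int main(void)
{
    vaddr pcs[3] = { 0x400000, 0x400004, 0x7f00deadbeefULL };

    for (int i = 0; i < 3; i++) {
        printf("pc=0x%" PRIx64 " -> slot %u\n",
               (uint64_t)pcs[i], tb_jmp_cache_hash_func(pcs[i]));
    }
    return 0;
}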
+ 11 - 6
accel/tcg/tb-jmp-cache.h

@@ -9,20 +9,25 @@
 #ifndef ACCEL_TCG_TB_JMP_CACHE_H
 #define ACCEL_TCG_TB_JMP_CACHE_H

+#include "qemu/rcu.h"
+#include "exec/cpu-common.h"
+
 #define TB_JMP_CACHE_BITS 12
 #define TB_JMP_CACHE_SIZE (1 << TB_JMP_CACHE_BITS)

 /*
- * Accessed in parallel; all accesses to 'tb' must be atomic.
- * For CF_PCREL, accesses to 'pc' must be protected by a
- * load_acquire/store_release to 'tb'.
+ * Invalidated in parallel; all accesses to 'tb' must be atomic.
+ * A valid entry is read/written by a single CPU, therefore there is
+ * no need for qatomic_rcu_read() and pc is always consistent with a
+ * non-NULL value of 'tb'.  Strictly speaking pc is only needed for
+ * CF_PCREL, but it's used always for simplicity.
  */
-struct CPUJumpCache {
+typedef struct CPUJumpCache {
     struct rcu_head rcu;
     struct {
         TranslationBlock *tb;
-        target_ulong pc;
+        vaddr pc;
     } array[TB_JMP_CACHE_SIZE];
-};
+} CPUJumpCache;

 #endif /* ACCEL_TCG_TB_JMP_CACHE_H */

+ 150 - 153
accel/tcg/tb-maint.c

@@ -1,5 +1,5 @@
 /*
- * Translation Block Maintaince
+ * Translation Block Maintenance
  *
  *  Copyright (c) 2003 Fabrice Bellard
  *
@@ -23,6 +23,7 @@
 #include "exec/cputlb.h"
 #include "exec/log.h"
 #include "exec/exec-all.h"
+#include "exec/page-protection.h"
 #include "exec/tb-flush.h"
 #include "exec/translate-all.h"
 #include "sysemu/tcg.h"
@@ -30,7 +31,8 @@
 #include "tcg/tcg-apple-jit.h"
 #include "tb-hash.h"
 #include "tb-context.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"


 /* List iterators for lists of tagged pointers in TranslationBlock. */
@@ -51,7 +53,6 @@ static bool tb_cmp(const void *ap, const void *bp)
             a->cs_base == b->cs_base &&
             a->flags == b->flags &&
             (tb_cflags(a) & ~CF_INVALID) == (tb_cflags(b) & ~CF_INVALID) &&
-            a->trace_vcpu_dstate == b->trace_vcpu_dstate &&
             tb_page_addr0(a) == tb_page_addr0(b) &&
             tb_page_addr1(a) == tb_page_addr1(b));
 }
@@ -72,17 +73,7 @@ typedef struct PageDesc PageDesc;
  */
 #define assert_page_locked(pd) tcg_debug_assert(have_mmap_lock())

-static inline void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
-                                  PageDesc **ret_p2, tb_page_addr_t phys2,
-                                  bool alloc)
-{
-    *ret_p1 = NULL;
-    *ret_p2 = NULL;
-}
-
-static inline void page_unlock(PageDesc *pd) { }
-static inline void page_lock_tb(const TranslationBlock *tb) { }
-static inline void page_unlock_tb(const TranslationBlock *tb) { }
+static inline void tb_lock_pages(const TranslationBlock *tb) { }
 
 
 /*
  * For user-only, since we are protecting all of memory with a single lock,
@@ -98,9 +89,9 @@ static void tb_remove_all(void)
 }

 /* Call with mmap_lock held. */
-static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
+static void tb_record(TranslationBlock *tb)
 {
-    target_ulong addr;
+    vaddr addr;
     int flags;

     assert_memory_lock();
@@ -219,13 +210,12 @@ static PageDesc *page_find_alloc(tb_page_addr_t index, bool alloc)
 {
     PageDesc *pd;
     void **lp;
-    int i;

     /* Level 1.  Always allocated.  */
     lp = l1_map + ((index >> v_l1_shift) & (v_l1_size - 1));

     /* Level 2..N-1.  */
-    for (i = v_l2_levels; i > 0; i--) {
+    for (int i = v_l2_levels; i > 0; i--) {
         void **p = qatomic_rcu_read(lp);

         if (p == NULL) {
@@ -393,12 +383,108 @@ static void page_lock(PageDesc *pd)
     qemu_spin_lock(&pd->lock);
 }

+/* Like qemu_spin_trylock, returns false on success */
+static bool page_trylock(PageDesc *pd)
+{
+    bool busy = qemu_spin_trylock(&pd->lock);
+    if (!busy) {
+        page_lock__debug(pd);
+    }
+    return busy;
+}
+
 static void page_unlock(PageDesc *pd)
 {
     qemu_spin_unlock(&pd->lock);
     page_unlock__debug(pd);
 }

+void tb_lock_page0(tb_page_addr_t paddr)
+{
+    page_lock(page_find_alloc(paddr >> TARGET_PAGE_BITS, true));
+}
+
+void tb_lock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
+{
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+    PageDesc *pd0, *pd1;
+
+    if (pindex0 == pindex1) {
+        /* Identical pages, and the first page is already locked. */
+        return;
+    }
+
+    pd1 = page_find_alloc(pindex1, true);
+    if (pindex0 < pindex1) {
+        /* Correct locking order, we may block. */
+        page_lock(pd1);
+        return;
+    }
+
+    /* Incorrect locking order, we cannot block lest we deadlock. */
+    if (!page_trylock(pd1)) {
+        return;
+    }
+
+    /*
+     * Drop the lock on page0 and get both page locks in the right order.
+     * Restart translation via longjmp.
+     */
+    pd0 = page_find_alloc(pindex0, false);
+    page_unlock(pd0);
+    page_lock(pd1);
+    page_lock(pd0);
+    siglongjmp(tcg_ctx->jmp_trans, -3);
+}
+
+void tb_unlock_page1(tb_page_addr_t paddr0, tb_page_addr_t paddr1)
+{
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    if (pindex0 != pindex1) {
+        page_unlock(page_find_alloc(pindex1, false));
+    }
+}
+
+static void tb_lock_pages(TranslationBlock *tb)
+{
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    if (unlikely(paddr0 == -1)) {
+        return;
+    }
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        if (pindex0 < pindex1) {
+            page_lock(page_find_alloc(pindex0, true));
+            page_lock(page_find_alloc(pindex1, true));
+            return;
+        }
+        page_lock(page_find_alloc(pindex1, true));
+    }
+    page_lock(page_find_alloc(pindex0, true));
+}
+
+void tb_unlock_pages(TranslationBlock *tb)
+{
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    if (unlikely(paddr0 == -1)) {
+        return;
+    }
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        page_unlock(page_find_alloc(pindex1, false));
+    }
+    page_unlock(page_find_alloc(pindex0, false));
+}
+
 static inline struct page_entry *
 page_entry_new(PageDesc *pd, tb_page_addr_t index)
 {
@@ -422,13 +508,10 @@ static void page_entry_destroy(gpointer p)
 /* returns false on success */
 static bool page_entry_trylock(struct page_entry *pe)
 {
-    bool busy;
-
-    busy = qemu_spin_trylock(&pe->pd->lock);
+    bool busy = page_trylock(pe->pd);
     if (!busy) {
         g_assert(!pe->locked);
         pe->locked = true;
-        page_lock__debug(pe->pd);
     }
     return busy;
 }
@@ -606,8 +689,7 @@ static void tb_remove_all(void)
  * Add the tb in the target page and protect it if necessary.
  * Called with @p->lock held.
  */
-static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
-                               unsigned int n)
+static void tb_page_add(PageDesc *p, TranslationBlock *tb, unsigned int n)
 {
     bool page_already_protected;

@@ -627,15 +709,21 @@ static inline void tb_page_add(PageDesc *p, TranslationBlock *tb,
     }
 }

-static void tb_record(TranslationBlock *tb, PageDesc *p1, PageDesc *p2)
+static void tb_record(TranslationBlock *tb)
 {
-    tb_page_add(p1, tb, 0);
-    if (unlikely(p2)) {
-        tb_page_add(p2, tb, 1);
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    assert(paddr0 != -1);
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        tb_page_add(page_find_alloc(pindex1, false), tb, 1);
     }
+    tb_page_add(page_find_alloc(pindex0, false), tb, 0);
 }

-static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
+static void tb_page_remove(PageDesc *pd, TranslationBlock *tb)
 {
     TranslationBlock *tb1;
     uintptr_t *pprev;
@@ -655,74 +743,16 @@ static inline void tb_page_remove(PageDesc *pd, TranslationBlock *tb)

 static void tb_remove(TranslationBlock *tb)
 {
-    PageDesc *pd;
-
-    pd = page_find(tb->page_addr[0] >> TARGET_PAGE_BITS);
-    tb_page_remove(pd, tb);
-    if (unlikely(tb->page_addr[1] != -1)) {
-        pd = page_find(tb->page_addr[1] >> TARGET_PAGE_BITS);
-        tb_page_remove(pd, tb);
-    }
-}
-
-static void page_lock_pair(PageDesc **ret_p1, tb_page_addr_t phys1,
-                           PageDesc **ret_p2, tb_page_addr_t phys2, bool alloc)
-{
-    PageDesc *p1, *p2;
-    tb_page_addr_t page1;
-    tb_page_addr_t page2;
-
-    assert_memory_lock();
-    g_assert(phys1 != -1);
-
-    page1 = phys1 >> TARGET_PAGE_BITS;
-    page2 = phys2 >> TARGET_PAGE_BITS;
-
-    p1 = page_find_alloc(page1, alloc);
-    if (ret_p1) {
-        *ret_p1 = p1;
-    }
-    if (likely(phys2 == -1)) {
-        page_lock(p1);
-        return;
-    } else if (page1 == page2) {
-        page_lock(p1);
-        if (ret_p2) {
-            *ret_p2 = p1;
-        }
-        return;
-    }
-    p2 = page_find_alloc(page2, alloc);
-    if (ret_p2) {
-        *ret_p2 = p2;
-    }
-    if (page1 < page2) {
-        page_lock(p1);
-        page_lock(p2);
-    } else {
-        page_lock(p2);
-        page_lock(p1);
-    }
-}
-
-/* lock the page(s) of a TB in the correct acquisition order */
-static void page_lock_tb(const TranslationBlock *tb)
-{
-    page_lock_pair(NULL, tb_page_addr0(tb), NULL, tb_page_addr1(tb), false);
-}
-
-static void page_unlock_tb(const TranslationBlock *tb)
-{
-    PageDesc *p1 = page_find(tb_page_addr0(tb) >> TARGET_PAGE_BITS);
-
-    page_unlock(p1);
-    if (unlikely(tb_page_addr1(tb) != -1)) {
-        PageDesc *p2 = page_find(tb_page_addr1(tb) >> TARGET_PAGE_BITS);
-
-        if (p2 != p1) {
-            page_unlock(p2);
-        }
+    tb_page_addr_t paddr0 = tb_page_addr0(tb);
+    tb_page_addr_t paddr1 = tb_page_addr1(tb);
+    tb_page_addr_t pindex0 = paddr0 >> TARGET_PAGE_BITS;
+    tb_page_addr_t pindex1 = paddr1 >> TARGET_PAGE_BITS;
+
+    assert(paddr0 != -1);
+    if (unlikely(paddr1 != -1) && pindex0 != pindex1) {
+        tb_page_remove(page_find_alloc(pindex1, false), tb);
     }
+    tb_page_remove(page_find_alloc(pindex0, false), tb);
 }
 #endif /* CONFIG_USER_ONLY */

@@ -747,7 +777,7 @@ static void do_tb_flush(CPUState *cpu, run_on_cpu_data tb_flush_count)

     tcg_region_reset_all();
     /* XXX: flush processor icache at this point if cache flush is expensive */
-    qatomic_mb_set(&tb_ctx.tb_flush_count, tb_ctx.tb_flush_count + 1);
+    qatomic_inc(&tb_ctx.tb_flush_count);

 done:
     mmap_unlock();
@@ -759,9 +789,9 @@ done:
 void tb_flush(CPUState *cpu)
 {
     if (tcg_enabled()) {
-        unsigned tb_flush_count = qatomic_mb_read(&tb_ctx.tb_flush_count);
+        unsigned tb_flush_count = qatomic_read(&tb_ctx.tb_flush_count);

-        if (cpu_in_exclusive_context(cpu)) {
+        if (cpu_in_serial_context(cpu)) {
             do_tb_flush(cpu, RUN_ON_CPU_HOST_INT(tb_flush_count));
         } else {
             async_safe_run_on_cpu(cpu, do_tb_flush,
@@ -889,7 +919,7 @@ static void do_tb_phys_invalidate(TranslationBlock *tb, bool rm_from_page_list)
     /* remove the TB from the hash list */
     phys_pc = tb_page_addr0(tb);
     h = tb_hash_func(phys_pc, (orig_cflags & CF_PCREL ? 0 : tb->pc),
-                     tb->flags, orig_cflags, tb->trace_vcpu_dstate);
+                     tb->flags, tb->cs_base, orig_cflags);
     if (!qht_remove(&tb_ctx.htable, tb, h)) {
         return;
     }
@@ -927,18 +957,16 @@ static void tb_phys_invalidate__locked(TranslationBlock *tb)
 void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
 {
     if (page_addr == -1 && tb_page_addr0(tb) != -1) {
-        page_lock_tb(tb);
+        tb_lock_pages(tb);
         do_tb_phys_invalidate(tb, true);
-        page_unlock_tb(tb);
+        tb_unlock_pages(tb);
     } else {
         do_tb_phys_invalidate(tb, false);
     }
 }

 /*
- * Add a new TB and link it to the physical page tables. phys_page2 is
- * (-1) to indicate that only one page contains the TB.
- *
+ * Add a new TB and link it to the physical page tables.
  * Called with mmap_lock held for user-mode emulation.
  *
  * Returns a pointer @tb, or a pointer to an existing TB that matches @tb.
@@ -946,43 +974,29 @@ void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr)
  * for the same block of guest code that @tb corresponds to. In that case,
  * the caller should discard the original @tb, and use instead the returned TB.
  */
-TranslationBlock *tb_link_page(TranslationBlock *tb, tb_page_addr_t phys_pc,
-                               tb_page_addr_t phys_page2)
+TranslationBlock *tb_link_page(TranslationBlock *tb)
 {
-    PageDesc *p;
-    PageDesc *p2 = NULL;
     void *existing_tb = NULL;
     uint32_t h;

     assert_memory_lock();
     tcg_debug_assert(!(tb->cflags & CF_INVALID));

-    /*
-     * Add the TB to the page list, acquiring first the pages's locks.
-     * We keep the locks held until after inserting the TB in the hash table,
-     * so that if the insertion fails we know for sure that the TBs are still
-     * in the page descriptors.
-     * Note that inserting into the hash table first isn't an option, since
-     * we can only insert TBs that are fully initialized.
-     */
-    page_lock_pair(&p, phys_pc, &p2, phys_page2, true);
-    tb_record(tb, p, p2);
+    tb_record(tb);

     /* add in the hash table */
-    h = tb_hash_func(phys_pc, (tb->cflags & CF_PCREL ? 0 : tb->pc),
-                     tb->flags, tb->cflags, tb->trace_vcpu_dstate);
+    h = tb_hash_func(tb_page_addr0(tb), (tb->cflags & CF_PCREL ? 0 : tb->pc),
+                     tb->flags, tb->cs_base, tb->cflags);
     qht_insert(&tb_ctx.htable, tb, h, &existing_tb);

     /* remove TB from the page(s) if we couldn't insert it */
     if (unlikely(existing_tb)) {
         tb_remove(tb);
-        tb = existing_tb;
+        tb_unlock_pages(tb);
+        return existing_tb;
     }

-    if (p2 && p2 != p) {
-        page_unlock(p2);
-    }
-    page_unlock(p);
+    tb_unlock_pages(tb);
     return tb;
 }

@@ -1009,7 +1023,7 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
  * Called with mmap_lock held for user-mode emulation
  * NOTE: this function must not be called while a TB is running.
  */
-void tb_invalidate_phys_page(tb_page_addr_t addr)
+static void tb_invalidate_phys_page(tb_page_addr_t addr)
 {
     tb_page_addr_t start, last;

@@ -1094,6 +1108,9 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
     TranslationBlock *current_tb = retaddr ? tcg_tb_lookup(retaddr) : NULL;
 #endif /* TARGET_HAS_PRECISE_SMC */

+    /* Range may not cross a page. */
+    tcg_debug_assert(((start ^ last) & TARGET_PAGE_MASK) == 0);
+
     /*
      * We remove all the TBs in the range [start, last].
      * XXX: see if in some cases it could be faster to invalidate all the code
@@ -1145,28 +1162,6 @@ tb_invalidate_phys_page_range__locked(struct page_collection *pages,
 #endif
 }

-/*
- * Invalidate all TBs which intersect with the target physical
- * address page @addr.
- */
-void tb_invalidate_phys_page(tb_page_addr_t addr)
-{
-    struct page_collection *pages;
-    tb_page_addr_t start, last;
-    PageDesc *p;
-
-    p = page_find(addr >> TARGET_PAGE_BITS);
-    if (p == NULL) {
-        return;
-    }
-
-    start = addr & TARGET_PAGE_MASK;
-    last = addr | ~TARGET_PAGE_MASK;
-    pages = page_collection_lock(start, last);
-    tb_invalidate_phys_page_range__locked(pages, p, start, last, 0);
-    page_collection_unlock(pages);
-}
-
 /*
  * Invalidate all TBs which intersect with the target physical address range
  * [start;last]. NOTE: start and end may refer to *different* physical pages.
@@ -1184,15 +1179,17 @@ void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t last)
     index_last = last >> TARGET_PAGE_BITS;
     for (index = start >> TARGET_PAGE_BITS; index <= index_last; index++) {
         PageDesc *pd = page_find(index);
-        tb_page_addr_t bound;
+        tb_page_addr_t page_start, page_last;

         if (pd == NULL) {
             continue;
         }
         assert_page_locked(pd);
-        bound = (index << TARGET_PAGE_BITS) | ~TARGET_PAGE_MASK;
-        bound = MIN(bound, last);
-        tb_invalidate_phys_page_range__locked(pages, pd, start, bound, 0);
+        page_start = index << TARGET_PAGE_BITS;
+        page_last = page_start | ~TARGET_PAGE_MASK;
+        page_last = MIN(page_last, last);
+        tb_invalidate_phys_page_range__locked(pages, pd,
+                                              page_start, page_last, 0);
     }
     page_collection_unlock(pages);
 }

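The tb-maint.c changes above replace page_lock_pair()/page_lock_tb() with tb_lock_pages()/tb_lock_page1(), which always take the lock of the lower page index first and only trylock when the second page sorts below the first, restarting translation via siglongjmp on contention. The snippet below is a simplified, self-contained analogue of that ordering rule using plain pthread mutexes; the fixed page array and the silent retry (instead of a translation restart) are assumptions made for the example.

#include <pthread.h>
#include <stdio.h>

#define NPAGES 16
static pthread_mutex_t page_lock[NPAGES];

/* Take both page locks without ever blocking in the "wrong" order. */
static void lock_two_pages(unsigned pindex0, unsigned pindex1)
{
    pthread_mutex_lock(&page_lock[pindex0]);
    if (pindex0 == pindex1) {
        return;
    }
    if (pindex0 < pindex1) {
        /* ascending order: blocking here cannot deadlock */
        pthread_mutex_lock(&page_lock[pindex1]);
        return;
    }
    /* descending order: never block, or two threads could deadlock */
    if (pthread_mutex_trylock(&page_lock[pindex1]) != 0) {
        /* contention: drop page0, then retake both in ascending order
         * (QEMU instead longjmps back and restarts the translation) */
        pthread_mutex_unlock(&page_lock[pindex0]);
        pthread_mutex_lock(&page_lock[pindex1]);
        pthread_mutex_lock(&page_lock[pindex0]);
    }
}

static void unlock_two_pages(unsigned pindex0, unsigned pindex1)
{
    if (pindex0 != pindex1) {
        pthread_mutex_unlock(&page_lock[pindex1]);
    }
    pthread_mutex_unlock(&page_lock[pindex0]);
}

int main(void)
{
    for (int i = 0; i < NPAGES; i++) {
        pthread_mutex_init(&page_lock[i], NULL);
    }
    lock_two_pages(5, 2);      /* out-of-order request, handled safely */
    unlock_two_pages(5, 2);
    puts("done");
    return 0;
}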
+ 24 - 11
accel/tcg/tcg-accel-ops-icount.c

@@ -89,7 +89,20 @@ void icount_handle_deadline(void)
     }
 }

-void icount_prepare_for_run(CPUState *cpu)
+/* Distribute the budget evenly across all CPUs */
+int64_t icount_percpu_budget(int cpu_count)
+{
+    int64_t limit = icount_get_limit();
+    int64_t timeslice = limit / cpu_count;
+
+    if (timeslice == 0) {
+        timeslice = limit;
+    }
+
+    return timeslice;
+}
+
+void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget)
 {
     int insns_left;

@@ -98,24 +111,24 @@ void icount_prepare_for_run(CPUState *cpu)
      * each vCPU execution. However u16.high can be raised
      * asynchronously by cpu_exit/cpu_interrupt/tcg_handle_interrupt
      */
-    g_assert(cpu_neg(cpu)->icount_decr.u16.low == 0);
+    g_assert(cpu->neg.icount_decr.u16.low == 0);
     g_assert(cpu->icount_extra == 0);

+    replay_mutex_lock();
+
-    cpu->icount_budget = icount_get_limit();
+    cpu->icount_budget = MIN(icount_get_limit(), cpu_budget);
     insns_left = MIN(0xffff, cpu->icount_budget);
-    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
+    cpu->neg.icount_decr.u16.low = insns_left;
     cpu->icount_extra = cpu->icount_budget - insns_left;

-    replay_mutex_lock();
-
     if (cpu->icount_budget == 0) {
         /*
-         * We're called without the iothread lock, so must take it while
+         * We're called without the BQL, so must take it while
          * we're calling timer handlers.
          */
-        qemu_mutex_lock_iothread();
+        bql_lock();
         icount_notify_aio_contexts();
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
     }
 }

@@ -125,7 +138,7 @@ void icount_process_data(CPUState *cpu)
     icount_update(cpu);

     /* Reset the counters */
-    cpu_neg(cpu)->icount_decr.u16.low = 0;
+    cpu->neg.icount_decr.u16.low = 0;
     cpu->icount_extra = 0;
     cpu->icount_budget = 0;

@@ -140,7 +153,7 @@ void icount_handle_interrupt(CPUState *cpu, int mask)

     tcg_handle_interrupt(cpu, mask);
     if (qemu_cpu_is_self(cpu) &&
-        !cpu->can_do_io
+        !cpu->neg.can_do_io
         && (mask & ~old_mask) != 0) {
         cpu_abort(cpu, "Raised interrupt while not in I/O function");
     }

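With the hunks above, the per-deadline instruction budget is now split across CPUs: icount_percpu_budget() divides icount_get_limit() by the CPU count (falling back to the whole limit if the share rounds down to zero), and icount_prepare_for_run() clamps the result to the 16-bit decrementer. The standalone program below just replays that arithmetic; the limit and CPU count are made-up inputs, not values QEMU would report.

#include <inttypes.h>
#include <stdio.h>

static int64_t icount_percpu_budget(int64_t limit, int cpu_count)
{
    int64_t timeslice = limit / cpu_count;

    if (timeslice == 0) {
        timeslice = limit;          /* tiny budget: give it all to one CPU */
    }
    return timeslice;
}

int main(void)
{
    int64_t limit = 1000000;        /* hypothetical deadline budget */
    int cpu_count = 8;

    int64_t cpu_budget = icount_percpu_budget(limit, cpu_count);
    int64_t icount_budget = (limit < cpu_budget) ? limit : cpu_budget;
    int64_t insns_left = (icount_budget < 0xffff) ? icount_budget : 0xffff;
    int64_t icount_extra = icount_budget - insns_left;

    printf("per-cpu budget %" PRId64 ", low16 slice %" PRId64
           ", extra %" PRId64 "\n", cpu_budget, insns_left, icount_extra);
    return 0;
}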
+ 2 - 1
accel/tcg/tcg-accel-ops-icount.h

@@ -11,7 +11,8 @@
 #define TCG_ACCEL_OPS_ICOUNT_H

 void icount_handle_deadline(void);
-void icount_prepare_for_run(CPUState *cpu);
+void icount_prepare_for_run(CPUState *cpu, int64_t cpu_budget);
+int64_t icount_percpu_budget(int cpu_count);
 void icount_process_data(CPUState *cpu);

 void icount_handle_interrupt(CPUState *cpu, int mask);

+ 13 - 26
accel/tcg/tcg-accel-ops-mttcg.c

@@ -32,7 +32,7 @@
 #include "qemu/guest-random.h"
 #include "exec/exec-all.h"
 #include "hw/boards.h"
-
+#include "tcg/startup.h"
 #include "tcg-accel-ops.h"
 #include "tcg-accel-ops-mttcg.h"

@@ -76,11 +76,11 @@ static void *mttcg_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu.notifier);
     tcg_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     current_cpu = cpu;
     cpu_thread_signal_created(cpu);
     qemu_guest_random_seed_thread_part2(cpu->random_seed);
@@ -91,40 +91,35 @@ static void *mttcg_cpu_thread_fn(void *arg)
     do {
         if (cpu_can_run(cpu)) {
             int r;
-            qemu_mutex_unlock_iothread();
-            r = tcg_cpus_exec(cpu);
-            qemu_mutex_lock_iothread();
+            bql_unlock();
+            r = tcg_cpu_exec(cpu);
+            bql_lock();
             switch (r) {
             case EXCP_DEBUG:
                 cpu_handle_guest_debug(cpu);
                 break;
             case EXCP_HALTED:
                 /*
-                 * during start-up the vCPU is reset and the thread is
-                 * kicked several times. If we don't ensure we go back
-                 * to sleep in the halted state we won't cleanly
-                 * start-up when the vCPU is enabled.
-                 *
-                 * cpu->halted should ensure we sleep in wait_io_event
+                 * Usually cpu->halted is set, but may have already been
+                 * reset by another thread by the time we arrive here.
                  */
-                g_assert(cpu->halted);
                 break;
             case EXCP_ATOMIC:
-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 cpu_exec_step_atomic(cpu);
-                qemu_mutex_lock_iothread();
+                bql_lock();
             default:
                 /* Ignore everything else? */
                 break;
             }
         }

-        qatomic_mb_set(&cpu->exit_request, 0);
+        qatomic_set_mb(&cpu->exit_request, 0);
         qemu_wait_io_event(cpu);
     } while (!cpu->unplug || cpu_can_run(cpu));

-    tcg_cpus_destroy(cpu);
-    qemu_mutex_unlock_iothread();
+    tcg_cpu_destroy(cpu);
+    bql_unlock();
     rcu_remove_force_rcu_notifier(&force_rcu.notifier);
     rcu_unregister_thread();
     return NULL;
@@ -142,18 +137,10 @@ void mttcg_start_vcpu_thread(CPUState *cpu)
     g_assert(tcg_enabled());
     tcg_cpu_init_cflags(cpu, current_machine->smp.max_cpus > 1);

-    cpu->thread = g_new0(QemuThread, 1);
-    cpu->halt_cond = g_malloc0(sizeof(QemuCond));
-    qemu_cond_init(cpu->halt_cond);
-
     /* create a thread per vCPU with TCG (MTTCG) */
     snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "CPU %d/TCG",
              cpu->cpu_index);

     qemu_thread_create(cpu->thread, thread_name, mttcg_cpu_thread_fn,
                        cpu, QEMU_THREAD_JOINABLE);
-
-#ifdef _WIN32
-    cpu->hThread = qemu_thread_get_handle(cpu->thread);
-#endif
 }

+ 66 - 30
accel/tcg/tcg-accel-ops-rr.c

@@ -24,6 +24,7 @@
  */

 #include "qemu/osdep.h"
+#include "qemu/lockable.h"
 #include "sysemu/tcg.h"
 #include "sysemu/replay.h"
 #include "sysemu/cpu-timers.h"
@@ -31,7 +32,7 @@
 #include "qemu/notify.h"
 #include "qemu/guest-random.h"
 #include "exec/exec-all.h"
-
+#include "tcg/startup.h"
 #include "tcg-accel-ops.h"
 #include "tcg-accel-ops-rr.h"
 #include "tcg-accel-ops-icount.h"
@@ -71,11 +72,13 @@ static void rr_kick_next_cpu(void)
 {
     CPUState *cpu;
     do {
-        cpu = qatomic_mb_read(&rr_current_cpu);
+        cpu = qatomic_read(&rr_current_cpu);
         if (cpu) {
             cpu_exit(cpu);
         }
-    } while (cpu != qatomic_mb_read(&rr_current_cpu));
+        /* Finish kicking this cpu before reading again.  */
+        smp_mb();
+    } while (cpu != qatomic_read(&rr_current_cpu));
 }

 static void rr_kick_thread(void *opaque)
@@ -108,7 +111,7 @@ static void rr_wait_io_event(void)

     while (all_cpu_threads_idle()) {
         rr_stop_kick_timer();
-        qemu_cond_wait_iothread(first_cpu->halt_cond);
+        qemu_cond_wait_bql(first_cpu->halt_cond);
     }

     rr_start_kick_timer();
@@ -128,7 +131,7 @@ static void rr_deal_with_unplugged_cpus(void)

     CPU_FOREACH(cpu) {
         if (cpu->unplug && !cpu_can_run(cpu)) {
-            tcg_cpus_destroy(cpu);
+            tcg_cpu_destroy(cpu);
             break;
         }
     }
@@ -139,6 +142,33 @@ static void rr_force_rcu(Notifier *notify, void *data)
     rr_kick_next_cpu();
 }

+/*
+ * Calculate the number of CPUs that we will process in a single iteration of
+ * the main CPU thread loop so that we can fairly distribute the instruction
+ * count across CPUs.
+ *
+ * The CPU count is cached based on the CPU list generation ID to avoid
+ * iterating the list every time.
+ */
+static int rr_cpu_count(void)
+{
+    static unsigned int last_gen_id = ~0;
+    static int cpu_count;
+    CPUState *cpu;
+
+    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
+
+    if (cpu_list_generation_id_get() != last_gen_id) {
+        cpu_count = 0;
+        CPU_FOREACH(cpu) {
+            ++cpu_count;
+        }
+        last_gen_id = cpu_list_generation_id_get();
+    }
+
+    return cpu_count;
+}
+
 /*
  * In the single-threaded case each vCPU is simulated in turn. If
  * there is more than a single vCPU we create a simple timer to kick
@@ -158,17 +188,17 @@ static void *rr_cpu_thread_fn(void *arg)
     rcu_add_force_rcu_notifier(&force_rcu);
     tcg_register_thread();

-    qemu_mutex_lock_iothread();
+    bql_lock();
     qemu_thread_get_self(cpu->thread);

     cpu->thread_id = qemu_get_thread_id();
-    cpu->can_do_io = 1;
+    cpu->neg.can_do_io = true;
     cpu_thread_signal_created(cpu);
     qemu_guest_random_seed_thread_part2(cpu->random_seed);

     /* wait for initial kick-off after machine start */
     while (first_cpu->stopped) {
-        qemu_cond_wait_iothread(first_cpu->halt_cond);
+        qemu_cond_wait_bql(first_cpu->halt_cond);

         /* process any pending work */
         CPU_FOREACH(cpu) {
@@ -185,11 +215,16 @@ static void *rr_cpu_thread_fn(void *arg)
     cpu->exit_request = 1;

     while (1) {
-        qemu_mutex_unlock_iothread();
+        /* Only used for icount_enabled() */
+        int64_t cpu_budget = 0;
+
+        bql_unlock();
         replay_mutex_lock();
-        qemu_mutex_lock_iothread();
+        bql_lock();

         if (icount_enabled()) {
+            int cpu_count = rr_cpu_count();
+
             /* Account partial waits to QEMU_CLOCK_VIRTUAL.  */
             icount_account_warp_timer();
             /*
@@ -197,6 +232,8 @@ static void *rr_cpu_thread_fn(void *arg)
              * waking up the I/O thread and waiting for completion.
              */
             icount_handle_deadline();
+
+            cpu_budget = icount_percpu_budget(cpu_count);
         }

         replay_mutex_unlock();
@@ -206,8 +243,9 @@ static void *rr_cpu_thread_fn(void *arg)
         }

         while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
+            /* Store rr_current_cpu before evaluating cpu_can_run().  */
+            qatomic_set_mb(&rr_current_cpu, cpu);

-            qatomic_mb_set(&rr_current_cpu, cpu);
             current_cpu = cpu;

             qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -216,23 +254,23 @@ static void *rr_cpu_thread_fn(void *arg)
             if (cpu_can_run(cpu)) {
                 int r;

-                qemu_mutex_unlock_iothread();
+                bql_unlock();
                 if (icount_enabled()) {
-                    icount_prepare_for_run(cpu);
+                    icount_prepare_for_run(cpu, cpu_budget);
                 }
-                r = tcg_cpus_exec(cpu);
+                r = tcg_cpu_exec(cpu);
                 if (icount_enabled()) {
                     icount_process_data(cpu);
                 }
-                qemu_mutex_lock_iothread();
+                bql_lock();

                 if (r == EXCP_DEBUG) {
                     cpu_handle_guest_debug(cpu);
                     break;
                 } else if (r == EXCP_ATOMIC) {
-                    qemu_mutex_unlock_iothread();
+                    bql_unlock();
                     cpu_exec_step_atomic(cpu);
-                    qemu_mutex_lock_iothread();
+                    bql_lock();
                     break;
                 }
             } else if (cpu->stop) {
@@ -245,11 +283,11 @@ static void *rr_cpu_thread_fn(void *arg)
             cpu = CPU_NEXT(cpu);
         } /* while (cpu && !cpu->exit_request).. */

-        /* Does not need qatomic_mb_set because a spurious wakeup is okay.  */
+        /* Does not need a memory barrier because a spurious wakeup is okay.  */
         qatomic_set(&rr_current_cpu, NULL);

         if (cpu && cpu->exit_request) {
-            qatomic_mb_set(&cpu->exit_request, 0);
+            qatomic_set_mb(&cpu->exit_request, 0);
         }

         if (icount_enabled() && all_cpu_threads_idle()) {
@@ -279,27 +317,25 @@ void rr_start_vcpu_thread(CPUState *cpu)
     tcg_cpu_init_cflags(cpu, false);

     if (!single_tcg_cpu_thread) {
-        cpu->thread = g_new0(QemuThread, 1);
-        cpu->halt_cond = g_new0(QemuCond, 1);
-        qemu_cond_init(cpu->halt_cond);
+        single_tcg_halt_cond = cpu->halt_cond;
+        single_tcg_cpu_thread = cpu->thread;

         /* share a single thread for all cpus with TCG */
         snprintf(thread_name, VCPU_THREAD_NAME_SIZE, "ALL CPUs/TCG");
         qemu_thread_create(cpu->thread, thread_name,
                            rr_cpu_thread_fn,
                            cpu, QEMU_THREAD_JOINABLE);
-
-        single_tcg_halt_cond = cpu->halt_cond;
-        single_tcg_cpu_thread = cpu->thread;
-#ifdef _WIN32
-        cpu->hThread = qemu_thread_get_handle(cpu->thread);
-#endif
     } else {
-        /* we share the thread */
+        /* we share the thread, dump spare data */
+        g_free(cpu->thread);
+        qemu_cond_destroy(cpu->halt_cond);
+        g_free(cpu->halt_cond);
         cpu->thread = single_tcg_cpu_thread;
         cpu->halt_cond = single_tcg_halt_cond;
+
+        /* copy the stuff done at start of rr_cpu_thread_fn */
         cpu->thread_id = first_cpu->thread_id;
-        cpu->can_do_io = 1;
+        cpu->neg.can_do_io = 1;
         cpu->created = true;
     }
 }

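rr_cpu_count(), added above, avoids walking the CPU list on every scheduling round by caching the count and only recounting when the CPU-list generation ID changes. Below is a stripped-down, self-contained sketch of that generation-keyed cache; the mutex, generation counter and trivial "list" are stand-ins for qemu_cpu_list_lock, cpu_list_generation_id_get() and CPU_FOREACH().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t cpu_list_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int cpu_list_generation; /* bumped when CPUs are added/removed */
static int cpu_list_len;                 /* the "list" itself, kept trivial */

static int rr_cpu_count(void)
{
    static unsigned int last_gen_id = ~0u;
    static int cpu_count;

    pthread_mutex_lock(&cpu_list_lock);
    if (cpu_list_generation != last_gen_id) {
        cpu_count = cpu_list_len;        /* QEMU re-walks the CPU list here */
        last_gen_id = cpu_list_generation;
    }
    pthread_mutex_unlock(&cpu_list_lock);
    return cpu_count;
}

int main(void)
{
    cpu_list_len = 4;
    cpu_list_generation++;
    printf("count=%d\n", rr_cpu_count());   /* recounts: 4 */
    printf("count=%d\n", rr_cpu_count());   /* cached:   4 */
    return 0;
}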
+ 17 - 16
accel/tcg/tcg-accel-ops.c

@@ -34,7 +34,10 @@
 #include "qemu/timer.h"
 #include "exec/exec-all.h"
 #include "exec/hwaddr.h"
-#include "exec/gdbstub.h"
+#include "exec/tb-flush.h"
+#include "gdbstub/enums.h"
+
+#include "hw/core/cpu.h"

 #include "tcg-accel-ops.h"
 #include "tcg-accel-ops-mttcg.h"
@@ -64,38 +67,35 @@ void tcg_cpu_init_cflags(CPUState *cpu, bool parallel)
     // We'll let C handle it, since the overhead is similar.
     cflags |= CF_NO_GOTO_PTR;
 #endif
-    cpu->tcg_cflags |= cflags;
+    tcg_cflags_set(cpu, cflags);
 }

-void tcg_cpus_destroy(CPUState *cpu)
+void tcg_cpu_destroy(CPUState *cpu)
 {
     cpu_thread_signal_destroyed(cpu);
 }

-int tcg_cpus_exec(CPUState *cpu)
+int tcg_cpu_exec(CPUState *cpu)
 {
     int ret;
-#ifdef CONFIG_PROFILER
-    int64_t ti;
-#endif
     assert(tcg_enabled());
-#ifdef CONFIG_PROFILER
-    ti = profile_getclock();
-#endif
     cpu_exec_start(cpu);
     ret = cpu_exec(cpu);
     cpu_exec_end(cpu);
-#ifdef CONFIG_PROFILER
-    qatomic_set(&tcg_ctx->prof.cpu_exec_time,
-                tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
-#endif
     return ret;
 }

+static void tcg_cpu_reset_hold(CPUState *cpu)
+{
+    tcg_flush_jmp_cache(cpu);
+
+    tlb_flush(cpu);
+}
+
 /* mask must never be zero, except for A20 change call */
 void tcg_handle_interrupt(CPUState *cpu, int mask)
 {
-    g_assert(qemu_mutex_iothread_locked());
+    g_assert(bql_locked());

     cpu->interrupt_request |= mask;

@@ -106,7 +106,7 @@ void tcg_handle_interrupt(CPUState *cpu, int mask)
     if (!qemu_cpu_is_self(cpu)) {
         qemu_cpu_kick(cpu);
     } else {
-        qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
+        qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
     }
 }

@@ -220,6 +220,7 @@ static void tcg_accel_ops_init(AccelOpsClass *ops)
         }
     }

+    ops->cpu_reset_hold = tcg_cpu_reset_hold;
     ops->supports_guest_debug = tcg_supports_guest_debug;
     ops->insert_breakpoint = tcg_insert_breakpoint;
     ops->remove_breakpoint = tcg_remove_breakpoint;

+ 2 - 2
accel/tcg/tcg-accel-ops.h

@@ -14,8 +14,8 @@
 
 
 #include "sysemu/cpus.h"

-void tcg_cpus_destroy(CPUState *cpu);
-int tcg_cpus_exec(CPUState *cpu);
+void tcg_cpu_destroy(CPUState *cpu);
+int tcg_cpu_exec(CPUState *cpu);
 void tcg_handle_interrupt(CPUState *cpu, int mask);
 void tcg_cpu_init_cflags(CPUState *cpu, bool parallel);


+ 37 - 30
accel/tcg/tcg-all.c

@@ -27,21 +27,24 @@
 #include "sysemu/tcg.h"
 #include "sysemu/tcg.h"
 #include "exec/replay-core.h"
 #include "exec/replay-core.h"
 #include "sysemu/cpu-timers.h"
 #include "sysemu/cpu-timers.h"
-#include "tcg/tcg.h"
+#include "tcg/startup.h"
+#include "tcg/oversized-guest.h"
 #include "qapi/error.h"
 #include "qapi/error.h"
 #include "qemu/error-report.h"
 #include "qemu/error-report.h"
 #include "qemu/accel.h"
 #include "qemu/accel.h"
+#include "qemu/atomic.h"
 #include "qapi/qapi-builtin-visit.h"
 #include "qapi/qapi-builtin-visit.h"
 #include "qemu/units.h"
 #include "qemu/units.h"
 #if !defined(CONFIG_USER_ONLY)
 #if !defined(CONFIG_USER_ONLY)
 #include "hw/boards.h"
 #include "hw/boards.h"
 #endif
 #endif
-#include "internal.h"
+#include "internal-common.h"
 
 
 struct TCGState {
 struct TCGState {
     AccelState parent_obj;
     AccelState parent_obj;
 
 
     bool mttcg_enabled;
     bool mttcg_enabled;
+    bool one_insn_per_tb;
     int splitwx_enabled;
     int splitwx_enabled;
     unsigned long tb_size;
     unsigned long tb_size;
 };
 };
@@ -61,37 +64,23 @@ DECLARE_INSTANCE_CHECKER(TCGState, TCG_STATE,
  * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
  * they can set the appropriate CONFIG flags in ${target}-softmmu.mak
  *
  *
  * Once a guest architecture has been converted to the new primitives
  * Once a guest architecture has been converted to the new primitives
- * there are two remaining limitations to check.
- *
- * - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
- * - The host must have a stronger memory order than the guest
- *
- * It may be possible in future to support strong guests on weak hosts
- * but that will require tagging all load/stores in a guest with their
- * implicit memory order requirements which would likely slow things
- * down a lot.
+ * there is one remaining limitation to check:
+ *   - The guest can't be oversized (e.g. 64 bit guest on 32 bit host)
  */
  */
 
 
-static bool check_tcg_memory_orders_compatible(void)
-{
-#if defined(TCG_GUEST_DEFAULT_MO) && defined(TCG_TARGET_DEFAULT_MO)
-    return (TCG_GUEST_DEFAULT_MO & ~TCG_TARGET_DEFAULT_MO) == 0;
-#else
-    return false;
-#endif
-}
-
 static bool default_mttcg_enabled(void)
 static bool default_mttcg_enabled(void)
 {
 {
     if (icount_enabled() || TCG_OVERSIZED_GUEST) {
     if (icount_enabled() || TCG_OVERSIZED_GUEST) {
         return false;
         return false;
-    } else {
+    }
 #ifdef TARGET_SUPPORTS_MTTCG
 #ifdef TARGET_SUPPORTS_MTTCG
-        return check_tcg_memory_orders_compatible();
+# ifndef TCG_GUEST_DEFAULT_MO
+#  error "TARGET_SUPPORTS_MTTCG without TCG_GUEST_DEFAULT_MO"
+# endif
+    return true;
 #else
 #else
-        return false;
+    return false;
 #endif
 #endif
-    }
 }
 }
 
 
 static void tcg_accel_instance_init(Object *obj)
 static void tcg_accel_instance_init(Object *obj)
@@ -109,6 +98,7 @@ static void tcg_accel_instance_init(Object *obj)
 }
 }
 
 
 bool mttcg_enabled;
 bool mttcg_enabled;
+bool one_insn_per_tb;
 
 
 static int tcg_init_machine(MachineState *ms)
 static int tcg_init_machine(MachineState *ms)
 {
 {
@@ -131,7 +121,7 @@ static int tcg_init_machine(MachineState *ms)
      * There's no guest base to take into account, so go ahead and
      * There's no guest base to take into account, so go ahead and
      * initialize the prologue now.
      * initialize the prologue now.
      */
      */
-    tcg_prologue_init(tcg_ctx);
+    tcg_prologue_init();
 #endif
 #endif
 
 
     return 0;
     return 0;
@@ -158,11 +148,6 @@ static void tcg_set_thread(Object *obj, const char *value, Error **errp)
             warn_report("Guest not yet converted to MTTCG - "
             warn_report("Guest not yet converted to MTTCG - "
                         "you may get unexpected results");
                         "you may get unexpected results");
 #endif
 #endif
-            if (!check_tcg_memory_orders_compatible()) {
-                warn_report("Guest expects a stronger memory ordering "
-                            "than the host provides");
-                error_printf("This may cause strange/hard to debug errors\n");
-            }
             s->mttcg_enabled = true;
             s->mttcg_enabled = true;
         }
         }
     } else if (strcmp(value, "single") == 0) {
     } else if (strcmp(value, "single") == 0) {
@@ -208,6 +193,20 @@ static void tcg_set_splitwx(Object *obj, bool value, Error **errp)
     s->splitwx_enabled = value;
     s->splitwx_enabled = value;
 }
 }
 
 
+static bool tcg_get_one_insn_per_tb(Object *obj, Error **errp)
+{
+    TCGState *s = TCG_STATE(obj);
+    return s->one_insn_per_tb;
+}
+
+static void tcg_set_one_insn_per_tb(Object *obj, bool value, Error **errp)
+{
+    TCGState *s = TCG_STATE(obj);
+    s->one_insn_per_tb = value;
+    /* Set the global also: this changes the behaviour */
+    qatomic_set(&one_insn_per_tb, value);
+}
+
 static int tcg_gdbstub_supported_sstep_flags(void)
 static int tcg_gdbstub_supported_sstep_flags(void)
 {
 {
     /*
     /*
@@ -228,6 +227,8 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
     AccelClass *ac = ACCEL_CLASS(oc);
     AccelClass *ac = ACCEL_CLASS(oc);
     ac->name = "tcg";
     ac->name = "tcg";
     ac->init_machine = tcg_init_machine;
     ac->init_machine = tcg_init_machine;
+    ac->cpu_common_realize = tcg_exec_realizefn;
+    ac->cpu_common_unrealize = tcg_exec_unrealizefn;
     ac->allowed = &tcg_allowed;
     ac->allowed = &tcg_allowed;
     ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;
     ac->gdbstub_supported_sstep_flags = tcg_gdbstub_supported_sstep_flags;
 
 
@@ -245,6 +246,12 @@ static void tcg_accel_class_init(ObjectClass *oc, void *data)
         tcg_get_splitwx, tcg_set_splitwx);
         tcg_get_splitwx, tcg_set_splitwx);
     object_class_property_set_description(oc, "split-wx",
     object_class_property_set_description(oc, "split-wx",
         "Map jit pages into separate RW and RX regions");
         "Map jit pages into separate RW and RX regions");
+
+    object_class_property_add_bool(oc, "one-insn-per-tb",
+                                   tcg_get_one_insn_per_tb,
+                                   tcg_set_one_insn_per_tb);
+    object_class_property_set_description(oc, "one-insn-per-tb",
+        "Only put one guest insn in each translation block");
 }
 }
 
 
 static const TypeInfo tcg_accel_type = {
 static const TypeInfo tcg_accel_type = {

+ 38 - 1
accel/tcg/tcg-runtime-gvec.c

@@ -20,7 +20,7 @@
 #include "qemu/osdep.h"
 #include "qemu/osdep.h"
 #include "qemu/host-utils.h"
 #include "qemu/host-utils.h"
 #include "cpu.h"
 #include "cpu.h"
-#include "exec/helper-proto.h"
+#include "exec/helper-proto-common.h"
 #include "tcg/tcg-gvec-desc.h"
 #include "tcg/tcg-gvec-desc.h"
 
 
 
 
@@ -550,6 +550,17 @@ void HELPER(gvec_ands)(void *d, void *a, uint64_t b, uint32_t desc)
     clear_high(d, oprsz, desc);
     clear_high(d, oprsz, desc);
 }
 }
 
 
+void HELPER(gvec_andcs)(void *d, void *a, uint64_t b, uint32_t desc)
+{
+    intptr_t oprsz = simd_oprsz(desc);
+    intptr_t i;
+
+    for (i = 0; i < oprsz; i += sizeof(uint64_t)) {
+        *(uint64_t *)(d + i) = *(uint64_t *)(a + i) & ~b;
+    }
+    clear_high(d, oprsz, desc);
+}
+
 void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
 void HELPER(gvec_xors)(void *d, void *a, uint64_t b, uint32_t desc)
 {
 {
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t oprsz = simd_oprsz(desc);
@@ -1031,6 +1042,32 @@ DO_CMP2(64)
 #undef DO_CMP1
 #undef DO_CMP1
 #undef DO_CMP2
 #undef DO_CMP2
 
 
+#define DO_CMP1(NAME, TYPE, OP)                                            \
+void HELPER(NAME)(void *d, void *a, uint64_t b64, uint32_t desc)           \
+{                                                                          \
+    intptr_t oprsz = simd_oprsz(desc);                                     \
+    TYPE inv = simd_data(desc), b = b64;                                   \
+    for (intptr_t i = 0; i < oprsz; i += sizeof(TYPE)) {                   \
+        *(TYPE *)(d + i) = -((*(TYPE *)(a + i) OP b) ^ inv);               \
+    }                                                                      \
+    clear_high(d, oprsz, desc);                                            \
+}
+
+#define DO_CMP2(SZ) \
+    DO_CMP1(gvec_eqs##SZ, uint##SZ##_t, ==)    \
+    DO_CMP1(gvec_lts##SZ, int##SZ##_t, <)      \
+    DO_CMP1(gvec_les##SZ, int##SZ##_t, <=)     \
+    DO_CMP1(gvec_ltus##SZ, uint##SZ##_t, <)    \
+    DO_CMP1(gvec_leus##SZ, uint##SZ##_t, <=)
+
+DO_CMP2(8)
+DO_CMP2(16)
+DO_CMP2(32)
+DO_CMP2(64)
+
+#undef DO_CMP1
+#undef DO_CMP2
+
 void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
 void HELPER(gvec_ssadd8)(void *d, void *a, void *b, uint32_t desc)
 {
 {
     intptr_t oprsz = simd_oprsz(desc);
     intptr_t oprsz = simd_oprsz(desc);

+ 5 - 1
accel/tcg/tcg-runtime.c

@@ -24,13 +24,17 @@
 #include "qemu/osdep.h"
 #include "qemu/osdep.h"
 #include "qemu/host-utils.h"
 #include "qemu/host-utils.h"
 #include "cpu.h"
 #include "cpu.h"
-#include "exec/helper-proto.h"
+#include "exec/helper-proto-common.h"
 #include "exec/cpu_ldst.h"
 #include "exec/cpu_ldst.h"
 #include "exec/exec-all.h"
 #include "exec/exec-all.h"
 #include "disas/disas.h"
 #include "disas/disas.h"
 #include "exec/log.h"
 #include "exec/log.h"
 #include "tcg/tcg.h"
 #include "tcg/tcg.h"
 
 
+#define HELPER_H  "accel/tcg/tcg-runtime.h"
+#include "exec/helper-info.c.inc"
+#undef  HELPER_H
+
 /* 32-bit helpers */
 /* 32-bit helpers */
 
 
 int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)
 int32_t HELPER(div_i32)(int32_t arg1, int32_t arg2)

+ 53 - 26
accel/tcg/tcg-runtime.h

@@ -39,62 +39,63 @@ DEF_HELPER_FLAGS_1(exit_atomic, TCG_CALL_NO_WG, noreturn, env)
 DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
 DEF_HELPER_FLAGS_3(memset, TCG_CALL_NO_RWG, ptr, ptr, int, ptr)
 #endif /* IN_HELPER_PROTO */
 #endif /* IN_HELPER_PROTO */
 
 
+DEF_HELPER_FLAGS_3(ld_i128, TCG_CALL_NO_WG, i128, env, i64, i32)
+DEF_HELPER_FLAGS_4(st_i128, TCG_CALL_NO_WG, void, env, i64, i128, i32)
+
 DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgb, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_be, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgw_le, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgl_be, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgl_le, TCG_CALL_NO_WG,
-                   i32, env, tl, i32, i32, i32)
+                   i32, env, i64, i32, i32, i32)
 #ifdef CONFIG_ATOMIC64
 #ifdef CONFIG_ATOMIC64
 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_be, TCG_CALL_NO_WG,
-                   i64, env, tl, i64, i64, i32)
+                   i64, env, i64, i64, i64, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgq_le, TCG_CALL_NO_WG,
-                   i64, env, tl, i64, i64, i32)
+                   i64, env, i64, i64, i64, i32)
 #endif
 #endif
-#ifdef CONFIG_CMPXCHG128
+#if HAVE_CMPXCHG128
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_be, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
+                   i128, env, i64, i128, i128, i32)
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
 DEF_HELPER_FLAGS_5(atomic_cmpxchgo_le, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
+                   i128, env, i64, i128, i128, i32)
 #endif
 #endif
 
 
-DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_be, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
-DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo_le, TCG_CALL_NO_WG,
-                   i128, env, tl, i128, i128, i32)
+DEF_HELPER_FLAGS_5(nonatomic_cmpxchgo, TCG_CALL_NO_WG,
+                   i128, env, i64, i128, i128, i32)
 
 
 #ifdef CONFIG_ATOMIC64
 #ifdef CONFIG_ATOMIC64
 #define GEN_ATOMIC_HELPERS(NAME)                                  \
 #define GEN_ATOMIC_HELPERS(NAME)                                  \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_le),           \
-                       TCG_CALL_NO_WG, i64, env, tl, i64, i32)    \
+                       TCG_CALL_NO_WG, i64, env, i64, i64, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), q_be),           \
-                       TCG_CALL_NO_WG, i64, env, tl, i64, i32)
+                       TCG_CALL_NO_WG, i64, env, i64, i64, i32)
 #else
 #else
 #define GEN_ATOMIC_HELPERS(NAME)                                  \
 #define GEN_ATOMIC_HELPERS(NAME)                                  \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), b),              \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), w_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_le),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)    \
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)   \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \
     DEF_HELPER_FLAGS_4(glue(glue(atomic_, NAME), l_be),           \
-                       TCG_CALL_NO_WG, i32, env, tl, i32, i32)
+                       TCG_CALL_NO_WG, i32, env, i64, i32, i32)
 #endif /* CONFIG_ATOMIC64 */
 #endif /* CONFIG_ATOMIC64 */
 
 
 GEN_ATOMIC_HELPERS(fetch_add)
 GEN_ATOMIC_HELPERS(fetch_add)
@@ -217,6 +218,7 @@ DEF_HELPER_FLAGS_4(gvec_nor, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_eqv, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
 
 DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
 DEF_HELPER_FLAGS_4(gvec_ands, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_andcs, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
 DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
 DEF_HELPER_FLAGS_4(gvec_xors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
 DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
 DEF_HELPER_FLAGS_4(gvec_ors, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
 
 
@@ -295,4 +297,29 @@ DEF_HELPER_FLAGS_4(gvec_leu16, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_leu32, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_4(gvec_leu64, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, i32)
 
 
+DEF_HELPER_FLAGS_4(gvec_eqs8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_eqs64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_lts8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_lts64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_les8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_les64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_ltus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_ltus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
+DEF_HELPER_FLAGS_4(gvec_leus8, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus16, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus32, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+DEF_HELPER_FLAGS_4(gvec_leus64, TCG_CALL_NO_RWG, void, ptr, ptr, i64, i32)
+
 DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)
 DEF_HELPER_FLAGS_5(gvec_bitsel, TCG_CALL_NO_RWG, void, ptr, ptr, ptr, ptr, i32)

+ 93 - 222
accel/tcg/translate-all.c

@@ -19,7 +19,6 @@
 
 
 #include "qemu/osdep.h"
 #include "qemu/osdep.h"
 
 
-#define NO_CPU_IO_DEFS
 #include "trace.h"
 #include "trace.h"
 #include "disas/disas.h"
 #include "disas/disas.h"
 #include "exec/exec-all.h"
 #include "exec/exec-all.h"
@@ -63,19 +62,18 @@
 #include "tb-jmp-cache.h"
 #include "tb-jmp-cache.h"
 #include "tb-hash.h"
 #include "tb-hash.h"
 #include "tb-context.h"
 #include "tb-context.h"
-#include "internal.h"
-#include "perf.h"
-
-/* Make sure all possible CPU event bits fit in tb->trace_vcpu_dstate */
-QEMU_BUILD_BUG_ON(CPU_TRACE_DSTATE_MAX_EVENTS >
-                  sizeof_field(TranslationBlock, trace_vcpu_dstate)
-                  * BITS_PER_BYTE);
+#include "internal-common.h"
+#include "internal-target.h"
+#include "tcg/perf.h"
+#include "tcg/insn-start-words.h"
 
 
 TBContext tb_ctx;
 TBContext tb_ctx;
 
 
-/* Encode VAL as a signed leb128 sequence at P.
-   Return P incremented past the encoded value.  */
-static uint8_t *encode_sleb128(uint8_t *p, target_long val)
+/*
+ * Encode VAL as a signed leb128 sequence at P.
+ * Return P incremented past the encoded value.
+ */
+static uint8_t *encode_sleb128(uint8_t *p, int64_t val)
 {
 {
     int more, byte;
     int more, byte;
 
 
@@ -93,21 +91,23 @@ static uint8_t *encode_sleb128(uint8_t *p, target_long val)
     return p;
     return p;
 }
 }
 
 
-/* Decode a signed leb128 sequence at *PP; increment *PP past the
-   decoded value.  Return the decoded value.  */
-static target_long decode_sleb128(const uint8_t **pp)
+/*
+ * Decode a signed leb128 sequence at *PP; increment *PP past the
+ * decoded value.  Return the decoded value.
+ */
+static int64_t decode_sleb128(const uint8_t **pp)
 {
 {
     const uint8_t *p = *pp;
     const uint8_t *p = *pp;
-    target_long val = 0;
+    int64_t val = 0;
     int byte, shift = 0;
     int byte, shift = 0;
 
 
     do {
     do {
         byte = *p++;
         byte = *p++;
-        val |= (target_ulong)(byte & 0x7f) << shift;
+        val |= (int64_t)(byte & 0x7f) << shift;
         shift += 7;
         shift += 7;
     } while (byte & 0x80);
     } while (byte & 0x80);
     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
     if (shift < TARGET_LONG_BITS && (byte & 0x40)) {
-        val |= -(target_ulong)1 << shift;
+        val |= -(int64_t)1 << shift;
     }
     }
 
 
     *pp = p;
     *pp = p;
@@ -129,22 +129,26 @@ static target_long decode_sleb128(const uint8_t **pp)
 static int encode_search(TranslationBlock *tb, uint8_t *block)
 static int encode_search(TranslationBlock *tb, uint8_t *block)
 {
 {
     uint8_t *highwater = tcg_ctx->code_gen_highwater;
     uint8_t *highwater = tcg_ctx->code_gen_highwater;
+    uint64_t *insn_data = tcg_ctx->gen_insn_data;
+    uint16_t *insn_end_off = tcg_ctx->gen_insn_end_off;
     uint8_t *p = block;
     uint8_t *p = block;
     int i, j, n;
     int i, j, n;
 
 
     for (i = 0, n = tb->icount; i < n; ++i) {
     for (i = 0, n = tb->icount; i < n; ++i) {
-        target_ulong prev;
+        uint64_t prev, curr;
 
 
         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
         for (j = 0; j < TARGET_INSN_START_WORDS; ++j) {
             if (i == 0) {
             if (i == 0) {
                 prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
                 prev = (!(tb_cflags(tb) & CF_PCREL) && j == 0 ? tb->pc : 0);
             } else {
             } else {
-                prev = tcg_ctx->gen_insn_data[i - 1][j];
+                prev = insn_data[(i - 1) * TARGET_INSN_START_WORDS + j];
             }
             }
-            p = encode_sleb128(p, tcg_ctx->gen_insn_data[i][j] - prev);
+            curr = insn_data[i * TARGET_INSN_START_WORDS + j];
+            p = encode_sleb128(p, curr - prev);
         }
         }
-        prev = (i == 0 ? 0 : tcg_ctx->gen_insn_end_off[i - 1]);
-        p = encode_sleb128(p, tcg_ctx->gen_insn_end_off[i] - prev);
+        prev = (i == 0 ? 0 : insn_end_off[i - 1]);
+        curr = insn_end_off[i];
+        p = encode_sleb128(p, curr - prev);
 
 
         /* Test for (pending) buffer overflow.  The assumption is that any
         /* Test for (pending) buffer overflow.  The assumption is that any
            one row beginning below the high water mark cannot overrun
            one row beginning below the high water mark cannot overrun
@@ -200,10 +204,6 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
                                uintptr_t host_pc)
                                uintptr_t host_pc)
 {
 {
     uint64_t data[TARGET_INSN_START_WORDS];
     uint64_t data[TARGET_INSN_START_WORDS];
-#ifdef CONFIG_PROFILER
-    TCGProfile *prof = &tcg_ctx->prof;
-    int64_t ti = profile_getclock();
-#endif
     int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
     int insns_left = cpu_unwind_data_from_tb(tb, host_pc, data);
 
 
     if (insns_left < 0) {
     if (insns_left < 0) {
@@ -216,16 +216,10 @@ void cpu_restore_state_from_tb(CPUState *cpu, TranslationBlock *tb,
          * Reset the cycle counter to the start of the block and
          * Reset the cycle counter to the start of the block and
          * shift if to the number of actually executed instructions.
          * shift if to the number of actually executed instructions.
          */
          */
-        cpu_neg(cpu)->icount_decr.u16.low += insns_left;
+        cpu->neg.icount_decr.u16.low += insns_left;
     }
     }
 
 
     cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
     cpu->cc->tcg_ops->restore_state_to_opc(cpu, tb, data);
-
-#ifdef CONFIG_PROFILER
-    qatomic_set(&prof->restore_time,
-                prof->restore_time + profile_getclock() - ti);
-    qatomic_set(&prof->restore_count, prof->restore_count + 1);
-#endif
 }
 }
 
 
 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
 bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc)
@@ -263,7 +257,6 @@ bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data)
 
 
 void page_init(void)
 void page_init(void)
 {
 {
-    page_size_init();
     page_table_config_init();
     page_table_config_init();
 }
 }
 
 
@@ -272,7 +265,7 @@ void page_init(void)
  * Return the size of the generated code, or negative on error.
  * Return the size of the generated code, or negative on error.
  */
  */
 static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
 static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
-                           target_ulong pc, void *host_pc,
+                           vaddr pc, void *host_pc,
                            int *max_insns, int64_t *ti)
                            int *max_insns, int64_t *ti)
 {
 {
     int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
     int ret = sigsetjmp(tcg_ctx->jmp_trans, 0);
@@ -288,29 +281,19 @@ static int setjmp_gen_code(CPUArchState *env, TranslationBlock *tb,
     tcg_ctx->cpu = NULL;
     tcg_ctx->cpu = NULL;
     *max_insns = tb->icount;
     *max_insns = tb->icount;
 
 
-#ifdef CONFIG_PROFILER
-    qatomic_set(&tcg_ctx->prof.tb_count, tcg_ctx->prof.tb_count + 1);
-    qatomic_set(&tcg_ctx->prof.interm_time,
-                tcg_ctx->prof.interm_time + profile_getclock() - *ti);
-    *ti = profile_getclock();
-#endif
-
     return tcg_gen_code(tcg_ctx, tb, pc);
     return tcg_gen_code(tcg_ctx, tb, pc);
 }
 }
 
 
 /* Called with mmap_lock held for user mode emulation.  */
 /* Called with mmap_lock held for user mode emulation.  */
 TranslationBlock *tb_gen_code(CPUState *cpu,
 TranslationBlock *tb_gen_code(CPUState *cpu,
-                              target_ulong pc, target_ulong cs_base,
+                              vaddr pc, uint64_t cs_base,
                               uint32_t flags, int cflags)
                               uint32_t flags, int cflags)
 {
 {
-    CPUArchState *env = cpu->env_ptr;
+    CPUArchState *env = cpu_env(cpu);
     TranslationBlock *tb, *existing_tb;
     TranslationBlock *tb, *existing_tb;
-    tb_page_addr_t phys_pc;
+    tb_page_addr_t phys_pc, phys_p2;
     tcg_insn_unit *gen_code_buf;
     tcg_insn_unit *gen_code_buf;
     int gen_code_size, search_size, max_insns;
     int gen_code_size, search_size, max_insns;
-#ifdef CONFIG_PROFILER
-    TCGProfile *prof = &tcg_ctx->prof;
-#endif
     int64_t ti;
     int64_t ti;
     void *host_pc;
     void *host_pc;
 
 
@@ -321,7 +304,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
 
 
     if (phys_pc == -1) {
     if (phys_pc == -1) {
         /* Generate a one-shot TB with 1 insn in it */
         /* Generate a one-shot TB with 1 insn in it */
-        cflags = (cflags & ~CF_COUNT_MASK) | CF_LAST_IO | 1;
+        cflags = (cflags & ~CF_COUNT_MASK) | 1;
     }
     }
 
 
     max_insns = cflags & CF_COUNT_MASK;
     max_insns = cflags & CF_COUNT_MASK;
@@ -331,6 +314,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
     QEMU_BUILD_BUG_ON(CF_COUNT_MASK + 1 != TCG_MAX_INSNS);
 
 
  buffer_overflow:
  buffer_overflow:
+    assert_no_pages_locked();
     tb = tcg_tb_alloc(tcg_ctx);
     tb = tcg_tb_alloc(tcg_ctx);
     if (unlikely(!tb)) {
     if (unlikely(!tb)) {
         /* flush must be done */
         /* flush must be done */
@@ -349,18 +333,27 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
     tb->cs_base = cs_base;
     tb->cs_base = cs_base;
     tb->flags = flags;
     tb->flags = flags;
     tb->cflags = cflags;
     tb->cflags = cflags;
-    tb->trace_vcpu_dstate = *cpu->trace_dstate;
     tb_set_page_addr0(tb, phys_pc);
     tb_set_page_addr0(tb, phys_pc);
     tb_set_page_addr1(tb, -1);
     tb_set_page_addr1(tb, -1);
-    tcg_ctx->gen_tb = tb;
- tb_overflow:
+    if (phys_pc != -1) {
+        tb_lock_page0(phys_pc);
+    }
 
 
-#ifdef CONFIG_PROFILER
-    /* includes aborted translations because of exceptions */
-    qatomic_set(&prof->tb_count1, prof->tb_count1 + 1);
-    ti = profile_getclock();
+    tcg_ctx->gen_tb = tb;
+    tcg_ctx->addr_type = TARGET_LONG_BITS == 32 ? TCG_TYPE_I32 : TCG_TYPE_I64;
+#ifdef CONFIG_SOFTMMU
+    tcg_ctx->page_bits = TARGET_PAGE_BITS;
+    tcg_ctx->page_mask = TARGET_PAGE_MASK;
+    tcg_ctx->tlb_dyn_max_bits = CPU_TLB_DYN_MAX_BITS;
+#endif
+    tcg_ctx->insn_start_words = TARGET_INSN_START_WORDS;
+#ifdef TCG_GUEST_DEFAULT_MO
+    tcg_ctx->guest_mo = TCG_GUEST_DEFAULT_MO;
+#else
+    tcg_ctx->guest_mo = TCG_MO_ALL;
 #endif
 #endif
 
 
+ restart_translate:
     trace_translate_block(tb, pc, tb->tc.ptr);
     trace_translate_block(tb, pc, tb->tc.ptr);
 
 
     gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
     gen_code_size = setjmp_gen_code(env, tb, pc, host_pc, &max_insns, &ti);
@@ -379,6 +372,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
             qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
                           "Restarting code generation for "
                           "Restarting code generation for "
                           "code_gen_buffer overflow\n");
                           "code_gen_buffer overflow\n");
+            tb_unlock_pages(tb);
+            tcg_ctx->gen_tb = NULL;
             goto buffer_overflow;
             goto buffer_overflow;
 
 
         case -2:
         case -2:
@@ -397,14 +392,39 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
                           "Restarting code generation with "
                           "Restarting code generation with "
                           "smaller translation block (max %d insns)\n",
                           "smaller translation block (max %d insns)\n",
                           max_insns);
                           max_insns);
-            goto tb_overflow;
+
+            /*
+             * The half-sized TB may not cross pages.
+             * TODO: Fix all targets that cross pages except with
+             * the first insn, at which point this can't be reached.
+             */
+            phys_p2 = tb_page_addr1(tb);
+            if (unlikely(phys_p2 != -1)) {
+                tb_unlock_page1(phys_pc, phys_p2);
+                tb_set_page_addr1(tb, -1);
+            }
+            goto restart_translate;
+
+        case -3:
+            /*
+             * We had a page lock ordering problem.  In order to avoid
+             * deadlock we had to drop the lock on page0, which means
+             * that everything we translated so far is compromised.
+             * Restart with locks held on both pages.
+             */
+            qemu_log_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT,
+                          "Restarting code generation with re-locked pages");
+            goto restart_translate;
 
 
         default:
         default:
             g_assert_not_reached();
             g_assert_not_reached();
         }
         }
     }
     }
+    tcg_ctx->gen_tb = NULL;
+
     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
     search_size = encode_search(tb, (void *)gen_code_buf + gen_code_size);
     if (unlikely(search_size < 0)) {
     if (unlikely(search_size < 0)) {
+        tb_unlock_pages(tb);
         goto buffer_overflow;
         goto buffer_overflow;
     }
     }
     tb->tc.size = gen_code_size;
     tb->tc.size = gen_code_size;
@@ -415,14 +435,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      */
      */
     perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
     perf_report_code(pc, tb, tcg_splitwx_to_rx(gen_code_buf));
 
 
-#ifdef CONFIG_PROFILER
-    qatomic_set(&prof->code_time, prof->code_time + profile_getclock() - ti);
-    qatomic_set(&prof->code_in_len, prof->code_in_len + tb->size);
-    qatomic_set(&prof->code_out_len, prof->code_out_len + gen_code_size);
-    qatomic_set(&prof->search_out_len, prof->search_out_len + search_size);
-#endif
-
-#ifdef DEBUG_DISAS
     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
     if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM) &&
         qemu_log_in_addr_range(pc)) {
         qemu_log_in_addr_range(pc)) {
         FILE *logfile = qemu_log_trylock();
         FILE *logfile = qemu_log_trylock();
@@ -445,8 +457,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
             /* Dump header and the first instruction */
             /* Dump header and the first instruction */
             fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
             fprintf(logfile, "OUT: [size=%d]\n", gen_code_size);
             fprintf(logfile,
             fprintf(logfile,
-                    "  -- guest addr 0x" TARGET_FMT_lx " + tb prologue\n",
-                    tcg_ctx->gen_insn_data[insn][0]);
+                    "  -- guest addr 0x%016" PRIx64 " + tb prologue\n",
+                    tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
             chunk_start = tcg_ctx->gen_insn_end_off[insn];
             chunk_start = tcg_ctx->gen_insn_end_off[insn];
             disas(logfile, tb->tc.ptr, chunk_start);
             disas(logfile, tb->tc.ptr, chunk_start);
 
 
@@ -458,8 +470,8 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
             while (insn < tb->icount) {
             while (insn < tb->icount) {
                 size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                 size_t chunk_end = tcg_ctx->gen_insn_end_off[insn];
                 if (chunk_end > chunk_start) {
                 if (chunk_end > chunk_start) {
-                    fprintf(logfile, "  -- guest addr 0x" TARGET_FMT_lx "\n",
-                            tcg_ctx->gen_insn_data[insn][0]);
+                    fprintf(logfile, "  -- guest addr 0x%016" PRIx64 "\n",
+                            tcg_ctx->gen_insn_data[insn * TARGET_INSN_START_WORDS]);
                     disas(logfile, tb->tc.ptr + chunk_start,
                     disas(logfile, tb->tc.ptr + chunk_start,
                           chunk_end - chunk_start);
                           chunk_end - chunk_start);
                     chunk_start = chunk_end;
                     chunk_start = chunk_end;
@@ -495,7 +507,6 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
             qemu_log_unlock(logfile);
             qemu_log_unlock(logfile);
         }
         }
     }
     }
-#endif
 
 
     qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
     qatomic_set(&tcg_ctx->code_gen_ptr, (void *)
         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
         ROUND_UP((uintptr_t)gen_code_buf + gen_code_size + search_size,
@@ -523,6 +534,7 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      * before attempting to link to other TBs or add to the lookup table.
      * before attempting to link to other TBs or add to the lookup table.
      */
      */
     if (tb_page_addr0(tb) == -1) {
     if (tb_page_addr0(tb) == -1) {
+        assert_no_pages_locked();
         return tb;
         return tb;
     }
     }
 
 
@@ -537,7 +549,9 @@ TranslationBlock *tb_gen_code(CPUState *cpu,
      * No explicit memory barrier is required -- tb_link_page() makes the
      * No explicit memory barrier is required -- tb_link_page() makes the
      * TB visible in a consistent state.
      * TB visible in a consistent state.
      */
      */
-    existing_tb = tb_link_page(tb, tb_page_addr0(tb), tb_page_addr1(tb));
+    existing_tb = tb_link_page(tb);
+    assert_no_pages_locked();
+
     /* if the TB already exists, discard what we just translated */
     /* if the TB already exists, discard what we just translated */
     if (unlikely(existing_tb != tb)) {
     if (unlikely(existing_tb != tb)) {
         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
         uintptr_t orig_aligned = (uintptr_t)gen_code_buf;
@@ -565,8 +579,9 @@ void tb_check_watchpoint(CPUState *cpu, uintptr_t retaddr)
     } else {
     } else {
         /* The exception probably happened in a helper.  The CPU state should
         /* The exception probably happened in a helper.  The CPU state should
            have been saved before calling it. Fetch the PC from there.  */
            have been saved before calling it. Fetch the PC from there.  */
-        CPUArchState *env = cpu->env_ptr;
-        target_ulong pc, cs_base;
+        CPUArchState *env = cpu_env(cpu);
+        vaddr pc;
+        uint64_t cs_base;
         tb_page_addr_t addr;
         tb_page_addr_t addr;
         uint32_t flags;
         uint32_t flags;
 
 
@@ -607,7 +622,7 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
     cc = CPU_GET_CLASS(cpu);
     cc = CPU_GET_CLASS(cpu);
     if (cc->tcg_ops->io_recompile_replay_branch &&
     if (cc->tcg_ops->io_recompile_replay_branch &&
         cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
         cc->tcg_ops->io_recompile_replay_branch(cpu, tb)) {
-        cpu_neg(cpu)->icount_decr.u16.low++;
+        cpu->neg.icount_decr.u16.low++;
         n = 2;
         n = 2;
     }
     }
 
 
@@ -617,155 +632,19 @@ void cpu_io_recompile(CPUState *cpu, uintptr_t retaddr)
      * operations only (which execute after completion) so we don't
      * operations only (which execute after completion) so we don't
      * double instrument the instruction.
      * double instrument the instruction.
      */
      */
-    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | CF_LAST_IO | n;
+    cpu->cflags_next_tb = curr_cflags(cpu) | CF_MEMI_ONLY | n;
 
 
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
     if (qemu_loglevel_mask(CPU_LOG_EXEC)) {
-        target_ulong pc = log_pc(cpu, tb);
+        vaddr pc = cpu->cc->get_pc(cpu);
         if (qemu_log_in_addr_range(pc)) {
         if (qemu_log_in_addr_range(pc)) {
-            qemu_log("cpu_io_recompile: rewound execution of TB to "
-                     TARGET_FMT_lx "\n", pc);
+            qemu_log("cpu_io_recompile: rewound execution of TB to %016"
+                     VADDR_PRIx "\n", pc);
         }
         }
     }
     }
 
 
     cpu_loop_exit_noexc(cpu);
     cpu_loop_exit_noexc(cpu);
 }
 }
 
 
-static void print_qht_statistics(struct qht_stats hst, GString *buf)
-{
-    uint32_t hgram_opts;
-    size_t hgram_bins;
-    char *hgram;
-
-    if (!hst.head_buckets) {
-        return;
-    }
-    g_string_append_printf(buf, "TB hash buckets     %zu/%zu "
-                           "(%0.2f%% head buckets used)\n",
-                           hst.used_head_buckets, hst.head_buckets,
-                           (double)hst.used_head_buckets /
-                           hst.head_buckets * 100);
-
-    hgram_opts =  QDIST_PR_BORDER | QDIST_PR_LABELS;
-    hgram_opts |= QDIST_PR_100X   | QDIST_PR_PERCENT;
-    if (qdist_xmax(&hst.occupancy) - qdist_xmin(&hst.occupancy) == 1) {
-        hgram_opts |= QDIST_PR_NODECIMAL;
-    }
-    hgram = qdist_pr(&hst.occupancy, 10, hgram_opts);
-    g_string_append_printf(buf, "TB hash occupancy   %0.2f%% avg chain occ. "
-                           "Histogram: %s\n",
-                           qdist_avg(&hst.occupancy) * 100, hgram);
-    g_free(hgram);
-
-    hgram_opts = QDIST_PR_BORDER | QDIST_PR_LABELS;
-    hgram_bins = qdist_xmax(&hst.chain) - qdist_xmin(&hst.chain);
-    if (hgram_bins > 10) {
-        hgram_bins = 10;
-    } else {
-        hgram_bins = 0;
-        hgram_opts |= QDIST_PR_NODECIMAL | QDIST_PR_NOBINRANGE;
-    }
-    hgram = qdist_pr(&hst.chain, hgram_bins, hgram_opts);
-    g_string_append_printf(buf, "TB hash avg chain   %0.3f buckets. "
-                           "Histogram: %s\n",
-                           qdist_avg(&hst.chain), hgram);
-    g_free(hgram);
-}
-
-struct tb_tree_stats {
-    size_t nb_tbs;
-    size_t host_size;
-    size_t target_size;
-    size_t max_target_size;
-    size_t direct_jmp_count;
-    size_t direct_jmp2_count;
-    size_t cross_page;
-};
-
-static gboolean tb_tree_stats_iter(gpointer key, gpointer value, gpointer data)
-{
-    const TranslationBlock *tb = value;
-    struct tb_tree_stats *tst = data;
-
-    tst->nb_tbs++;
-    tst->host_size += tb->tc.size;
-    tst->target_size += tb->size;
-    if (tb->size > tst->max_target_size) {
-        tst->max_target_size = tb->size;
-    }
-    if (tb_page_addr1(tb) != -1) {
-        tst->cross_page++;
-    }
-    if (tb->jmp_reset_offset[0] != TB_JMP_OFFSET_INVALID) {
-        tst->direct_jmp_count++;
-        if (tb->jmp_reset_offset[1] != TB_JMP_OFFSET_INVALID) {
-            tst->direct_jmp2_count++;
-        }
-    }
-    return false;
-}
-
-void dump_exec_info(GString *buf)
-{
-    struct tb_tree_stats tst = {};
-    struct qht_stats hst;
-    size_t nb_tbs, flush_full, flush_part, flush_elide;
-
-    tcg_tb_foreach(tb_tree_stats_iter, &tst);
-    nb_tbs = tst.nb_tbs;
-    /* XXX: avoid using doubles ? */
-    g_string_append_printf(buf, "Translation buffer state:\n");
-    /*
-     * Report total code size including the padding and TB structs;
-     * otherwise users might think "-accel tcg,tb-size" is not honoured.
-     * For avg host size we use the precise numbers from tb_tree_stats though.
-     */
-    g_string_append_printf(buf, "gen code size       %zu/%zu\n",
-                           tcg_code_size(), tcg_code_capacity());
-    g_string_append_printf(buf, "TB count            %zu\n", nb_tbs);
-    g_string_append_printf(buf, "TB avg target size  %zu max=%zu bytes\n",
-                           nb_tbs ? tst.target_size / nb_tbs : 0,
-                           tst.max_target_size);
-    g_string_append_printf(buf, "TB avg host size    %zu bytes "
-                           "(expansion ratio: %0.1f)\n",
-                           nb_tbs ? tst.host_size / nb_tbs : 0,
-                           tst.target_size ?
-                           (double)tst.host_size / tst.target_size : 0);
-    g_string_append_printf(buf, "cross page TB count %zu (%zu%%)\n",
-                           tst.cross_page,
-                           nb_tbs ? (tst.cross_page * 100) / nb_tbs : 0);
-    g_string_append_printf(buf, "direct jump count   %zu (%zu%%) "
-                           "(2 jumps=%zu %zu%%)\n",
-                           tst.direct_jmp_count,
-                           nb_tbs ? (tst.direct_jmp_count * 100) / nb_tbs : 0,
-                           tst.direct_jmp2_count,
-                           nb_tbs ? (tst.direct_jmp2_count * 100) / nb_tbs : 0);
-
-    qht_statistics_init(&tb_ctx.htable, &hst);
-    print_qht_statistics(hst, buf);
-    qht_statistics_destroy(&hst);
-
-    g_string_append_printf(buf, "\nStatistics:\n");
-    g_string_append_printf(buf, "TB flush count      %u\n",
-                           qatomic_read(&tb_ctx.tb_flush_count));
-    g_string_append_printf(buf, "TB invalidate count %u\n",
-                           qatomic_read(&tb_ctx.tb_phys_invalidate_count));
-
-    tlb_flush_counts(&flush_full, &flush_part, &flush_elide);
-    g_string_append_printf(buf, "TLB full flushes    %zu\n", flush_full);
-    g_string_append_printf(buf, "TLB partial flushes %zu\n", flush_part);
-    g_string_append_printf(buf, "TLB elided flushes  %zu\n", flush_elide);
-    tcg_dump_info(buf);
-}
-
-#else /* CONFIG_USER_ONLY */
-
-void cpu_interrupt(CPUState *cpu, int mask)
-{
-    g_assert(qemu_mutex_iothread_locked());
-    cpu->interrupt_request |= mask;
-    qatomic_set(&cpu_neg(cpu)->icount_decr.u16.high, -1);
-}
-
 #endif /* CONFIG_USER_ONLY */
 #endif /* CONFIG_USER_ONLY */
 
 
 /*
 /*
@@ -785,11 +664,3 @@ void tcg_flush_jmp_cache(CPUState *cpu)
         qatomic_set(&jc->array[i].tb, NULL);
         qatomic_set(&jc->array[i].tb, NULL);
     }
     }
 }
 }
-
-/* This is a wrapper for common code that can not use CONFIG_SOFTMMU */
-void tcg_flush_softmmu_tlb(CPUState *cs)
-{
-#ifdef CONFIG_SOFTMMU
-    tlb_flush(cs);
-#endif
-}

+ 337 - 109
accel/tcg/translator.c

@@ -8,17 +8,101 @@
  */
  */
 
 
 #include "qemu/osdep.h"
 #include "qemu/osdep.h"
+#include "qemu/log.h"
 #include "qemu/error-report.h"
 #include "qemu/error-report.h"
-#include "tcg/tcg.h"
-#include "tcg/tcg-op.h"
 #include "exec/exec-all.h"
 #include "exec/exec-all.h"
-#include "exec/gen-icount.h"
-#include "exec/log.h"
 #include "exec/translator.h"
 #include "exec/translator.h"
+#include "exec/cpu_ldst.h"
 #include "exec/plugin-gen.h"
 #include "exec/plugin-gen.h"
-#include "exec/replay-core.h"
+#include "exec/cpu_ldst.h"
+#include "tcg/tcg-op-common.h"
+#include "internal-target.h"
+#include "disas/disas.h"
 
 
-bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
+static void set_can_do_io(DisasContextBase *db, bool val)
+{
+    QEMU_BUILD_BUG_ON(sizeof_field(CPUState, neg.can_do_io) != 1);
+    tcg_gen_st8_i32(tcg_constant_i32(val), tcg_env,
+                    offsetof(ArchCPU, parent_obj.neg.can_do_io) -
+                    offsetof(ArchCPU, env));
+}
+
+bool translator_io_start(DisasContextBase *db)
+{
+    /*
+     * Ensure that this instruction will be the last in the TB.
+     * The target may override this to something more forceful.
+     */
+    if (db->is_jmp == DISAS_NEXT) {
+        db->is_jmp = DISAS_TOO_MANY;
+    }
+    return true;
+}
+
+static TCGOp *gen_tb_start(DisasContextBase *db, uint32_t cflags)
+{
+    TCGv_i32 count = NULL;
+    TCGOp *icount_start_insn = NULL;
+
+    if ((cflags & CF_USE_ICOUNT) || !(cflags & CF_NOIRQ)) {
+        count = tcg_temp_new_i32();
+        tcg_gen_ld_i32(count, tcg_env,
+                       offsetof(ArchCPU, parent_obj.neg.icount_decr.u32)
+                       - offsetof(ArchCPU, env));
+    }
+
+    if (cflags & CF_USE_ICOUNT) {
+        /*
+         * We emit a sub with a dummy immediate argument. Keep the insn index
+         * of the sub so that we later (when we know the actual insn count)
+         * can update the argument with the actual insn count.
+         */
+        tcg_gen_sub_i32(count, count, tcg_constant_i32(0));
+        icount_start_insn = tcg_last_op();
+    }
+
+    /*
+     * Emit the check against icount_decr.u32 to see if we should exit
+     * unless we suppress the check with CF_NOIRQ. If we are using
+     * icount and have suppressed interruption the higher level code
+     * should have ensured we don't run more instructions than the
+     * budget.
+     */
+    if (cflags & CF_NOIRQ) {
+        tcg_ctx->exitreq_label = NULL;
+    } else {
+        tcg_ctx->exitreq_label = gen_new_label();
+        tcg_gen_brcondi_i32(TCG_COND_LT, count, 0, tcg_ctx->exitreq_label);
+    }
+
+    if (cflags & CF_USE_ICOUNT) {
+        tcg_gen_st16_i32(count, tcg_env,
+                         offsetof(ArchCPU, parent_obj.neg.icount_decr.u16.low)
+                         - offsetof(ArchCPU, env));
+    }
+
+    return icount_start_insn;
+}
+
+static void gen_tb_end(const TranslationBlock *tb, uint32_t cflags,
+                       TCGOp *icount_start_insn, int num_insns)
+{
+    if (cflags & CF_USE_ICOUNT) {
+        /*
+         * Update the num_insn immediate parameter now that we know
+         * the actual insn count.
+         */
+        tcg_set_insn_param(icount_start_insn, 2,
+                           tcgv_i32_arg(tcg_constant_i32(num_insns)));
+    }
+
+    if (tcg_ctx->exitreq_label) {
+        gen_set_label(tcg_ctx->exitreq_label);
+        tcg_gen_exit_tb(tb, TB_EXIT_REQUESTED);
+    }
+}
+
+bool translator_use_goto_tb(DisasContextBase *db, vaddr dest)
 {
 {
     /* Suppress goto_tb if requested. */
     /* Suppress goto_tb if requested. */
     if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
     if (tb_cflags(db->tb) & CF_NO_GOTO_TB) {
@@ -30,10 +114,12 @@ bool translator_use_goto_tb(DisasContextBase *db, target_ulong dest)
 }
 }
 
 
 void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
 void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
-                     target_ulong pc, void *host_pc,
-                     const TranslatorOps *ops, DisasContextBase *db)
+                     vaddr pc, void *host_pc, const TranslatorOps *ops,
+                     DisasContextBase *db)
 {
 {
     uint32_t cflags = tb_cflags(tb);
     uint32_t cflags = tb_cflags(tb);
+    TCGOp *icount_start_insn;
+    TCGOp *first_insn_start = NULL;
     bool plugin_enabled;
     bool plugin_enabled;
 
 
     /* Initialize DisasContext */
     /* Initialize DisasContext */
@@ -44,45 +130,44 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
     db->num_insns = 0;
     db->num_insns = 0;
     db->max_insns = *max_insns;
     db->max_insns = *max_insns;
     db->singlestep_enabled = cflags & CF_SINGLE_STEP;
     db->singlestep_enabled = cflags & CF_SINGLE_STEP;
+    db->insn_start = NULL;
+    db->fake_insn = false;
     db->host_addr[0] = host_pc;
     db->host_addr[0] = host_pc;
     db->host_addr[1] = NULL;
     db->host_addr[1] = NULL;
-
-#ifdef CONFIG_USER_ONLY
-    page_protect(pc);
-#endif
+    db->record_start = 0;
+    db->record_len = 0;
 
 
     ops->init_disas_context(db, cpu);
     ops->init_disas_context(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
 
 
     /* Start translating.  */
     /* Start translating.  */
-    gen_tb_start(db->tb);
+    icount_start_insn = gen_tb_start(db, cflags);
     ops->tb_start(db, cpu);
     ops->tb_start(db, cpu);
     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
     tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
 
 
-    plugin_enabled = plugin_gen_tb_start(cpu, db, cflags & CF_MEMI_ONLY);
+    plugin_enabled = plugin_gen_tb_start(cpu, db);
+    db->plugin_enabled = plugin_enabled;
 
 
     while (true) {
     while (true) {
         *max_insns = ++db->num_insns;
         *max_insns = ++db->num_insns;
         ops->insn_start(db, cpu);
         ops->insn_start(db, cpu);
+        db->insn_start = tcg_last_op();
+        if (first_insn_start == NULL) {
+            first_insn_start = db->insn_start;
+        }
         tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
         tcg_debug_assert(db->is_jmp == DISAS_NEXT);  /* no early exit */
 
 
         if (plugin_enabled) {
         if (plugin_enabled) {
             plugin_gen_insn_start(cpu, db);
             plugin_gen_insn_start(cpu, db);
         }
         }
 
 
-        /* Disassemble one instruction.  The translate_insn hook should
-           update db->pc_next and db->is_jmp to indicate what should be
-           done next -- either exiting this loop or locate the start of
-           the next instruction.  */
-        if (db->num_insns == db->max_insns && (cflags & CF_LAST_IO)) {
-            /* Accept I/O on the last instruction.  */
-            gen_io_start();
-            ops->translate_insn(db, cpu);
-        } else {
-            /* we should only see CF_MEMI_ONLY for io_recompile */
-            tcg_debug_assert(!(cflags & CF_MEMI_ONLY));
-            ops->translate_insn(db, cpu);
-        }
+        /*
+         * Disassemble one instruction.  The translate_insn hook should
+         * update db->pc_next and db->is_jmp to indicate what should be
+         * done next -- either exiting this loop or locate the start of
+         * the next instruction.
+         */
+        ops->translate_insn(db, cpu);
 
 
         /*
         /*
          * We can't instrument after instructions that change control
          * We can't instrument after instructions that change control
@@ -112,136 +197,279 @@ void translator_loop(CPUState *cpu, TranslationBlock *tb, int *max_insns,
 
 
     /* Emit code to exit the TB, as indicated by db->is_jmp.  */
     /* Emit code to exit the TB, as indicated by db->is_jmp.  */
     ops->tb_stop(db, cpu);
     ops->tb_stop(db, cpu);
-    gen_tb_end(db->tb, db->num_insns);
-
-    if (plugin_enabled) {
-        plugin_gen_tb_end(cpu);
+    gen_tb_end(tb, cflags, icount_start_insn, db->num_insns);
+
+    /*
+     * Manage can_do_io for the translation block: set to false before
+     * the first insn and set to true before the last insn.
+     */
+    if (db->num_insns == 1) {
+        tcg_debug_assert(first_insn_start == db->insn_start);
+    } else {
+        tcg_debug_assert(first_insn_start != db->insn_start);
+        tcg_ctx->emit_before_op = first_insn_start;
+        set_can_do_io(db, false);
     }
     }
+    tcg_ctx->emit_before_op = db->insn_start;
+    set_can_do_io(db, true);
+    tcg_ctx->emit_before_op = NULL;
 
 
-    /* The disas_log hook may use these values rather than recompute.  */
+    /* May be used by disas_log or plugin callbacks. */
     tb->size = db->pc_next - db->pc_first;
     tb->size = db->pc_next - db->pc_first;
     tb->icount = db->num_insns;
     tb->icount = db->num_insns;
 
 
-#ifdef DEBUG_DISAS
+    if (plugin_enabled) {
+        plugin_gen_tb_end(cpu, db->num_insns);
+    }
+
     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
     if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
         && qemu_log_in_addr_range(db->pc_first)) {
         && qemu_log_in_addr_range(db->pc_first)) {
         FILE *logfile = qemu_log_trylock();
         FILE *logfile = qemu_log_trylock();
         if (logfile) {
         if (logfile) {
             fprintf(logfile, "----------------\n");
             fprintf(logfile, "----------------\n");
-            ops->disas_log(db, cpu, logfile);
+
+            if (!ops->disas_log ||
+                !ops->disas_log(db, cpu, logfile)) {
+                fprintf(logfile, "IN: %s\n", lookup_symbol(db->pc_first));
+                target_disas(logfile, cpu, db);
+            }
             fprintf(logfile, "\n");
             fprintf(logfile, "\n");
             qemu_log_unlock(logfile);
             qemu_log_unlock(logfile);
         }
         }
     }
     }
-#endif
 }
 }
 
 
-static void *translator_access(CPUArchState *env, DisasContextBase *db,
-                               target_ulong pc, size_t len)
+static bool translator_ld(CPUArchState *env, DisasContextBase *db,
+                          void *dest, vaddr pc, size_t len)
 {
 {
+    TranslationBlock *tb = db->tb;
+    vaddr last = pc + len - 1;
     void *host;
     void *host;
-    target_ulong base, end;
-    TranslationBlock *tb;
-
-    tb = db->tb;
+    vaddr base;
 
 
     /* Use slow path if first page is MMIO. */
     /* Use slow path if first page is MMIO. */
     if (unlikely(tb_page_addr0(tb) == -1)) {
     if (unlikely(tb_page_addr0(tb) == -1)) {
-        return NULL;
+        /* We capped translation with first page MMIO in tb_gen_code. */
+        tcg_debug_assert(db->max_insns == 1);
+        return false;
     }
     }
 
 
-    end = pc + len - 1;
-    if (likely(is_same_page(db, end))) {
-        host = db->host_addr[0];
-        base = db->pc_first;
-    } else {
-        host = db->host_addr[1];
-        base = TARGET_PAGE_ALIGN(db->pc_first);
-        if (host == NULL) {
-            tb_page_addr_t phys_page =
-                get_page_addr_code_hostp(env, base, &db->host_addr[1]);
-
-            /*
-             * If the second page is MMIO, treat as if the first page
-             * was MMIO as well, so that we do not cache the TB.
-             */
-            if (unlikely(phys_page == -1)) {
-                tb_set_page_addr0(tb, -1);
-                return NULL;
+    host = db->host_addr[0];
+    base = db->pc_first;
+
+    if (likely(((base ^ last) & TARGET_PAGE_MASK) == 0)) {
+        /* Entire read is from the first page. */
+        memcpy(dest, host + (pc - base), len);
+        return true;
+    }
+
+    if (unlikely(((base ^ pc) & TARGET_PAGE_MASK) == 0)) {
+        /* Read begins on the first page and extends to the second. */
+        size_t len0 = -(pc | TARGET_PAGE_MASK);
+        memcpy(dest, host + (pc - base), len0);
+        pc += len0;
+        dest += len0;
+        len -= len0;
+    }
+
+    /*
+     * The read must conclude on the second page and not extend to a third.
+     *
+     * TODO: We could allow the two pages to be virtually discontiguous,
+     * since we already allow the two pages to be physically discontiguous.
+     * The only reasonable use case would be executing an insn at the end
+     * of the address space wrapping around to the beginning.  For that,
+     * we would need to know the current width of the address space.
+     * In the meantime, assert.
+     */
+    base = (base & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
+    assert(((base ^ pc) & TARGET_PAGE_MASK) == 0);
+    assert(((base ^ last) & TARGET_PAGE_MASK) == 0);
+    host = db->host_addr[1];
+
+    if (host == NULL) {
+        tb_page_addr_t page0, old_page1, new_page1;
+
+        new_page1 = get_page_addr_code_hostp(env, base, &db->host_addr[1]);
+
+        /*
+         * If the second page is MMIO, treat as if the first page
+         * was MMIO as well, so that we do not cache the TB.
+         */
+        if (unlikely(new_page1 == -1)) {
+            tb_unlock_pages(tb);
+            tb_set_page_addr0(tb, -1);
+            /* Require that this be the final insn. */
+            db->max_insns = db->num_insns;
+            return false;
+        }
+
+        /*
+         * If this is not the first time around, and page1 matches,
+         * then we already have the page locked.  Alternately, we're
+         * not doing anything to prevent the PTE from changing, so
+         * we might wind up with a different page, requiring us to
+         * re-do the locking.
+         */
+        old_page1 = tb_page_addr1(tb);
+        if (likely(new_page1 != old_page1)) {
+            page0 = tb_page_addr0(tb);
+            if (unlikely(old_page1 != -1)) {
+                tb_unlock_page1(page0, old_page1);
             }
             }
+            tb_set_page_addr1(tb, new_page1);
+            tb_lock_page1(page0, new_page1);
+        }
+        host = db->host_addr[1];
+    }
+
+    memcpy(dest, host + (pc - base), len);
+    return true;
+}
+
+static void record_save(DisasContextBase *db, vaddr pc,
+                        const void *from, int size)
+{
+    int offset;
+
+    /* Do not record probes before the start of TB. */
+    if (pc < db->pc_first) {
+        return;
+    }
+
+    /*
+     * In translator_access, we verified that pc is within 2 pages
+     * of pc_first, thus this will never overflow.
+     */
+    offset = pc - db->pc_first;
+
+    /*
+     * Either the first or second page may be I/O.  If it is the second,
+     * then the first byte we need to record will be at a non-zero offset.
+     * In either case, we should not need to record but a single insn.
+     */
+    if (db->record_len == 0) {
+        db->record_start = offset;
+        db->record_len = size;
+    } else {
+        assert(offset == db->record_start + db->record_len);
+        assert(db->record_len + size <= sizeof(db->record));
+        db->record_len += size;
+    }
+
+    memcpy(db->record + (offset - db->record_start), from, size);
+}
+
+size_t translator_st_len(const DisasContextBase *db)
+{
+    return db->fake_insn ? db->record_len : db->tb->size;
+}
 
 
-            tb_set_page_addr1(tb, phys_page);
-#ifdef CONFIG_USER_ONLY
-            page_protect(end);
-#endif
-            host = db->host_addr[1];
+bool translator_st(const DisasContextBase *db, void *dest,
+                   vaddr addr, size_t len)
+{
+    size_t offset, offset_end;
+
+    if (addr < db->pc_first) {
+        return false;
+    }
+    offset = addr - db->pc_first;
+    offset_end = offset + len;
+    if (offset_end > translator_st_len(db)) {
+        return false;
+    }
+
+    if (!db->fake_insn) {
+        size_t offset_page1 = -(db->pc_first | TARGET_PAGE_MASK);
+
+        /* Get all the bytes from the first page. */
+        if (db->host_addr[0]) {
+            if (offset_end <= offset_page1) {
+                memcpy(dest, db->host_addr[0] + offset, len);
+                return true;
+            }
+            if (offset < offset_page1) {
+                size_t len0 = offset_page1 - offset;
+                memcpy(dest, db->host_addr[0] + offset, len0);
+                offset += len0;
+                dest += len0;
+            }
         }
         }
 
 
-        /* Use slow path when crossing pages. */
-        if (is_same_page(db, pc)) {
-            return NULL;
+        /* Get any bytes from the second page. */
+        if (db->host_addr[1] && offset >= offset_page1) {
+            memcpy(dest, db->host_addr[1] + (offset - offset_page1),
+                   offset_end - offset);
+            return true;
         }
         }
     }
     }
 
 
-    tcg_debug_assert(pc >= base);
-    return host + (pc - base);
+    /* Else get recorded bytes. */
+    if (db->record_len != 0 &&
+        offset >= db->record_start &&
+        offset_end <= db->record_start + db->record_len) {
+        memcpy(dest, db->record + (offset - db->record_start),
+               offset_end - offset);
+        return true;
+    }
+    return false;
 }
 }
 
 
-uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
+uint8_t translator_ldub(CPUArchState *env, DisasContextBase *db, vaddr pc)
 {
 {
-    uint8_t ret;
-    void *p = translator_access(env, db, pc, sizeof(ret));
+    uint8_t raw;
 
 
-    if (p) {
-        plugin_insn_append(pc, p, sizeof(ret));
-        return ldub_p(p);
+    if (!translator_ld(env, db, &raw, pc, sizeof(raw))) {
+        raw = cpu_ldub_code(env, pc);
+        record_save(db, pc, &raw, sizeof(raw));
     }
     }
-    ret = cpu_ldub_code(env, pc);
-    plugin_insn_append(pc, &ret, sizeof(ret));
-    return ret;
+    return raw;
 }
 }
 
 
-uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
+uint16_t translator_lduw(CPUArchState *env, DisasContextBase *db, vaddr pc)
 {
 {
-    uint16_t ret, plug;
-    void *p = translator_access(env, db, pc, sizeof(ret));
+    uint16_t raw, tgt;
 
 
-    if (p) {
-        plugin_insn_append(pc, p, sizeof(ret));
-        return lduw_p(p);
+    if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
+        tgt = tswap16(raw);
+    } else {
+        tgt = cpu_lduw_code(env, pc);
+        raw = tswap16(tgt);
+        record_save(db, pc, &raw, sizeof(raw));
     }
     }
-    ret = cpu_lduw_code(env, pc);
-    plug = tswap16(ret);
-    plugin_insn_append(pc, &plug, sizeof(ret));
-    return ret;
+    return tgt;
 }
 }
 
 
-uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
+uint32_t translator_ldl(CPUArchState *env, DisasContextBase *db, vaddr pc)
 {
 {
-    uint32_t ret, plug;
-    void *p = translator_access(env, db, pc, sizeof(ret));
+    uint32_t raw, tgt;
 
 
-    if (p) {
-        plugin_insn_append(pc, p, sizeof(ret));
-        return ldl_p(p);
+    if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
+        tgt = tswap32(raw);
+    } else {
+        tgt = cpu_ldl_code(env, pc);
+        raw = tswap32(tgt);
+        record_save(db, pc, &raw, sizeof(raw));
     }
     }
-    ret = cpu_ldl_code(env, pc);
-    plug = tswap32(ret);
-    plugin_insn_append(pc, &plug, sizeof(ret));
-    return ret;
+    return tgt;
 }
 }
 
 
-uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, abi_ptr pc)
+uint64_t translator_ldq(CPUArchState *env, DisasContextBase *db, vaddr pc)
 {
 {
-    uint64_t ret, plug;
-    void *p = translator_access(env, db, pc, sizeof(ret));
+    uint64_t raw, tgt;
 
 
-    if (p) {
-        plugin_insn_append(pc, p, sizeof(ret));
-        return ldq_p(p);
+    if (translator_ld(env, db, &raw, pc, sizeof(raw))) {
+        tgt = tswap64(raw);
+    } else {
+        tgt = cpu_ldq_code(env, pc);
+        raw = tswap64(tgt);
+        record_save(db, pc, &raw, sizeof(raw));
     }
     }
-    ret = cpu_ldq_code(env, pc);
-    plug = tswap64(ret);
-    plugin_insn_append(pc, &plug, sizeof(ret));
-    return ret;
+    return tgt;
+}
+
+void translator_fake_ld(DisasContextBase *db, const void *data, size_t len)
+{
+    db->fake_insn = true;
+    record_save(db, db->pc_first, data, len);
 }
 }

+ 4 - 2
accel/tcg/user-exec-stub.c

@@ -2,8 +2,6 @@
 #include "hw/core/cpu.h"
 #include "hw/core/cpu.h"
 #include "exec/replay-core.h"
 #include "exec/replay-core.h"
 
 
-bool enable_cpu_pm = false;
-
 void cpu_resume(CPUState *cpu)
 void cpu_resume(CPUState *cpu)
 {
 {
 }
 }
@@ -16,6 +14,10 @@ void qemu_init_vcpu(CPUState *cpu)
 {
 {
 }
 }
 
 
+void cpu_exec_reset_hold(CPUState *cpu)
+{
+}
+
 /* User mode emulation does not support record/replay yet.  */
 /* User mode emulation does not support record/replay yet.  */
 
 
 bool replay_exception(void)
 bool replay_exception(void)

+ 249 - 240
accel/tcg/user-exec.c

@@ -24,17 +24,27 @@
 #include "qemu/bitops.h"
 #include "qemu/bitops.h"
 #include "qemu/rcu.h"
 #include "qemu/rcu.h"
 #include "exec/cpu_ldst.h"
 #include "exec/cpu_ldst.h"
+#include "qemu/main-loop.h"
 #include "exec/translate-all.h"
 #include "exec/translate-all.h"
+#include "exec/page-protection.h"
 #include "exec/helper-proto.h"
 #include "exec/helper-proto.h"
 #include "qemu/atomic128.h"
 #include "qemu/atomic128.h"
 #include "trace/trace-root.h"
 #include "trace/trace-root.h"
 #include "tcg/tcg-ldst.h"
 #include "tcg/tcg-ldst.h"
-#include "internal.h"
+#include "internal-common.h"
+#include "internal-target.h"
 
 
 __thread uintptr_t helper_retaddr;
 __thread uintptr_t helper_retaddr;
 
 
 //#define DEBUG_SIGNAL
 //#define DEBUG_SIGNAL
 
 
+void cpu_interrupt(CPUState *cpu, int mask)
+{
+    g_assert(bql_locked());
+    cpu->interrupt_request |= mask;
+    qatomic_set(&cpu->neg.icount_decr.u16.high, -1);
+}
+
 /*
 /*
  * Adjust the pc to pass to cpu_restore_state; return the memop type.
  * Adjust the pc to pass to cpu_restore_state; return the memop type.
  */
  */
@@ -144,7 +154,7 @@ typedef struct PageFlagsNode {
 
 
 static IntervalTreeRoot pageflags_root;
 static IntervalTreeRoot pageflags_root;
 
 
-static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
+static PageFlagsNode *pageflags_find(target_ulong start, target_ulong last)
 {
 {
     IntervalTreeNode *n;
     IntervalTreeNode *n;
 
 
@@ -153,7 +163,7 @@ static PageFlagsNode *pageflags_find(target_ulong start, target_long last)
 }
 }
 
 
 static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
 static PageFlagsNode *pageflags_next(PageFlagsNode *p, target_ulong start,
-                                     target_long last)
+                                     target_ulong last)
 {
 {
     IntervalTreeNode *n;
     IntervalTreeNode *n;
 
 
@@ -520,19 +530,19 @@ void page_set_flags(target_ulong start, target_ulong last, int flags)
     }
     }
 }
 }
 
 
-int page_check_range(target_ulong start, target_ulong len, int flags)
+bool page_check_range(target_ulong start, target_ulong len, int flags)
 {
 {
     target_ulong last;
     target_ulong last;
     int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
     int locked;  /* tri-state: =0: unlocked, +1: global, -1: local */
-    int ret;
+    bool ret;
 
 
     if (len == 0) {
     if (len == 0) {
-        return 0;  /* trivial length */
+        return true;  /* trivial length */
     }
     }
 
 
     last = start + len - 1;
     last = start + len - 1;
     if (last < start) {
     if (last < start) {
-        return -1; /* wrap around */
+        return false; /* wrap around */
     }
     }
 
 
     locked = have_mmap_lock();
     locked = have_mmap_lock();
@@ -551,33 +561,33 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
                 p = pageflags_find(start, last);
                 p = pageflags_find(start, last);
             }
             }
             if (!p) {
             if (!p) {
-                ret = -1; /* entire region invalid */
+                ret = false; /* entire region invalid */
                 break;
                 break;
             }
             }
         }
         }
         if (start < p->itree.start) {
         if (start < p->itree.start) {
-            ret = -1; /* initial bytes invalid */
+            ret = false; /* initial bytes invalid */
             break;
             break;
         }
         }
 
 
         missing = flags & ~p->flags;
         missing = flags & ~p->flags;
-        if (missing & PAGE_READ) {
-            ret = -1; /* page not readable */
+        if (missing & ~PAGE_WRITE) {
+            ret = false; /* page doesn't match */
             break;
             break;
         }
         }
         if (missing & PAGE_WRITE) {
         if (missing & PAGE_WRITE) {
             if (!(p->flags & PAGE_WRITE_ORG)) {
             if (!(p->flags & PAGE_WRITE_ORG)) {
-                ret = -1; /* page not writable */
+                ret = false; /* page not writable */
                 break;
                 break;
             }
             }
             /* Asking about writable, but has been protected: undo. */
             /* Asking about writable, but has been protected: undo. */
             if (!page_unprotect(start, 0)) {
             if (!page_unprotect(start, 0)) {
-                ret = -1;
+                ret = false;
                 break;
                 break;
             }
             }
             /* TODO: page_unprotect should take a range, not a single page. */
             /* TODO: page_unprotect should take a range, not a single page. */
             if (last - start < TARGET_PAGE_SIZE) {
             if (last - start < TARGET_PAGE_SIZE) {
-                ret = 0; /* ok */
+                ret = true; /* ok */
                 break;
                 break;
             }
             }
             start += TARGET_PAGE_SIZE;
             start += TARGET_PAGE_SIZE;
@@ -585,7 +595,7 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
         }
         }
 
 
         if (last <= p->itree.last) {
         if (last <= p->itree.last) {
-            ret = 0; /* ok */
+            ret = true; /* ok */
             break;
             break;
         }
         }
         start = p->itree.last + 1;
         start = p->itree.last + 1;
@@ -598,20 +608,69 @@ int page_check_range(target_ulong start, target_ulong len, int flags)
     return ret;
     return ret;
 }
 }
 
 
+bool page_check_range_empty(target_ulong start, target_ulong last)
+{
+    assert(last >= start);
+    assert_memory_lock();
+    return pageflags_find(start, last) == NULL;
+}
+
+target_ulong page_find_range_empty(target_ulong min, target_ulong max,
+                                   target_ulong len, target_ulong align)
+{
+    target_ulong len_m1, align_m1;
+
+    assert(min <= max);
+    assert(max <= GUEST_ADDR_MAX);
+    assert(len != 0);
+    assert(is_power_of_2(align));
+    assert_memory_lock();
+
+    len_m1 = len - 1;
+    align_m1 = align - 1;
+
+    /* Iteratively narrow the search region. */
+    while (1) {
+        PageFlagsNode *p;
+
+        /* Align min and double-check there's enough space remaining. */
+        min = (min + align_m1) & ~align_m1;
+        if (min > max) {
+            return -1;
+        }
+        if (len_m1 > max - min) {
+            return -1;
+        }
+
+        p = pageflags_find(min, min + len_m1);
+        if (p == NULL) {
+            /* Found! */
+            return min;
+        }
+        if (max <= p->itree.last) {
+            /* Existing allocation fills the remainder of the search region. */
+            return -1;
+        }
+        /* Skip across existing allocation. */
+        min = p->itree.last + 1;
+    }
+}
+
 void page_protect(tb_page_addr_t address)
 void page_protect(tb_page_addr_t address)
 {
 {
     PageFlagsNode *p;
     PageFlagsNode *p;
     target_ulong start, last;
     target_ulong start, last;
+    int host_page_size = qemu_real_host_page_size();
     int prot;
     int prot;
 
 
     assert_memory_lock();
     assert_memory_lock();
 
 
-    if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
+    if (host_page_size <= TARGET_PAGE_SIZE) {
         start = address & TARGET_PAGE_MASK;
         start = address & TARGET_PAGE_MASK;
         last = start + TARGET_PAGE_SIZE - 1;
         last = start + TARGET_PAGE_SIZE - 1;
     } else {
     } else {
-        start = address & qemu_host_page_mask;
-        last = start + qemu_host_page_size - 1;
+        start = address & -host_page_size;
+        last = start + host_page_size - 1;
     }
     }
 
 
     p = pageflags_find(start, last);
     p = pageflags_find(start, last);
@@ -622,7 +681,7 @@ void page_protect(tb_page_addr_t address)
 
 
     if (unlikely(p->itree.last < last)) {
     if (unlikely(p->itree.last < last)) {
         /* More than one protection region covers the one host page. */
         /* More than one protection region covers the one host page. */
-        assert(TARGET_PAGE_SIZE < qemu_host_page_size);
+        assert(TARGET_PAGE_SIZE < host_page_size);
         while ((p = pageflags_next(p, start, last)) != NULL) {
         while ((p = pageflags_next(p, start, last)) != NULL) {
             prot |= p->flags;
             prot |= p->flags;
         }
         }
@@ -630,7 +689,7 @@ void page_protect(tb_page_addr_t address)
 
 
     if (prot & PAGE_WRITE) {
     if (prot & PAGE_WRITE) {
         pageflags_set_clear(start, last, 0, PAGE_WRITE);
         pageflags_set_clear(start, last, 0, PAGE_WRITE);
-        mprotect(g2h_untagged(start), qemu_host_page_size,
+        mprotect(g2h_untagged(start), last - start + 1,
                  prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
                  prot & (PAGE_READ | PAGE_EXEC) ? PROT_READ : PROT_NONE);
     }
     }
 }
 }
@@ -676,18 +735,19 @@ int page_unprotect(target_ulong address, uintptr_t pc)
         }
         }
 #endif
 #endif
     } else {
     } else {
+        int host_page_size = qemu_real_host_page_size();
         target_ulong start, len, i;
         target_ulong start, len, i;
         int prot;
         int prot;
 
 
-        if (qemu_host_page_size <= TARGET_PAGE_SIZE) {
+        if (host_page_size <= TARGET_PAGE_SIZE) {
             start = address & TARGET_PAGE_MASK;
             start = address & TARGET_PAGE_MASK;
             len = TARGET_PAGE_SIZE;
             len = TARGET_PAGE_SIZE;
             prot = p->flags | PAGE_WRITE;
             prot = p->flags | PAGE_WRITE;
             pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
             pageflags_set_clear(start, start + len - 1, PAGE_WRITE, 0);
             current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
             current_tb_invalidated = tb_invalidate_phys_page_unwind(start, pc);
         } else {
         } else {
-            start = address & qemu_host_page_mask;
-            len = qemu_host_page_size;
+            start = address & -host_page_size;
+            len = host_page_size;
             prot = 0;
             prot = 0;
 
 
             for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
             for (i = 0; i < len; i += TARGET_PAGE_SIZE) {
@@ -713,7 +773,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
         if (prot & PAGE_EXEC) {
         if (prot & PAGE_EXEC) {
             prot = (prot & ~PAGE_EXEC) | PAGE_READ;
             prot = (prot & ~PAGE_EXEC) | PAGE_READ;
         }
         }
-        mprotect((void *)g2h_untagged(start), len, prot & PAGE_BITS);
+        mprotect((void *)g2h_untagged(start), len, prot & PAGE_RWX);
     }
     }
     mmap_unlock();
     mmap_unlock();
 
 
@@ -721,7 +781,7 @@ int page_unprotect(target_ulong address, uintptr_t pc)
     return current_tb_invalidated ? 2 : 1;
     return current_tb_invalidated ? 2 : 1;
 }
 }
 
 
-static int probe_access_internal(CPUArchState *env, target_ulong addr,
+static int probe_access_internal(CPUArchState *env, vaddr addr,
                                  int fault_size, MMUAccessType access_type,
                                  int fault_size, MMUAccessType access_type,
                                  bool nonfault, uintptr_t ra)
                                  bool nonfault, uintptr_t ra)
 {
 {
@@ -745,6 +805,10 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     if (guest_addr_valid_untagged(addr)) {
     if (guest_addr_valid_untagged(addr)) {
         int page_flags = page_get_flags(addr);
         int page_flags = page_get_flags(addr);
         if (page_flags & acc_flag) {
         if (page_flags & acc_flag) {
+            if ((acc_flag == PAGE_READ || acc_flag == PAGE_WRITE)
+                && cpu_plugin_mem_cbs_enabled(env_cpu(env))) {
+                return TLB_MMIO;
+            }
             return 0; /* success */
             return 0; /* success */
         }
         }
         maperr = !(page_flags & PAGE_VALID);
         maperr = !(page_flags & PAGE_VALID);
@@ -759,7 +823,7 @@ static int probe_access_internal(CPUArchState *env, target_ulong addr,
     cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
     cpu_loop_exit_sigsegv(env_cpu(env), addr, access_type, maperr, ra);
 }
 }
 
 
-int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
+int probe_access_flags(CPUArchState *env, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        MMUAccessType access_type, int mmu_idx,
                        bool nonfault, void **phost, uintptr_t ra)
                        bool nonfault, void **phost, uintptr_t ra)
 {
 {
@@ -767,23 +831,23 @@ int probe_access_flags(CPUArchState *env, target_ulong addr, int size,
 
 
     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
     flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
     flags = probe_access_internal(env, addr, size, access_type, nonfault, ra);
-    *phost = flags ? NULL : g2h(env_cpu(env), addr);
+    *phost = (flags & TLB_INVALID_MASK) ? NULL : g2h(env_cpu(env), addr);
     return flags;
     return flags;
 }
 }
 
 
-void *probe_access(CPUArchState *env, target_ulong addr, int size,
+void *probe_access(CPUArchState *env, vaddr addr, int size,
                    MMUAccessType access_type, int mmu_idx, uintptr_t ra)
                    MMUAccessType access_type, int mmu_idx, uintptr_t ra)
 {
 {
     int flags;
     int flags;
 
 
     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
     g_assert(-(addr | TARGET_PAGE_MASK) >= size);
     flags = probe_access_internal(env, addr, size, access_type, false, ra);
     flags = probe_access_internal(env, addr, size, access_type, false, ra);
-    g_assert(flags == 0);
+    g_assert((flags & ~TLB_MMIO) == 0);
 
 
     return size ? g2h(env_cpu(env), addr) : NULL;
     return size ? g2h(env_cpu(env), addr) : NULL;
 }
 }
 
 
-tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
+tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, vaddr addr,
                                         void **hostp)
                                         void **hostp)
 {
 {
     int flags;
     int flags;
@@ -809,7 +873,7 @@ tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
 typedef struct TargetPageDataNode {
 typedef struct TargetPageDataNode {
     struct rcu_head rcu;
     struct rcu_head rcu;
     IntervalTreeNode itree;
     IntervalTreeNode itree;
-    char data[TPD_PAGES][TARGET_PAGE_DATA_SIZE] __attribute__((aligned));
+    char data[] __attribute__((aligned));
 } TargetPageDataNode;
 } TargetPageDataNode;
 
 
 static IntervalTreeRoot targetdata_root;
 static IntervalTreeRoot targetdata_root;
@@ -847,7 +911,8 @@ void page_reset_target_data(target_ulong start, target_ulong last)
         n_last = MIN(last, n->last);
         n_last = MIN(last, n->last);
         p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
         p_len = (n_last + 1 - n_start) >> TARGET_PAGE_BITS;
 
 
-        memset(t->data[p_ofs], 0, p_len * TARGET_PAGE_DATA_SIZE);
+        memset(t->data + p_ofs * TARGET_PAGE_DATA_SIZE, 0,
+               p_len * TARGET_PAGE_DATA_SIZE);
     }
     }
 }
 }
 
 
@@ -855,7 +920,7 @@ void *page_get_target_data(target_ulong address)
 {
 {
     IntervalTreeNode *n;
     IntervalTreeNode *n;
     TargetPageDataNode *t;
     TargetPageDataNode *t;
-    target_ulong page, region;
+    target_ulong page, region, p_ofs;
 
 
     page = address & TARGET_PAGE_MASK;
     page = address & TARGET_PAGE_MASK;
     region = address & TBD_MASK;
     region = address & TBD_MASK;
@@ -871,7 +936,8 @@ void *page_get_target_data(target_ulong address)
         mmap_lock();
         mmap_lock();
         n = interval_tree_iter_first(&targetdata_root, page, page);
         n = interval_tree_iter_first(&targetdata_root, page, page);
         if (!n) {
         if (!n) {
-            t = g_new0(TargetPageDataNode, 1);
+            t = g_malloc0(sizeof(TargetPageDataNode)
+                          + TPD_PAGES * TARGET_PAGE_DATA_SIZE);
             n = &t->itree;
             n = &t->itree;
             n->start = region;
             n->start = region;
             n->last = region | ~TBD_MASK;
             n->last = region | ~TBD_MASK;
@@ -881,302 +947,192 @@ void *page_get_target_data(target_ulong address)
     }
     }
 
 
     t = container_of(n, TargetPageDataNode, itree);
     t = container_of(n, TargetPageDataNode, itree);
-    return t->data[(page - region) >> TARGET_PAGE_BITS];
+    p_ofs = (page - region) >> TARGET_PAGE_BITS;
+    return t->data + p_ofs * TARGET_PAGE_DATA_SIZE;
 }
 }
 #else
 #else
 void page_reset_target_data(target_ulong start, target_ulong last) { }
 void page_reset_target_data(target_ulong start, target_ulong last) { }
 #endif /* TARGET_PAGE_DATA_SIZE */
 #endif /* TARGET_PAGE_DATA_SIZE */
 
 
-/* The softmmu versions of these helpers are in cputlb.c.  */
-
-/*
- * Verify that we have passed the correct MemOp to the correct function.
- *
- * We could present one function to target code, and dispatch based on
- * the MemOp, but so far we have worked hard to avoid an indirect function
- * call along the memory path.
- */
-static void validate_memop(MemOpIdx oi, MemOp expected)
-{
-#ifdef CONFIG_DEBUG_TCG
-    MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
-    assert(have == expected);
-#endif
-}
+/* The system-mode versions of these helpers are in cputlb.c.  */
 
 
-void helper_unaligned_ld(CPUArchState *env, target_ulong addr)
+static void *cpu_mmu_lookup(CPUState *cpu, vaddr addr,
+                            MemOp mop, uintptr_t ra, MMUAccessType type)
 {
 {
-    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_LOAD, GETPC());
-}
-
-void helper_unaligned_st(CPUArchState *env, target_ulong addr)
-{
-    cpu_loop_exit_sigbus(env_cpu(env), addr, MMU_DATA_STORE, GETPC());
-}
-
-static void *cpu_mmu_lookup(CPUArchState *env, target_ulong addr,
-                            MemOpIdx oi, uintptr_t ra, MMUAccessType type)
-{
-    MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     int a_bits = get_alignment_bits(mop);
     void *ret;
     void *ret;
 
 
     /* Enforce guest required alignment.  */
     /* Enforce guest required alignment.  */
     if (unlikely(addr & ((1 << a_bits) - 1))) {
     if (unlikely(addr & ((1 << a_bits) - 1))) {
-        cpu_loop_exit_sigbus(env_cpu(env), addr, type, ra);
+        cpu_loop_exit_sigbus(cpu, addr, type, ra);
     }
     }
 
 
-    ret = g2h(env_cpu(env), addr);
+    ret = g2h(cpu, addr);
     set_helper_retaddr(ra);
     set_helper_retaddr(ra);
     return ret;
     return ret;
 }
 }
 
 
-uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr,
-                    MemOpIdx oi, uintptr_t ra)
+#include "ldst_atomicity.c.inc"
+
+static uint8_t do_ld1_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                          uintptr_t ra, MMUAccessType access_type)
 {
 {
     void *haddr;
     void *haddr;
     uint8_t ret;
     uint8_t ret;
 
 
-    validate_memop(oi, MO_UB);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, access_type);
     ret = ldub_p(haddr);
     ret = ldub_p(haddr);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
     return ret;
     return ret;
 }
 }
 
 
-uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
+static uint16_t do_ld2_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
 {
     void *haddr;
     void *haddr;
     uint16_t ret;
     uint16_t ret;
+    MemOp mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_BEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = lduw_be_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint32_t ret;
-
-    validate_memop(oi, MO_BEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldl_be_p(haddr);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_2(cpu, ra, haddr, mop);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint64_t ret;
 
 
-    validate_memop(oi, MO_BEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldq_be_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
-
-uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint16_t ret;
-
-    validate_memop(oi, MO_LEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = lduw_le_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    if (mop & MO_BSWAP) {
+        ret = bswap16(ret);
+    }
     return ret;
     return ret;
 }
 }
 
 
-uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
+static uint32_t do_ld4_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
 {
     void *haddr;
     void *haddr;
     uint32_t ret;
     uint32_t ret;
+    MemOp mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_LEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldl_le_p(haddr);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_4(cpu, ra, haddr, mop);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
-    return ret;
-}
 
 
-uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
-                        MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-    uint64_t ret;
-
-    validate_memop(oi, MO_LEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    ret = ldq_le_p(haddr);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
+    if (mop & MO_BSWAP) {
+        ret = bswap32(ret);
+    }
     return ret;
     return ret;
 }
 }
 
 
-Int128 cpu_ld16_be_mmu(CPUArchState *env, abi_ptr addr,
-                       MemOpIdx oi, uintptr_t ra)
+static uint64_t do_ld8_mmu(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                           uintptr_t ra, MMUAccessType access_type)
 {
 {
     void *haddr;
     void *haddr;
-    Int128 ret;
+    uint64_t ret;
+    MemOp mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_128 | MO_BE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    memcpy(&ret, haddr, 16);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, access_type);
+    ret = load_atom_8(cpu, ra, haddr, mop);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
 
 
-    if (!HOST_BIG_ENDIAN) {
-        ret = bswap128(ret);
+    if (mop & MO_BSWAP) {
+        ret = bswap64(ret);
     }
     }
     return ret;
     return ret;
 }
 }
 
 
-Int128 cpu_ld16_le_mmu(CPUArchState *env, abi_ptr addr,
-                       MemOpIdx oi, uintptr_t ra)
+static Int128 do_ld16_mmu(CPUState *cpu, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
 {
 {
     void *haddr;
     void *haddr;
     Int128 ret;
     Int128 ret;
+    MemOp mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_128 | MO_LE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_LOAD);
-    memcpy(&ret, haddr, 16);
+    tcg_debug_assert((mop & MO_SIZE) == MO_128);
+    cpu_req_mo(TCG_MO_LD_LD | TCG_MO_ST_LD);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_LOAD);
+    ret = load_atom_16(cpu, ra, haddr, mop);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
 
 
-    if (HOST_BIG_ENDIAN) {
+    if (mop & MO_BSWAP) {
         ret = bswap128(ret);
         ret = bswap128(ret);
     }
     }
     return ret;
     return ret;
 }
 }
 
 
-void cpu_stb_mmu(CPUArchState *env, abi_ptr addr, uint8_t val,
-                 MemOpIdx oi, uintptr_t ra)
+static void do_st1_mmu(CPUState *cpu, vaddr addr, uint8_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
 {
     void *haddr;
     void *haddr;
 
 
-    validate_memop(oi, MO_UB);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, get_memop(oi), ra, MMU_DATA_STORE);
     stb_p(haddr, val);
     stb_p(haddr, val);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stw_be_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-
-    validate_memop(oi, MO_BEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stw_be_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }
 }
 
 
-void cpu_stl_be_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t ra)
+static void do_st2_mmu(CPUState *cpu, vaddr addr, uint16_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
 {
     void *haddr;
     void *haddr;
+    MemOp mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_BEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stl_be_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
 
-void cpu_stq_be_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-
-    validate_memop(oi, MO_BEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stq_be_p(haddr, val);
+    if (mop & MO_BSWAP) {
+        val = bswap16(val);
+    }
+    store_atom_2(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }
 }
 
 
-void cpu_stw_le_mmu(CPUArchState *env, abi_ptr addr, uint16_t val,
-                    MemOpIdx oi, uintptr_t ra)
+static void do_st4_mmu(CPUState *cpu, vaddr addr, uint32_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
 {
     void *haddr;
     void *haddr;
+    MemOp mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_LEUW);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stw_le_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
-
-void cpu_stl_le_mmu(CPUArchState *env, abi_ptr addr, uint32_t val,
-                    MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
 
-    validate_memop(oi, MO_LEUL);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stl_le_p(haddr, val);
+    if (mop & MO_BSWAP) {
+        val = bswap32(val);
+    }
+    store_atom_4(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }
 }
 
 
-void cpu_stq_le_mmu(CPUArchState *env, abi_ptr addr, uint64_t val,
-                    MemOpIdx oi, uintptr_t ra)
+static void do_st8_mmu(CPUState *cpu, vaddr addr, uint64_t val,
+                       MemOpIdx oi, uintptr_t ra)
 {
 {
     void *haddr;
     void *haddr;
+    MemOp mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_LEUQ);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    stq_le_p(haddr, val);
-    clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
-}
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
 
 
-void cpu_st16_be_mmu(CPUArchState *env, abi_ptr addr,
-                     Int128 val, MemOpIdx oi, uintptr_t ra)
-{
-    void *haddr;
-
-    validate_memop(oi, MO_128 | MO_BE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    if (!HOST_BIG_ENDIAN) {
-        val = bswap128(val);
+    if (mop & MO_BSWAP) {
+        val = bswap64(val);
     }
     }
-    memcpy(haddr, &val, 16);
+    store_atom_8(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }
 }
 
 
-void cpu_st16_le_mmu(CPUArchState *env, abi_ptr addr,
-                     Int128 val, MemOpIdx oi, uintptr_t ra)
+static void do_st16_mmu(CPUState *cpu, vaddr addr, Int128 val,
+                        MemOpIdx oi, uintptr_t ra)
 {
 {
     void *haddr;
     void *haddr;
+    MemOpIdx mop = get_memop(oi);
 
 
-    validate_memop(oi, MO_128 | MO_LE);
-    haddr = cpu_mmu_lookup(env, addr, oi, ra, MMU_DATA_STORE);
-    if (HOST_BIG_ENDIAN) {
+    cpu_req_mo(TCG_MO_LD_ST | TCG_MO_ST_ST);
+    haddr = cpu_mmu_lookup(cpu, addr, mop, ra, MMU_DATA_STORE);
+
+    if (mop & MO_BSWAP) {
         val = bswap128(val);
         val = bswap128(val);
     }
     }
-    memcpy(haddr, &val, 16);
+    store_atom_16(cpu, ra, haddr, mop, val);
     clear_helper_retaddr();
     clear_helper_retaddr();
-    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
 }
 }
 
 
 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
 uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr ptr)
@@ -1219,16 +1175,70 @@ uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr ptr)
     return ret;
     return ret;
 }
 }
 
 
+uint8_t cpu_ldb_code_mmu(CPUArchState *env, abi_ptr addr,
+                         MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint8_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
+    ret = ldub_p(haddr);
+    clear_helper_retaddr();
+    return ret;
+}
+
+uint16_t cpu_ldw_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint16_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
+    ret = lduw_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap16(ret);
+    }
+    return ret;
+}
+
+uint32_t cpu_ldl_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint32_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_INST_FETCH);
+    ret = ldl_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap32(ret);
+    }
+    return ret;
+}
+
+uint64_t cpu_ldq_code_mmu(CPUArchState *env, abi_ptr addr,
+                          MemOpIdx oi, uintptr_t ra)
+{
+    void *haddr;
+    uint64_t ret;
+
+    haddr = cpu_mmu_lookup(env_cpu(env), addr, oi, ra, MMU_DATA_LOAD);
+    ret = ldq_p(haddr);
+    clear_helper_retaddr();
+    if (get_memop(oi) & MO_BSWAP) {
+        ret = bswap64(ret);
+    }
+    return ret;
+}
+
 #include "ldst_common.c.inc"
 #include "ldst_common.c.inc"
 
 
 /*
 /*
  * Do not allow unaligned operations to proceed.  Return the host address.
  * Do not allow unaligned operations to proceed.  Return the host address.
- *
- * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
  */
  */
-static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
-                               MemOpIdx oi, int size, int prot,
-                               uintptr_t retaddr)
+static void *atomic_mmu_lookup(CPUState *cpu, vaddr addr, MemOpIdx oi,
+                               int size, uintptr_t retaddr)
 {
 {
     MemOp mop = get_memop(oi);
     MemOp mop = get_memop(oi);
     int a_bits = get_alignment_bits(mop);
     int a_bits = get_alignment_bits(mop);
@@ -1236,16 +1246,15 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 
 
     /* Enforce guest required alignment.  */
     /* Enforce guest required alignment.  */
     if (unlikely(addr & ((1 << a_bits) - 1))) {
     if (unlikely(addr & ((1 << a_bits) - 1))) {
-        MMUAccessType t = prot == PAGE_READ ? MMU_DATA_LOAD : MMU_DATA_STORE;
-        cpu_loop_exit_sigbus(env_cpu(env), addr, t, retaddr);
+        cpu_loop_exit_sigbus(cpu, addr, MMU_DATA_STORE, retaddr);
     }
     }
 
 
     /* Enforce qemu required alignment.  */
     /* Enforce qemu required alignment.  */
     if (unlikely(addr & (size - 1))) {
     if (unlikely(addr & (size - 1))) {
-        cpu_loop_exit_atomic(env_cpu(env), retaddr);
+        cpu_loop_exit_atomic(cpu, retaddr);
     }
     }
 
 
-    ret = g2h(env_cpu(env), addr);
+    ret = g2h(cpu, addr);
     set_helper_retaddr(retaddr);
     set_helper_retaddr(retaddr);
     return ret;
     return ret;
 }
 }
@@ -1275,7 +1284,7 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 #include "atomic_template.h"
 #include "atomic_template.h"
 #endif
 #endif
 
 
-#if HAVE_ATOMIC128 || HAVE_CMPXCHG128
+#if defined(CONFIG_ATOMIC128) || HAVE_CMPXCHG128
 #define DATA_SIZE 16
 #define DATA_SIZE 16
 #include "atomic_template.h"
 #include "atomic_template.h"
 #endif
 #endif

+ 18 - 0
accel/tcg/vcpu-state.h

@@ -0,0 +1,18 @@
+/*
+ * SPDX-FileContributor: Philippe Mathieu-Daudé <philmd@linaro.org>
+ * SPDX-FileCopyrightText: 2023 Linaro Ltd.
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+#ifndef ACCEL_TCG_VCPU_STATE_H
+#define ACCEL_TCG_VCPU_STATE_H
+
+#include "hw/core/cpu.h"
+
+#ifdef CONFIG_USER_ONLY
+static inline TaskState *get_task_state(const CPUState *cs)
+{
+    return cs->opaque;
+}
+#endif
+
+#endif

+ 5 - 88
softmmu/watchpoint.c → accel/tcg/watchpoint.c

@@ -27,85 +27,6 @@
 #include "hw/core/tcg-cpu-ops.h"
 #include "hw/core/tcg-cpu-ops.h"
 #include "hw/core/cpu.h"
 #include "hw/core/cpu.h"
 
 
-/* Add a watchpoint.  */
-int cpu_watchpoint_insert(CPUState *cpu, vaddr addr, vaddr len,
-                          int flags, CPUWatchpoint **watchpoint)
-{
-    CPUWatchpoint *wp;
-    vaddr in_page;
-
-    /* forbid ranges which are empty or run off the end of the address space */
-    if (len == 0 || (addr + len - 1) < addr) {
-        error_report("tried to set invalid watchpoint at %"
-                     VADDR_PRIx ", len=%" VADDR_PRIu, addr, len);
-        return -EINVAL;
-    }
-    wp = g_malloc(sizeof(*wp));
-
-    wp->vaddr = addr;
-    wp->len = len;
-    wp->flags = flags;
-
-    /* keep all GDB-injected watchpoints in front */
-    if (flags & BP_GDB) {
-        QTAILQ_INSERT_HEAD(&cpu->watchpoints, wp, entry);
-    } else {
-        QTAILQ_INSERT_TAIL(&cpu->watchpoints, wp, entry);
-    }
-
-    in_page = -(addr | TARGET_PAGE_MASK);
-    if (len <= in_page) {
-        tlb_flush_page(cpu, addr);
-    } else {
-        tlb_flush(cpu);
-    }
-
-    if (watchpoint) {
-        *watchpoint = wp;
-    }
-    return 0;
-}
-
-/* Remove a specific watchpoint.  */
-int cpu_watchpoint_remove(CPUState *cpu, vaddr addr, vaddr len,
-                          int flags)
-{
-    CPUWatchpoint *wp;
-
-    QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
-        if (addr == wp->vaddr && len == wp->len
-                && flags == (wp->flags & ~BP_WATCHPOINT_HIT)) {
-            cpu_watchpoint_remove_by_ref(cpu, wp);
-            return 0;
-        }
-    }
-    return -ENOENT;
-}
-
-/* Remove a specific watchpoint by reference.  */
-void cpu_watchpoint_remove_by_ref(CPUState *cpu, CPUWatchpoint *watchpoint)
-{
-    QTAILQ_REMOVE(&cpu->watchpoints, watchpoint, entry);
-
-    tlb_flush_page(cpu, watchpoint->vaddr);
-
-    g_free(watchpoint);
-}
-
-/* Remove all matching watchpoints.  */
-void cpu_watchpoint_remove_all(CPUState *cpu, int mask)
-{
-    CPUWatchpoint *wp, *next;
-
-    QTAILQ_FOREACH_SAFE(wp, &cpu->watchpoints, entry, next) {
-        if (wp->flags & mask) {
-            cpu_watchpoint_remove_by_ref(cpu, wp);
-        }
-    }
-}
-
-#ifdef CONFIG_TCG
-
 /*
 /*
  * Return true if this watchpoint address matches the specified
  * Return true if this watchpoint address matches the specified
  * access (ie the address range covered by the watchpoint overlaps
  * access (ie the address range covered by the watchpoint overlaps
@@ -155,9 +76,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
          * Now raise the debug interrupt so that it will
          * Now raise the debug interrupt so that it will
          * trigger after the current instruction.
          * trigger after the current instruction.
          */
          */
-        qemu_mutex_lock_iothread();
+        bql_lock();
         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
         cpu_interrupt(cpu, CPU_INTERRUPT_DEBUG);
-        qemu_mutex_unlock_iothread();
+        bql_unlock();
         return;
         return;
     }
     }
 
 
@@ -177,10 +98,9 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                  * Force recompile to succeed, because icount may
                  * Force recompile to succeed, because icount may
                  * be read only at the end of the block.
                  * be read only at the end of the block.
                  */
                  */
-                if (!cpu->can_do_io) {
+                if (!cpu->neg.can_do_io) {
                     /* Force execution of one insn next time.  */
                     /* Force execution of one insn next time.  */
-                    cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
-                                          | curr_cflags(cpu);
+                    cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
                     cpu_loop_exit_restore(cpu, ra);
                     cpu_loop_exit_restore(cpu, ra);
                 }
                 }
                 /*
                 /*
@@ -212,8 +132,7 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
                 cpu_loop_exit(cpu);
                 cpu_loop_exit(cpu);
             } else {
             } else {
                 /* Force execution of one insn next time.  */
                 /* Force execution of one insn next time.  */
-                cpu->cflags_next_tb = 1 | CF_LAST_IO | CF_NOIRQ
-                                      | curr_cflags(cpu);
+                cpu->cflags_next_tb = 1 | CF_NOIRQ | curr_cflags(cpu);
                 mmap_unlock();
                 mmap_unlock();
                 cpu_loop_exit_noexc(cpu);
                 cpu_loop_exit_noexc(cpu);
             }
             }
@@ -222,5 +141,3 @@ void cpu_check_watchpoint(CPUState *cpu, vaddr addr, vaddr len,
         }
         }
     }
     }
 }
 }
-
-#endif /* CONFIG_TCG */

+ 1 - 0
accel/xen/xen-all.c

@@ -15,6 +15,7 @@
 #include "hw/xen/xen_native.h"
 #include "hw/xen/xen_native.h"
 #include "hw/xen/xen-legacy-backend.h"
 #include "hw/xen/xen-legacy-backend.h"
 #include "hw/xen/xen_pt.h"
 #include "hw/xen/xen_pt.h"
+#include "hw/xen/xen_igd.h"
 #include "chardev/char.h"
 #include "chardev/char.h"
 #include "qemu/accel.h"
 #include "qemu/accel.h"
 #include "sysemu/cpus.h"
 #include "sysemu/cpus.h"

+ 1 - 2
audio/alsaaudio.c

@@ -904,7 +904,7 @@ static void alsa_init_per_direction(AudiodevAlsaPerDirectionOptions *apdo)
     }
     }
 }
 }
 
 
-static void *alsa_audio_init(Audiodev *dev)
+static void *alsa_audio_init(Audiodev *dev, Error **errp)
 {
 {
     AudiodevAlsaOptions *aopts;
     AudiodevAlsaOptions *aopts;
     assert(dev->driver == AUDIODEV_DRIVER_ALSA);
     assert(dev->driver == AUDIODEV_DRIVER_ALSA);
@@ -960,7 +960,6 @@ static struct audio_driver alsa_audio_driver = {
     .init           = alsa_audio_init,
     .init           = alsa_audio_init,
     .fini           = alsa_audio_fini,
     .fini           = alsa_audio_fini,
     .pcm_ops        = &alsa_pcm_ops,
     .pcm_ops        = &alsa_pcm_ops,
-    .can_be_default = 1,
     .max_voices_out = INT_MAX,
     .max_voices_out = INT_MAX,
     .max_voices_in  = INT_MAX,
     .max_voices_in  = INT_MAX,
     .voice_size_out = sizeof (ALSAVoiceOut),
     .voice_size_out = sizeof (ALSAVoiceOut),

+ 4 - 2
audio/audio-hmp-cmds.c

@@ -26,6 +26,7 @@
 #include "audio/audio.h"
 #include "audio/audio.h"
 #include "monitor/hmp.h"
 #include "monitor/hmp.h"
 #include "monitor/monitor.h"
 #include "monitor/monitor.h"
+#include "qapi/error.h"
 #include "qapi/qmp/qdict.h"
 #include "qapi/qmp/qdict.h"
 
 
 static QLIST_HEAD (capture_list_head, CaptureState) capture_head;
 static QLIST_HEAD (capture_list_head, CaptureState) capture_head;
@@ -65,10 +66,11 @@ void hmp_wavcapture(Monitor *mon, const QDict *qdict)
     int nchannels = qdict_get_try_int(qdict, "nchannels", 2);
     int nchannels = qdict_get_try_int(qdict, "nchannels", 2);
     const char *audiodev = qdict_get_str(qdict, "audiodev");
     const char *audiodev = qdict_get_str(qdict, "audiodev");
     CaptureState *s;
     CaptureState *s;
-    AudioState *as = audio_state_by_name(audiodev);
+    Error *local_err = NULL;
+    AudioState *as = audio_state_by_name(audiodev, &local_err);
 
 
     if (!as) {
     if (!as) {
-        monitor_printf(mon, "Audiodev '%s' not found\n", audiodev);
+        error_report_err(local_err);
         return;
         return;
     }
     }
 
 

+ 110 - 136
audio/audio.c

@@ -32,7 +32,9 @@
 #include "qapi/qobject-input-visitor.h"
 #include "qapi/qobject-input-visitor.h"
 #include "qapi/qapi-visit-audio.h"
 #include "qapi/qapi-visit-audio.h"
 #include "qapi/qapi-commands-audio.h"
 #include "qapi/qapi-commands-audio.h"
+#include "qapi/qmp/qdict.h"
 #include "qemu/cutils.h"
 #include "qemu/cutils.h"
+#include "qemu/error-report.h"
 #include "qemu/log.h"
 #include "qemu/log.h"
 #include "qemu/module.h"
 #include "qemu/module.h"
 #include "qemu/help_option.h"
 #include "qemu/help_option.h"
@@ -61,19 +63,22 @@ const char *audio_prio_list[] = {
     "spice",
     "spice",
     CONFIG_AUDIO_DRIVERS
     CONFIG_AUDIO_DRIVERS
     "none",
     "none",
-    "wav",
     NULL
     NULL
 };
 };
 
 
 static QLIST_HEAD(, audio_driver) audio_drivers;
 static QLIST_HEAD(, audio_driver) audio_drivers;
-static AudiodevListHead audiodevs = QSIMPLEQ_HEAD_INITIALIZER(audiodevs);
+static AudiodevListHead audiodevs =
+    QSIMPLEQ_HEAD_INITIALIZER(audiodevs);
+static AudiodevListHead default_audiodevs =
+    QSIMPLEQ_HEAD_INITIALIZER(default_audiodevs);
+
 
 
 void audio_driver_register(audio_driver *drv)
 void audio_driver_register(audio_driver *drv)
 {
 {
     QLIST_INSERT_HEAD(&audio_drivers, drv, next);
     QLIST_INSERT_HEAD(&audio_drivers, drv, next);
 }
 }
 
 
-audio_driver *audio_driver_lookup(const char *name)
+static audio_driver *audio_driver_lookup(const char *name)
 {
 {
     struct audio_driver *d;
     struct audio_driver *d;
     Error *local_err = NULL;
     Error *local_err = NULL;
@@ -99,6 +104,7 @@ audio_driver *audio_driver_lookup(const char *name)
 
 
 static QTAILQ_HEAD(AudioStateHead, AudioState) audio_states =
 static QTAILQ_HEAD(AudioStateHead, AudioState) audio_states =
     QTAILQ_HEAD_INITIALIZER(audio_states);
     QTAILQ_HEAD_INITIALIZER(audio_states);
+static AudioState *default_audio_state;
 
 
 const struct mixeng_volume nominal_volume = {
 const struct mixeng_volume nominal_volume = {
     .mute = 0,
     .mute = 0,
@@ -111,8 +117,6 @@ const struct mixeng_volume nominal_volume = {
 #endif
 #endif
 };
 };
 
 
-static bool legacy_config = true;
-
 int audio_bug (const char *funcname, int cond)
 int audio_bug (const char *funcname, int cond)
 {
 {
     if (cond) {
     if (cond) {
@@ -1571,9 +1575,11 @@ size_t audio_generic_read(HWVoiceIn *hw, void *buf, size_t size)
 }
 }
 
 
 static int audio_driver_init(AudioState *s, struct audio_driver *drv,
 static int audio_driver_init(AudioState *s, struct audio_driver *drv,
-                             bool msg, Audiodev *dev)
+                             Audiodev *dev, Error **errp)
 {
 {
-    s->drv_opaque = drv->init(dev);
+    Error *local_err = NULL;
+
+    s->drv_opaque = drv->init(dev, &local_err);
 
 
     if (s->drv_opaque) {
     if (s->drv_opaque) {
         if (!drv->pcm_ops->get_buffer_in) {
         if (!drv->pcm_ops->get_buffer_in) {
@@ -1585,13 +1591,15 @@ static int audio_driver_init(AudioState *s, struct audio_driver *drv,
             drv->pcm_ops->put_buffer_out = audio_generic_put_buffer_out;
             drv->pcm_ops->put_buffer_out = audio_generic_put_buffer_out;
         }
         }
 
 
-        audio_init_nb_voices_out(s, drv);
-        audio_init_nb_voices_in(s, drv);
+        audio_init_nb_voices_out(s, drv, 1);
+        audio_init_nb_voices_in(s, drv, 0);
         s->drv = drv;
         s->drv = drv;
         return 0;
         return 0;
     } else {
     } else {
-        if (msg) {
-            dolog("Could not init `%s' audio driver\n", drv->name);
+        if (local_err) {
+            error_propagate(errp, local_err);
+        } else {
+            error_setg(errp, "Could not init `%s' audio driver", drv->name);
         }
         }
         return -1;
         return -1;
     }
     }
@@ -1671,6 +1679,7 @@ static void free_audio_state(AudioState *s)
 
 
 void audio_cleanup(void)
 void audio_cleanup(void)
 {
 {
+    default_audio_state = NULL;
     while (!QTAILQ_EMPTY(&audio_states)) {
     while (!QTAILQ_EMPTY(&audio_states)) {
         AudioState *s = QTAILQ_FIRST(&audio_states);
         AudioState *s = QTAILQ_FIRST(&audio_states);
         QTAILQ_REMOVE(&audio_states, s, list);
         QTAILQ_REMOVE(&audio_states, s, list);
@@ -1692,24 +1701,30 @@ static const VMStateDescription vmstate_audio = {
     .version_id = 1,
     .version_id = 1,
     .minimum_version_id = 1,
     .minimum_version_id = 1,
     .needed = vmstate_audio_needed,
     .needed = vmstate_audio_needed,
-    .fields = (VMStateField[]) {
+    .fields = (const VMStateField[]) {
         VMSTATE_END_OF_LIST()
         VMSTATE_END_OF_LIST()
     }
     }
 };
 };
 
 
-static void audio_validate_opts(Audiodev *dev, Error **errp);
-
-static AudiodevListEntry *audiodev_find(
-    AudiodevListHead *head, const char *drvname)
+void audio_create_default_audiodevs(void)
 {
 {
-    AudiodevListEntry *e;
-    QSIMPLEQ_FOREACH(e, head, next) {
-        if (strcmp(AudiodevDriver_str(e->dev->driver), drvname) == 0) {
-            return e;
+    for (int i = 0; audio_prio_list[i]; i++) {
+        if (audio_driver_lookup(audio_prio_list[i])) {
+            QDict *dict = qdict_new();
+            Audiodev *dev = NULL;
+            Visitor *v;
+
+            qdict_put_str(dict, "driver", audio_prio_list[i]);
+            qdict_put_str(dict, "id", "#default");
+
+            v = qobject_input_visitor_new_keyval(QOBJECT(dict));
+            qobject_unref(dict);
+            visit_type_Audiodev(v, NULL, &dev, &error_fatal);
+            visit_free(v);
+
+            audio_define_default(dev, &error_abort);
         }
         }
     }
     }
-
-    return NULL;
 }
 }
 
 
 /*
 /*
@@ -1718,62 +1733,16 @@ static AudiodevListEntry *audiodev_find(
  * if dev == NULL => legacy implicit initialization, return the already created
  * if dev == NULL => legacy implicit initialization, return the already created
  *   state or create a new one
  *   state or create a new one
  */
  */
-static AudioState *audio_init(Audiodev *dev, const char *name)
+static AudioState *audio_init(Audiodev *dev, Error **errp)
 {
     static bool atexit_registered;
-    size_t i;
     int done = 0;
-    const char *drvname = NULL;
-    VMChangeStateEntry *e;
+    const char *drvname;
+    VMChangeStateEntry *vmse;
     AudioState *s;
     struct audio_driver *driver;
-    /* silence gcc warning about uninitialized variable */
-    AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);
-
-    if (using_spice) {
-        /*
-         * When using spice allow the spice audio driver being picked
-         * as default.
-         *
-         * Temporary hack.  Using audio devices without explicit
-         * audiodev= property is already deprecated.  Same goes for
-         * the -soundhw switch.  Once this support gets finally
-         * removed we can also drop the concept of a default audio
-         * backend and this can go away.
-         */
-        driver = audio_driver_lookup("spice");
-        if (driver) {
-            driver->can_be_default = 1;
-        }
-    }
-
-    if (dev) {
-        /* -audiodev option */
-        legacy_config = false;
-        drvname = AudiodevDriver_str(dev->driver);
-    } else if (!QTAILQ_EMPTY(&audio_states)) {
-        if (!legacy_config) {
-            dolog("Device %s: audiodev default parameter is deprecated, please "
-                  "specify audiodev=%s\n", name,
-                  QTAILQ_FIRST(&audio_states)->dev->id);
-        }
-        return QTAILQ_FIRST(&audio_states);
-    } else {
-        /* legacy implicit initialization */
-        head = audio_handle_legacy_opts();
-        /*
-         * In case of legacy initialization, all Audiodevs in the list will have
-         * the same configuration (except the driver), so it doesn't matter which
-         * one we chose.  We need an Audiodev to set up AudioState before we can
-         * init a driver.  Also note that dev at this point is still in the
-         * list.
-         */
-        dev = QSIMPLEQ_FIRST(&head)->dev;
-        audio_validate_opts(dev, &error_abort);
-    }
 
     s = g_new0(AudioState, 1);
-    s->dev = dev;
 
     QLIST_INIT (&s->hw_head_out);
     QLIST_INIT (&s->hw_head_in);
@@ -1785,56 +1754,39 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
 
     s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s);
 
-    s->nb_hw_voices_out = audio_get_pdo_out(dev)->voices;
-    s->nb_hw_voices_in = audio_get_pdo_in(dev)->voices;
-
-    if (s->nb_hw_voices_out < 1) {
-        dolog ("Bogus number of playback voices %d, setting to 1\n",
-               s->nb_hw_voices_out);
-        s->nb_hw_voices_out = 1;
-    }
-
-    if (s->nb_hw_voices_in < 0) {
-        dolog ("Bogus number of capture voices %d, setting to 0\n",
-               s->nb_hw_voices_in);
-        s->nb_hw_voices_in = 0;
-    }
-
-    if (drvname) {
+    if (dev) {
+        /* -audiodev option */
+        s->dev = dev;
+        drvname = AudiodevDriver_str(dev->driver);
         driver = audio_driver_lookup(drvname);
         if (driver) {
-            done = !audio_driver_init(s, driver, true, dev);
+            done = !audio_driver_init(s, driver, dev, errp);
         } else {
-            dolog ("Unknown audio driver `%s'\n", drvname);
+            error_setg(errp, "Unknown audio driver `%s'", drvname);
         }
         if (!done) {
-            free_audio_state(s);
-            return NULL;
+            goto out;
         }
     } else {
-        for (i = 0; audio_prio_list[i]; i++) {
-            AudiodevListEntry *e = audiodev_find(&head, audio_prio_list[i]);
-            driver = audio_driver_lookup(audio_prio_list[i]);
-
-            if (e && driver) {
-                s->dev = dev = e->dev;
-                audio_validate_opts(dev, &error_abort);
-                done = !audio_driver_init(s, driver, false, dev);
-                if (done) {
-                    e->dev = NULL;
-                    break;
-                }
+        assert(!default_audio_state);
+        for (;;) {
+            AudiodevListEntry *e = QSIMPLEQ_FIRST(&default_audiodevs);
+            if (!e) {
+                error_setg(errp, "no default audio driver available");
+                goto out;
+            }
+            s->dev = dev = e->dev;
+            QSIMPLEQ_REMOVE_HEAD(&default_audiodevs, next);
+            g_free(e);
+            drvname = AudiodevDriver_str(dev->driver);
+            driver = audio_driver_lookup(drvname);
+            if (!audio_driver_init(s, driver, dev, NULL)) {
+                break;
             }
+            qapi_free_Audiodev(dev);
+            s->dev = NULL;
         }
     }
-    audio_free_audiodev_list(&head);
-
-    if (!done) {
-        driver = audio_driver_lookup("none");
-        done = !audio_driver_init(s, driver, false, dev);
-        assert(done);
-        dolog("warning: Using timer based audio emulation\n");
-    }
 
     if (dev->timer_period <= 0) {
         s->period_ticks = 1;
@@ -1842,37 +1794,51 @@ static AudioState *audio_init(Audiodev *dev, const char *name)
         s->period_ticks = dev->timer_period * (int64_t)SCALE_US;
     }
 
-    e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
-    if (!e) {
+    vmse = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s);
+    if (!vmse) {
         dolog ("warning: Could not register change state handler\n"
         dolog ("warning: Could not register change state handler\n"
                "(Audio can continue looping even after stopping the VM)\n");
                "(Audio can continue looping even after stopping the VM)\n");
     }
     }
 
 
     QTAILQ_INSERT_TAIL(&audio_states, s, list);
     QTAILQ_INSERT_TAIL(&audio_states, s, list);
     QLIST_INIT (&s->card_head);
     QLIST_INIT (&s->card_head);
-    vmstate_register (NULL, 0, &vmstate_audio, s);
+    vmstate_register_any(NULL, &vmstate_audio, s);
     return s;
+
+out:
+    free_audio_state(s);
+    return NULL;
 }
 
-void audio_free_audiodev_list(AudiodevListHead *head)
+AudioState *audio_get_default_audio_state(Error **errp)
 {
-    AudiodevListEntry *e;
-    while ((e = QSIMPLEQ_FIRST(head))) {
-        QSIMPLEQ_REMOVE_HEAD(head, next);
-        qapi_free_Audiodev(e->dev);
-        g_free(e);
+    if (!default_audio_state) {
+        default_audio_state = audio_init(NULL, errp);
+        if (!default_audio_state) {
+            if (!QSIMPLEQ_EMPTY(&audiodevs)) {
+                error_append_hint(errp, "Perhaps you wanted to use -audio or set audiodev=%s?\n",
+                                  QSIMPLEQ_FIRST(&audiodevs)->dev->id);
+            }
+        }
     }
+
+    return default_audio_state;
 }
 
-void AUD_register_card (const char *name, QEMUSoundCard *card)
+bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp)
 {
     if (!card->state) {
-        card->state = audio_init(NULL, name);
+        card->state = audio_get_default_audio_state(errp);
+        if (!card->state) {
+            return false;
+        }
     }
 
     card->name = g_strdup (name);
     memset (&card->entries, 0, sizeof (card->entries));
     QLIST_INSERT_HEAD(&card->state->card_head, card, entries);
+
+    return true;
 }
 
 void AUD_remove_card (QEMUSoundCard *card)
@@ -1894,10 +1860,8 @@ CaptureVoiceOut *AUD_add_capture(
     struct capture_callback *cb;
 
     if (!s) {
-        if (!legacy_config) {
-            dolog("Capturing without setting an audiodev is deprecated\n");
-        }
-        s = audio_init(NULL, NULL);
+        error_report("Capturing without setting an audiodev is not supported");
+        abort();
     }
 
     if (!audio_get_pdo_out(s->dev)->mixing_engine) {
@@ -1918,10 +1882,8 @@ CaptureVoiceOut *AUD_add_capture(
     cap = audio_pcm_capture_find_specific(s, as);
     if (cap) {
         QLIST_INSERT_HEAD (&cap->cb_head, cb, entries);
-        return cap;
     } else {
         HWVoiceOut *hw;
-        CaptureVoiceOut *cap;
 
         cap = g_malloc0(sizeof(*cap));
 
@@ -1955,8 +1917,9 @@ CaptureVoiceOut *AUD_add_capture(
         QLIST_FOREACH(hw, &s->hw_head_out, entries) {
             audio_attach_capture (hw);
         }
-        return cap;
     }
+
+    return cap;
 }
 
 void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque)
@@ -2079,6 +2042,9 @@ void audio_create_pdos(Audiodev *dev)
 #ifdef CONFIG_AUDIO_PA
         CASE(PA, pa, Pa);
 #endif
+#ifdef CONFIG_AUDIO_PIPEWIRE
+        CASE(PIPEWIRE, pipewire, Pipewire);
+#endif
 #ifdef CONFIG_AUDIO_SDL
         CASE(SDL, sdl, Sdl);
 #endif
@@ -2199,17 +2165,24 @@ void audio_define(Audiodev *dev)
     QSIMPLEQ_INSERT_TAIL(&audiodevs, e, next);
 }
 
-bool audio_init_audiodevs(void)
+void audio_define_default(Audiodev *dev, Error **errp)
+{
+    AudiodevListEntry *e;
+
+    audio_validate_opts(dev, errp);
+
+    e = g_new0(AudiodevListEntry, 1);
+    e->dev = dev;
+    QSIMPLEQ_INSERT_TAIL(&default_audiodevs, e, next);
+}
+
+void audio_init_audiodevs(void)
 {
     AudiodevListEntry *e;
 
     QSIMPLEQ_FOREACH(e, &audiodevs, next) {
-        if (!audio_init(e->dev, NULL)) {
-            return false;
-        }
+        audio_init(e->dev, &error_fatal);
     }
-
-    return true;
 }
 
 audsettings audiodev_to_audsettings(AudiodevPerDirectionOptions *pdo)
@@ -2271,7 +2244,7 @@ int audio_buffer_bytes(AudiodevPerDirectionOptions *pdo,
         audioformat_bytes_per_sample(as->fmt);
 }
 
-AudioState *audio_state_by_name(const char *name)
+AudioState *audio_state_by_name(const char *name, Error **errp)
 {
     AudioState *s;
     QTAILQ_FOREACH(s, &audio_states, list) {
@@ -2280,6 +2253,7 @@ AudioState *audio_state_by_name(const char *name)
             return s;
         }
     }
+    error_setg(errp, "audiodev '%s' not found", name);
     return NULL;
 }
 

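With this change audio_driver_init() and audio_init() report failures through Error ** instead of dolog(), and AUD_register_card() returns a bool. A minimal sketch of how a caller might adapt; MyCardState, MYCARD and the "mycard" name are placeholders, not part of this diff:

    /* Hypothetical device realize(): registration failure now propagates
     * through errp instead of silently falling back to a dummy driver. */
    static void mycard_realize(DeviceState *dev, Error **errp)
    {
        MyCardState *s = MYCARD(dev);            /* assumed state type with a QEMUSoundCard field */

        if (!AUD_register_card("mycard", &s->card, errp)) {
            return;                              /* errp was set by the audio subsystem */
        }
        /* open voices with AUD_open_out()/AUD_open_in() as before */
    }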
+ 6 - 4
audio/audio.h

@@ -94,7 +94,7 @@ typedef struct QEMUAudioTimeStamp {
 void AUD_vlog (const char *cap, const char *fmt, va_list ap) G_GNUC_PRINTF(2, 0);
 void AUD_log (const char *cap, const char *fmt, ...) G_GNUC_PRINTF(2, 3);
 
-void AUD_register_card (const char *name, QEMUSoundCard *card);
+bool AUD_register_card (const char *name, QEMUSoundCard *card, Error **errp);
 void AUD_remove_card (QEMUSoundCard *card);
 CaptureVoiceOut *AUD_add_capture(
     AudioState *s,
@@ -169,12 +169,14 @@ void audio_sample_from_uint64(void *samples, int pos,
                             uint64_t left, uint64_t right);
 
 void audio_define(Audiodev *audio);
+void audio_define_default(Audiodev *dev, Error **errp);
 void audio_parse_option(const char *opt);
-bool audio_init_audiodevs(void);
+void audio_create_default_audiodevs(void);
+void audio_init_audiodevs(void);
 void audio_help(void);
-void audio_legacy_help(void);
 
-AudioState *audio_state_by_name(const char *name);
+AudioState *audio_state_by_name(const char *name, Error **errp);
+AudioState *audio_get_default_audio_state(Error **errp);
 const char *audio_get_id(QEMUSoundCard *card);
 
 #define DEFINE_AUDIO_PROPERTIES(_s, _f)         \

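The audio.h prototypes above show the same pattern for state lookup: audio_state_by_name() now takes an Error ** and audio_get_default_audio_state() replaces the old implicit default. A sketch of a caller, assuming `id` comes from a user-supplied option (the helper name is illustrative):

    /* Illustrative helper, not from this diff. */
    static AudioState *resolve_audiodev(const char *id, Error **errp)
    {
        if (id) {
            /* sets "audiodev '<id>' not found" on failure */
            return audio_state_by_name(id, errp);
        }
        /* falls back to the "#default" audiodevs created at startup */
        return audio_get_default_audio_state(errp);
    }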
+ 1 - 6
audio/audio_int.h

@@ -140,13 +140,12 @@ typedef struct audio_driver audio_driver;
 struct audio_driver {
     const char *name;
     const char *descr;
-    void *(*init) (Audiodev *);
+    void *(*init) (Audiodev *, Error **);
     void (*fini) (void *);
 #ifdef CONFIG_GIO
     void (*set_dbus_server) (AudioState *s, GDBusObjectManagerServer *manager, bool p2p);
 #endif
     struct audio_pcm_ops *pcm_ops;
-    int can_be_default;
     int max_voices_out;
     int max_voices_in;
     size_t voice_size_out;
@@ -245,7 +244,6 @@ extern const struct mixeng_volume nominal_volume;
 extern const char *audio_prio_list[];
 
 void audio_driver_register(audio_driver *drv);
-audio_driver *audio_driver_lookup(const char *name);
 
 void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as);
 void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len);
@@ -299,9 +297,6 @@ typedef struct AudiodevListEntry {
 } AudiodevListEntry;
 
 typedef QSIMPLEQ_HEAD(, AudiodevListEntry) AudiodevListHead;
-AudiodevListHead audio_handle_legacy_opts(void);
-
-void audio_free_audiodev_list(AudiodevListHead *head);
 
 void audio_create_pdos(Audiodev *dev);
 AudiodevPerDirectionOptions *audio_get_pdo_in(Audiodev *dev);

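Backends now implement init(Audiodev *, Error **) and no longer advertise can_be_default; the default order comes solely from audio_prio_list[]. An illustrative skeleton of a made-up "noop" backend (its fini, pcm_ops and the probe function are assumed to exist elsewhere and are not part of this diff):

    static void *noop_audio_init(Audiodev *dev, Error **errp)
    {
        if (!noop_backend_is_usable()) {         /* made-up availability probe */
            error_setg(errp, "noop backend not available");
            return NULL;                         /* caller reports errp */
        }
        return dev;                              /* opaque handle, as coreaudio/jack do */
    }

    static struct audio_driver noop_audio_driver = {
        .name    = "noop",
        .descr   = "illustrative no-op backend",
        .init    = noop_audio_init,
        .fini    = noop_audio_fini,
        .pcm_ops = &noop_pcm_ops,
        /* no .can_be_default any more */
    };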
+ 0 - 591
audio/audio_legacy.c

@@ -1,591 +0,0 @@
-/*
- * QEMU Audio subsystem: legacy configuration handling
- *
- * Copyright (c) 2015-2019 Zoltán Kővágó <DirtY.iCE.hu@gmail.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a copy
- * of this software and associated documentation files (the "Software"), to deal
- * in the Software without restriction, including without limitation the rights
- * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
- * copies of the Software, and to permit persons to whom the Software is
- * furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice shall be included in
- * all copies or substantial portions of the Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
- * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
- * THE SOFTWARE.
- */
-#include "qemu/osdep.h"
-#include "audio.h"
-#include "audio_int.h"
-#include "qemu/cutils.h"
-#include "qemu/timer.h"
-#include "qapi/error.h"
-#include "qapi/qapi-visit-audio.h"
-#include "qapi/visitor-impl.h"
-
-#define AUDIO_CAP "audio-legacy"
-#include "audio_int.h"
-
-static uint32_t toui32(const char *str)
-{
-    unsigned long long ret;
-    if (parse_uint_full(str, &ret, 10) || ret > UINT32_MAX) {
-        dolog("Invalid integer value `%s'\n", str);
-        exit(1);
-    }
-    return ret;
-}
-
-/* helper functions to convert env variables */
-static void get_bool(const char *env, bool *dst, bool *has_dst)
-{
-    const char *val = getenv(env);
-    if (val) {
-        *dst = toui32(val) != 0;
-        *has_dst = true;
-    }
-}
-
-static void get_int(const char *env, uint32_t *dst, bool *has_dst)
-{
-    const char *val = getenv(env);
-    if (val) {
-        *dst = toui32(val);
-        *has_dst = true;
-    }
-}
-
-static void get_str(const char *env, char **dst)
-{
-    const char *val = getenv(env);
-    if (val) {
-        g_free(*dst);
-        *dst = g_strdup(val);
-    }
-}
-
-static void get_fmt(const char *env, AudioFormat *dst, bool *has_dst)
-{
-    const char *val = getenv(env);
-    if (val) {
-        size_t i;
-        for (i = 0; AudioFormat_lookup.size; ++i) {
-            if (strcasecmp(val, AudioFormat_lookup.array[i]) == 0) {
-                *dst = i;
-                *has_dst = true;
-                return;
-            }
-        }
-
-        dolog("Invalid audio format `%s'\n", val);
-        exit(1);
-    }
-}
-
-
-#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_DSOUND)
-static void get_millis_to_usecs(const char *env, uint32_t *dst, bool *has_dst)
-{
-    const char *val = getenv(env);
-    if (val) {
-        *dst = toui32(val) * 1000;
-        *has_dst = true;
-    }
-}
-#endif
-
-#if defined(CONFIG_AUDIO_ALSA) || defined(CONFIG_AUDIO_COREAUDIO) || \
-    defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
-    defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
-static uint32_t frames_to_usecs(uint32_t frames,
-                                AudiodevPerDirectionOptions *pdo)
-{
-    uint32_t freq = pdo->has_frequency ? pdo->frequency : 44100;
-    return (frames * 1000000 + freq / 2) / freq;
-}
-#endif
-
-#ifdef CONFIG_AUDIO_COREAUDIO
-static void get_frames_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
-                                AudiodevPerDirectionOptions *pdo)
-{
-    const char *val = getenv(env);
-    if (val) {
-        *dst = frames_to_usecs(toui32(val), pdo);
-        *has_dst = true;
-    }
-}
-#endif
-
-#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL) || \
-    defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
-static uint32_t samples_to_usecs(uint32_t samples,
-                                 AudiodevPerDirectionOptions *pdo)
-{
-    uint32_t channels = pdo->has_channels ? pdo->channels : 2;
-    return frames_to_usecs(samples / channels, pdo);
-}
-#endif
-
-#if defined(CONFIG_AUDIO_PA) || defined(CONFIG_AUDIO_SDL)
-static void get_samples_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
-                                 AudiodevPerDirectionOptions *pdo)
-{
-    const char *val = getenv(env);
-    if (val) {
-        *dst = samples_to_usecs(toui32(val), pdo);
-        *has_dst = true;
-    }
-}
-#endif
-
-#if defined(CONFIG_AUDIO_DSOUND) || defined(CONFIG_AUDIO_OSS)
-static uint32_t bytes_to_usecs(uint32_t bytes, AudiodevPerDirectionOptions *pdo)
-{
-    AudioFormat fmt = pdo->has_format ? pdo->format : AUDIO_FORMAT_S16;
-    uint32_t bytes_per_sample = audioformat_bytes_per_sample(fmt);
-    return samples_to_usecs(bytes / bytes_per_sample, pdo);
-}
-
-static void get_bytes_to_usecs(const char *env, uint32_t *dst, bool *has_dst,
-                               AudiodevPerDirectionOptions *pdo)
-{
-    const char *val = getenv(env);
-    if (val) {
-        *dst = bytes_to_usecs(toui32(val), pdo);
-        *has_dst = true;
-    }
-}
-#endif
-
-/* backend specific functions */
-
-#ifdef CONFIG_AUDIO_ALSA
-/* ALSA */
-static void handle_alsa_per_direction(
-    AudiodevAlsaPerDirectionOptions *apdo, const char *prefix)
-{
-    char buf[64];
-    size_t len = strlen(prefix);
-    bool size_in_usecs = false;
-    bool dummy;
-
-    memcpy(buf, prefix, len);
-    strcpy(buf + len, "TRY_POLL");
-    get_bool(buf, &apdo->try_poll, &apdo->has_try_poll);
-
-    strcpy(buf + len, "DEV");
-    get_str(buf, &apdo->dev);
-
-    strcpy(buf + len, "SIZE_IN_USEC");
-    get_bool(buf, &size_in_usecs, &dummy);
-
-    strcpy(buf + len, "PERIOD_SIZE");
-    get_int(buf, &apdo->period_length, &apdo->has_period_length);
-    if (apdo->has_period_length && !size_in_usecs) {
-        apdo->period_length = frames_to_usecs(
-            apdo->period_length,
-            qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
-    }
-
-    strcpy(buf + len, "BUFFER_SIZE");
-    get_int(buf, &apdo->buffer_length, &apdo->has_buffer_length);
-    if (apdo->has_buffer_length && !size_in_usecs) {
-        apdo->buffer_length = frames_to_usecs(
-            apdo->buffer_length,
-            qapi_AudiodevAlsaPerDirectionOptions_base(apdo));
-    }
-}
-
-static void handle_alsa(Audiodev *dev)
-{
-    AudiodevAlsaOptions *aopt = &dev->u.alsa;
-    handle_alsa_per_direction(aopt->in, "QEMU_ALSA_ADC_");
-    handle_alsa_per_direction(aopt->out, "QEMU_ALSA_DAC_");
-
-    get_millis_to_usecs("QEMU_ALSA_THRESHOLD",
-                        &aopt->threshold, &aopt->has_threshold);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_COREAUDIO
-/* coreaudio */
-static void handle_coreaudio(Audiodev *dev)
-{
-    get_frames_to_usecs(
-        "QEMU_COREAUDIO_BUFFER_SIZE",
-        &dev->u.coreaudio.out->buffer_length,
-        &dev->u.coreaudio.out->has_buffer_length,
-        qapi_AudiodevCoreaudioPerDirectionOptions_base(dev->u.coreaudio.out));
-    get_int("QEMU_COREAUDIO_BUFFER_COUNT",
-            &dev->u.coreaudio.out->buffer_count,
-            &dev->u.coreaudio.out->has_buffer_count);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_DSOUND
-/* dsound */
-static void handle_dsound(Audiodev *dev)
-{
-    get_millis_to_usecs("QEMU_DSOUND_LATENCY_MILLIS",
-                        &dev->u.dsound.latency, &dev->u.dsound.has_latency);
-    get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_OUT",
-                       &dev->u.dsound.out->buffer_length,
-                       &dev->u.dsound.out->has_buffer_length,
-                       dev->u.dsound.out);
-    get_bytes_to_usecs("QEMU_DSOUND_BUFSIZE_IN",
-                       &dev->u.dsound.in->buffer_length,
-                       &dev->u.dsound.in->has_buffer_length,
-                       dev->u.dsound.in);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_OSS
-/* OSS */
-static void handle_oss_per_direction(
-    AudiodevOssPerDirectionOptions *opdo, const char *try_poll_env,
-    const char *dev_env)
-{
-    get_bool(try_poll_env, &opdo->try_poll, &opdo->has_try_poll);
-    get_str(dev_env, &opdo->dev);
-
-    get_bytes_to_usecs("QEMU_OSS_FRAGSIZE",
-                       &opdo->buffer_length, &opdo->has_buffer_length,
-                       qapi_AudiodevOssPerDirectionOptions_base(opdo));
-    get_int("QEMU_OSS_NFRAGS", &opdo->buffer_count,
-            &opdo->has_buffer_count);
-}
-
-static void handle_oss(Audiodev *dev)
-{
-    AudiodevOssOptions *oopt = &dev->u.oss;
-    handle_oss_per_direction(oopt->in, "QEMU_AUDIO_ADC_TRY_POLL",
-                             "QEMU_OSS_ADC_DEV");
-    handle_oss_per_direction(oopt->out, "QEMU_AUDIO_DAC_TRY_POLL",
-                             "QEMU_OSS_DAC_DEV");
-
-    get_bool("QEMU_OSS_MMAP", &oopt->try_mmap, &oopt->has_try_mmap);
-    get_bool("QEMU_OSS_EXCLUSIVE", &oopt->exclusive, &oopt->has_exclusive);
-    get_int("QEMU_OSS_POLICY", &oopt->dsp_policy, &oopt->has_dsp_policy);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_PA
-/* pulseaudio */
-static void handle_pa_per_direction(
-    AudiodevPaPerDirectionOptions *ppdo, const char *env)
-{
-    get_str(env, &ppdo->name);
-}
-
-static void handle_pa(Audiodev *dev)
-{
-    handle_pa_per_direction(dev->u.pa.in, "QEMU_PA_SOURCE");
-    handle_pa_per_direction(dev->u.pa.out, "QEMU_PA_SINK");
-
-    get_samples_to_usecs(
-        "QEMU_PA_SAMPLES", &dev->u.pa.in->buffer_length,
-        &dev->u.pa.in->has_buffer_length,
-        qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.in));
-    get_samples_to_usecs(
-        "QEMU_PA_SAMPLES", &dev->u.pa.out->buffer_length,
-        &dev->u.pa.out->has_buffer_length,
-        qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.out));
-
-    get_str("QEMU_PA_SERVER", &dev->u.pa.server);
-}
-#endif
-
-#ifdef CONFIG_AUDIO_SDL
-/* SDL */
-static void handle_sdl(Audiodev *dev)
-{
-    /* SDL is output only */
-    get_samples_to_usecs("QEMU_SDL_SAMPLES", &dev->u.sdl.out->buffer_length,
-        &dev->u.sdl.out->has_buffer_length,
-        qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.out));
-}
-#endif
-
-/* wav */
-static void handle_wav(Audiodev *dev)
-{
-    get_int("QEMU_WAV_FREQUENCY",
-            &dev->u.wav.out->frequency, &dev->u.wav.out->has_frequency);
-    get_fmt("QEMU_WAV_FORMAT", &dev->u.wav.out->format,
-            &dev->u.wav.out->has_format);
-    get_int("QEMU_WAV_DAC_FIXED_CHANNELS",
-            &dev->u.wav.out->channels, &dev->u.wav.out->has_channels);
-    get_str("QEMU_WAV_PATH", &dev->u.wav.path);
-}
-
-/* general */
-static void handle_per_direction(
-    AudiodevPerDirectionOptions *pdo, const char *prefix)
-{
-    char buf[64];
-    size_t len = strlen(prefix);
-
-    memcpy(buf, prefix, len);
-    strcpy(buf + len, "FIXED_SETTINGS");
-    get_bool(buf, &pdo->fixed_settings, &pdo->has_fixed_settings);
-
-    strcpy(buf + len, "FIXED_FREQ");
-    get_int(buf, &pdo->frequency, &pdo->has_frequency);
-
-    strcpy(buf + len, "FIXED_FMT");
-    get_fmt(buf, &pdo->format, &pdo->has_format);
-
-    strcpy(buf + len, "FIXED_CHANNELS");
-    get_int(buf, &pdo->channels, &pdo->has_channels);
-
-    strcpy(buf + len, "VOICES");
-    get_int(buf, &pdo->voices, &pdo->has_voices);
-}
-
-static AudiodevListEntry *legacy_opt(const char *drvname)
-{
-    AudiodevListEntry *e = g_new0(AudiodevListEntry, 1);
-    e->dev = g_new0(Audiodev, 1);
-    e->dev->id = g_strdup(drvname);
-    e->dev->driver = qapi_enum_parse(
-        &AudiodevDriver_lookup, drvname, -1, &error_abort);
-
-    audio_create_pdos(e->dev);
-
-    handle_per_direction(audio_get_pdo_in(e->dev), "QEMU_AUDIO_ADC_");
-    handle_per_direction(audio_get_pdo_out(e->dev), "QEMU_AUDIO_DAC_");
-
-    /* Original description: Timer period in HZ (0 - use lowest possible) */
-    get_int("QEMU_AUDIO_TIMER_PERIOD",
-            &e->dev->timer_period, &e->dev->has_timer_period);
-    if (e->dev->has_timer_period && e->dev->timer_period) {
-        e->dev->timer_period = NANOSECONDS_PER_SECOND / 1000 /
-                               e->dev->timer_period;
-    }
-
-    switch (e->dev->driver) {
-#ifdef CONFIG_AUDIO_ALSA
-    case AUDIODEV_DRIVER_ALSA:
-        handle_alsa(e->dev);
-        break;
-#endif
-
-#ifdef CONFIG_AUDIO_COREAUDIO
-    case AUDIODEV_DRIVER_COREAUDIO:
-        handle_coreaudio(e->dev);
-        break;
-#endif
-
-#ifdef CONFIG_AUDIO_DSOUND
-    case AUDIODEV_DRIVER_DSOUND:
-        handle_dsound(e->dev);
-        break;
-#endif
-
-#ifdef CONFIG_AUDIO_OSS
-    case AUDIODEV_DRIVER_OSS:
-        handle_oss(e->dev);
-        break;
-#endif
-
-#ifdef CONFIG_AUDIO_PA
-    case AUDIODEV_DRIVER_PA:
-        handle_pa(e->dev);
-        break;
-#endif
-
-#ifdef CONFIG_AUDIO_SDL
-    case AUDIODEV_DRIVER_SDL:
-        handle_sdl(e->dev);
-        break;
-#endif
-
-    case AUDIODEV_DRIVER_WAV:
-        handle_wav(e->dev);
-        break;
-
-    default:
-        break;
-    }
-
-    return e;
-}
-
-AudiodevListHead audio_handle_legacy_opts(void)
-{
-    const char *drvname = getenv("QEMU_AUDIO_DRV");
-    AudiodevListHead head = QSIMPLEQ_HEAD_INITIALIZER(head);
-
-    if (drvname) {
-        AudiodevListEntry *e;
-        audio_driver *driver = audio_driver_lookup(drvname);
-        if (!driver) {
-            dolog("Unknown audio driver `%s'\n", drvname);
-            exit(1);
-        }
-        e = legacy_opt(drvname);
-        QSIMPLEQ_INSERT_TAIL(&head, e, next);
-    } else {
-        for (int i = 0; audio_prio_list[i]; i++) {
-            audio_driver *driver = audio_driver_lookup(audio_prio_list[i]);
-            if (driver && driver->can_be_default) {
-                AudiodevListEntry *e = legacy_opt(driver->name);
-                QSIMPLEQ_INSERT_TAIL(&head, e, next);
-            }
-        }
-        if (QSIMPLEQ_EMPTY(&head)) {
-            dolog("Internal error: no default audio driver available\n");
-            exit(1);
-        }
-    }
-
-    return head;
-}
-
-/* visitor to print -audiodev option */
-typedef struct {
-    Visitor visitor;
-
-    bool comma;
-    GList *path;
-} LegacyPrintVisitor;
-
-static bool lv_start_struct(Visitor *v, const char *name, void **obj,
-                            size_t size, Error **errp)
-{
-    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
-    lv->path = g_list_append(lv->path, g_strdup(name));
-    return true;
-}
-
-static void lv_end_struct(Visitor *v, void **obj)
-{
-    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
-    lv->path = g_list_delete_link(lv->path, g_list_last(lv->path));
-}
-
-static void lv_print_key(Visitor *v, const char *name)
-{
-    GList *e;
-    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
-    if (lv->comma) {
-        putchar(',');
-    } else {
-        lv->comma = true;
-    }
-
-    for (e = lv->path; e; e = e->next) {
-        if (e->data) {
-            printf("%s.", (const char *) e->data);
-        }
-    }
-
-    printf("%s=", name);
-}
-
-static bool lv_type_int64(Visitor *v, const char *name, int64_t *obj,
-                          Error **errp)
-{
-    lv_print_key(v, name);
-    printf("%" PRIi64, *obj);
-    return true;
-}
-
-static bool lv_type_uint64(Visitor *v, const char *name, uint64_t *obj,
-                           Error **errp)
-{
-    lv_print_key(v, name);
-    printf("%" PRIu64, *obj);
-    return true;
-}
-
-static bool lv_type_bool(Visitor *v, const char *name, bool *obj, Error **errp)
-{
-    lv_print_key(v, name);
-    printf("%s", *obj ? "on" : "off");
-    return true;
-}
-
-static bool lv_type_str(Visitor *v, const char *name, char **obj, Error **errp)
-{
-    const char *str = *obj;
-    lv_print_key(v, name);
-
-    while (*str) {
-        if (*str == ',') {
-            putchar(',');
-        }
-        putchar(*str++);
-    }
-    return true;
-}
-
-static void lv_complete(Visitor *v, void *opaque)
-{
-    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
-    assert(lv->path == NULL);
-}
-
-static void lv_free(Visitor *v)
-{
-    LegacyPrintVisitor *lv = (LegacyPrintVisitor *) v;
-
-    g_list_free_full(lv->path, g_free);
-    g_free(lv);
-}
-
-static Visitor *legacy_visitor_new(void)
-{
-    LegacyPrintVisitor *lv = g_new0(LegacyPrintVisitor, 1);
-
-    lv->visitor.start_struct = lv_start_struct;
-    lv->visitor.end_struct = lv_end_struct;
-    /* lists not supported */
-    lv->visitor.type_int64 = lv_type_int64;
-    lv->visitor.type_uint64 = lv_type_uint64;
-    lv->visitor.type_bool = lv_type_bool;
-    lv->visitor.type_str = lv_type_str;
-
-    lv->visitor.type = VISITOR_OUTPUT;
-    lv->visitor.complete = lv_complete;
-    lv->visitor.free = lv_free;
-
-    return &lv->visitor;
-}
-
-void audio_legacy_help(void)
-{
-    AudiodevListHead head;
-    AudiodevListEntry *e;
-
-    printf("Environment variable based configuration deprecated.\n");
-    printf("Please use the new -audiodev option.\n");
-
-    head = audio_handle_legacy_opts();
-    printf("\nEquivalent -audiodev to your current environment variables:\n");
-    if (!getenv("QEMU_AUDIO_DRV")) {
-        printf("(Since you didn't specify QEMU_AUDIO_DRV, I'll list all "
-               "possibilities)\n");
-    }
-
-    QSIMPLEQ_FOREACH(e, &head, next) {
-        Visitor *v;
-        Audiodev *dev = e->dev;
-        printf("-audiodev ");
-
-        v = legacy_visitor_new();
-        visit_type_Audiodev(v, NULL, &dev, &error_abort);
-        visit_free(v);
-
-        printf("\n");
-    }
-    audio_free_audiodev_list(&head);
-}

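The entire QEMU_AUDIO_*/QEMU_<DRV>_* environment-variable layer above is removed; audio backends are now configured exclusively with -audiodev (for example `-audiodev pa,id=audio0 -device AC97,audiodev=audio0`), and the built-in default is produced by audio_create_default_audiodevs() walking audio_prio_list[].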
+ 12 - 1
audio/audio_template.h

@@ -37,11 +37,12 @@
 #endif
 
 static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
-                                              struct audio_driver *drv)
+                                              struct audio_driver *drv, int min_voices)
 {
     int max_voices = glue (drv->max_voices_, TYPE);
     size_t voice_size = glue(drv->voice_size_, TYPE);
 
+    glue (s->nb_hw_voices_, TYPE) = glue(audio_get_pdo_, TYPE)(s->dev)->voices;
     if (glue (s->nb_hw_voices_, TYPE) > max_voices) {
         if (!max_voices) {
 #ifdef DAC
@@ -56,6 +57,12 @@ static void glue(audio_init_nb_voices_, TYPE)(AudioState *s,
         glue (s->nb_hw_voices_, TYPE) = max_voices;
     }
 
+    if (glue (s->nb_hw_voices_, TYPE) < min_voices) {
+        dolog ("Bogus number of " NAME " voices %d, setting to %d\n",
+               glue (s->nb_hw_voices_, TYPE),
+               min_voices);
+    }
+
     if (audio_bug(__func__, !voice_size && max_voices)) {
         dolog ("drv=`%s' voice_size=0 max_voices=%d\n",
                drv->name, max_voices);
@@ -362,6 +369,10 @@ AudiodevPerDirectionOptions *glue(audio_get_pdo_, TYPE)(Audiodev *dev)
     case AUDIODEV_DRIVER_PA:
         return qapi_AudiodevPaPerDirectionOptions_base(dev->u.pa.TYPE);
 #endif
+#ifdef CONFIG_AUDIO_PIPEWIRE
+    case AUDIODEV_DRIVER_PIPEWIRE:
+        return qapi_AudiodevPipewirePerDirectionOptions_base(dev->u.pipewire.TYPE);
+#endif
 #ifdef CONFIG_AUDIO_SDL
     case AUDIODEV_DRIVER_SDL:
         return qapi_AudiodevSdlPerDirectionOptions_base(dev->u.sdl.TYPE);

+ 5 - 11
audio/coreaudio.m

@@ -44,11 +44,6 @@
     bool enabled;
 } coreaudioVoiceOut;
 
-#if !defined(MAC_OS_VERSION_12_0) \
-    || (MAC_OS_X_VERSION_MIN_REQUIRED < MAC_OS_VERSION_12_0)
-#define kAudioObjectPropertyElementMain kAudioObjectPropertyElementMaster
-#endif
-
 static const AudioObjectPropertyAddress voice_addr = {
     kAudioHardwarePropertyDefaultOutputDevice,
     kAudioObjectPropertyScopeGlobal,
@@ -299,7 +294,7 @@ static ret_type glue(coreaudio_, name)args_decl             \
 #undef COREAUDIO_WRAPPER_FUNC
 
 /*
- * callback to feed audiooutput buffer. called without iothread lock.
+ * callback to feed audiooutput buffer. called without BQL.
  * allowed to lock "buf_mutex", but disallowed to have any other locks.
  */
 static OSStatus audioDeviceIOProc(
@@ -542,7 +537,7 @@ static void update_device_playback_state(coreaudioVoiceOut *core)
     }
 }
 
-/* called without iothread lock. */
+/* called without BQL. */
 static OSStatus handle_voice_change(
     AudioObjectID in_object_id,
     UInt32 in_number_addresses,
@@ -551,7 +546,7 @@ static OSStatus handle_voice_change(
 {
     coreaudioVoiceOut *core = in_client_data;
 
-    qemu_mutex_lock_iothread();
+    bql_lock();
 
     if (core->outputDeviceID) {
         fini_out_device(core);
@@ -563,7 +558,7 @@ static OSStatus handle_voice_change(
         update_device_playback_state(core);
     }
 
-    qemu_mutex_unlock_iothread();
+    bql_unlock();
     return 0;
 }
 
@@ -650,7 +645,7 @@ static void coreaudio_enable_out(HWVoiceOut *hw, bool enable)
     update_device_playback_state(core);
 }
 
-static void *coreaudio_audio_init(Audiodev *dev)
+static void *coreaudio_audio_init(Audiodev *dev, Error **errp)
 {
     return dev;
 }
@@ -679,7 +674,6 @@ static void coreaudio_audio_fini (void *opaque)
     .init           = coreaudio_audio_init,
     .fini           = coreaudio_audio_fini,
     .pcm_ops        = &coreaudio_pcm_ops,
-    .can_be_default = 1,
     .max_voices_out = 1,
     .max_voices_in  = 0,
     .voice_size_out = sizeof (coreaudioVoiceOut),

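The locking spelling in coreaudio.m follows the tree-wide rename in this release: qemu_mutex_lock_iothread()/qemu_mutex_unlock_iothread() become bql_lock()/bql_unlock(), and comments now say "BQL" (Big QEMU Lock). The pattern used by handle_voice_change() above, shown in isolation with an illustrative callback name:

    /* Runs on a CoreAudio thread, i.e. without the BQL held. */
    static void on_some_hw_event(void *opaque)
    {
        bql_lock();
        /* ... touch state that is protected by the BQL ... */
        bql_unlock();
    }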
+ 41 - 7
audio/dbusaudio.c

@@ -29,7 +29,11 @@
 #include "qemu/timer.h"
 #include "qemu/timer.h"
 #include "qemu/dbus.h"
 #include "qemu/dbus.h"
 
 
+#ifdef G_OS_UNIX
 #include <gio/gunixfdlist.h>
+#endif
+
+#include "ui/dbus.h"
 #include "ui/dbus-display1.h"
 #include "ui/dbus-display1.h"
 
 
 #define AUDIO_CAP "dbus"
 #define AUDIO_CAP "dbus"
@@ -101,7 +105,7 @@ static size_t dbus_put_buffer_out(HWVoiceOut *hw, void *buf, size_t size)
     assert(buf == vo->buf + vo->buf_pos && vo->buf_pos + size <= vo->buf_size);
     vo->buf_pos += size;
 
-    trace_dbus_audio_put_buffer_out(size);
+    trace_dbus_audio_put_buffer_out(vo->buf_pos, vo->buf_size);
 
     if (vo->buf_pos < vo->buf_size) {
         return size;
@@ -391,7 +395,7 @@ dbus_enable_in(HWVoiceIn *hw, bool enable)
 }
 
 static void *
-dbus_audio_init(Audiodev *dev)
+dbus_audio_init(Audiodev *dev, Error **errp)
 {
     DBusAudio *da = g_new0(DBusAudio, 1);
 
@@ -444,7 +448,9 @@ listener_in_vanished_cb(GDBusConnection *connection,
 static gboolean
 dbus_audio_register_listener(AudioState *s,
                              GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
                              GUnixFDList *fd_list,
+#endif
                              GVariant *arg_listener,
                              bool out)
 {
@@ -471,6 +477,11 @@ dbus_audio_register_listener(AudioState *s,
         return DBUS_METHOD_INVOCATION_HANDLED;
     }
 
+#ifdef G_OS_WIN32
+    if (!dbus_win32_import_socket(invocation, arg_listener, &fd)) {
+        return DBUS_METHOD_INVOCATION_HANDLED;
+    }
+#else
     fd = g_unix_fd_list_get(fd_list, g_variant_get_handle(arg_listener), &err);
     if (err) {
         g_dbus_method_invocation_return_error(invocation,
@@ -480,6 +491,7 @@ dbus_audio_register_listener(AudioState *s,
                                               err->message);
         return DBUS_METHOD_INVOCATION_HANDLED;
     }
+#endif
 
     socket = g_socket_new_from_fd(fd, &err);
     if (err) {
@@ -488,15 +500,28 @@ dbus_audio_register_listener(AudioState *s,
                                               DBUS_DISPLAY_ERROR_FAILED,
                                               "Couldn't make a socket: %s",
                                               err->message);
+#ifdef G_OS_WIN32
+        closesocket(fd);
+#else
+        close(fd);
+#endif
         return DBUS_METHOD_INVOCATION_HANDLED;
     }
     socket_conn = g_socket_connection_factory_create_connection(socket);
     if (out) {
         qemu_dbus_display1_audio_complete_register_out_listener(
-            da->iface, invocation, NULL);
+            da->iface, invocation
+#ifdef G_OS_UNIX
+            , NULL
+#endif
+            );
     } else {
         qemu_dbus_display1_audio_complete_register_in_listener(
-            da->iface, invocation, NULL);
+            da->iface, invocation
+#ifdef G_OS_UNIX
+            , NULL
+#endif
+            );
     }
 
     listener_conn =
@@ -574,22 +599,32 @@ dbus_audio_register_listener(AudioState *s,
 static gboolean
 dbus_audio_register_out_listener(AudioState *s,
                                  GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
                                  GUnixFDList *fd_list,
+#endif
                                  GVariant *arg_listener)
 {
     return dbus_audio_register_listener(s, invocation,
-                                        fd_list, arg_listener, true);
+#ifdef G_OS_UNIX
+                                        fd_list,
+#endif
+                                        arg_listener, true);
 
 }
 
 static gboolean
 dbus_audio_register_in_listener(AudioState *s,
                                 GDBusMethodInvocation *invocation,
+#ifdef G_OS_UNIX
                                 GUnixFDList *fd_list,
+#endif
                                 GVariant *arg_listener)
 {
     return dbus_audio_register_listener(s, invocation,
-                                        fd_list, arg_listener, false);
+#ifdef G_OS_UNIX
+                                        fd_list,
+#endif
+                                        arg_listener, false);
 }
 
 static void
@@ -641,7 +676,6 @@ static struct audio_driver dbus_audio_driver = {
     .fini            = dbus_audio_fini,
     .set_dbus_server = dbus_audio_set_server,
     .pcm_ops         = &dbus_pcm_ops,
-    .can_be_default  = 1,
     .max_voices_out  = INT_MAX,
     .max_voices_in   = INT_MAX,
     .voice_size_out  = sizeof(DBusVoiceOut),

+ 1 - 2
audio/dsoundaudio.c

@@ -619,7 +619,7 @@ static void dsound_audio_fini (void *opaque)
     g_free(s);
 }
 
-static void *dsound_audio_init(Audiodev *dev)
+static void *dsound_audio_init(Audiodev *dev, Error **errp)
 {
     int err;
     HRESULT hr;
@@ -721,7 +721,6 @@ static struct audio_driver dsound_audio_driver = {
     .init           = dsound_audio_init,
     .fini           = dsound_audio_fini,
     .pcm_ops        = &dsound_pcm_ops,
-    .can_be_default = 1,
     .max_voices_out = INT_MAX,
     .max_voices_in  = 1,
     .voice_size_out = sizeof (DSoundVoiceOut),

+ 15 - 9
audio/jackaudio.c

@@ -70,6 +70,9 @@ typedef struct QJackClient {
     int             buffersize;
     jack_port_t   **port;
     QJackBuffer     fifo;
+
+    /* Used as workspace by qjack_process() */
+    float **process_buffers;
 }
 QJackClient;
 
@@ -267,22 +270,21 @@ static int qjack_process(jack_nframes_t nframes, void *arg)
     }
 
     /* get the buffers for the ports */
-    float *buffers[c->nchannels];
     for (int i = 0; i < c->nchannels; ++i) {
-        buffers[i] = jack_port_get_buffer(c->port[i], nframes);
+        c->process_buffers[i] = jack_port_get_buffer(c->port[i], nframes);
     }
 
     if (c->out) {
         if (likely(c->enabled)) {
-            qjack_buffer_read_l(&c->fifo, buffers, nframes);
+            qjack_buffer_read_l(&c->fifo, c->process_buffers, nframes);
         } else {
             for (int i = 0; i < c->nchannels; ++i) {
-                memset(buffers[i], 0, nframes * sizeof(float));
+                memset(c->process_buffers[i], 0, nframes * sizeof(float));
             }
         }
     } else {
         if (likely(c->enabled)) {
-            qjack_buffer_write_l(&c->fifo, buffers, nframes);
+            qjack_buffer_write_l(&c->fifo, c->process_buffers, nframes);
         }
     }
 
@@ -400,7 +402,8 @@ static void qjack_client_connect_ports(QJackClient *c)
 static int qjack_client_init(QJackClient *c)
 {
     jack_status_t status;
-    char client_name[jack_client_name_size()];
+    int client_name_len = jack_client_name_size(); /* includes NUL */
+    g_autofree char *client_name = g_new(char, client_name_len);
     jack_options_t options = JackNullOption;
 
     if (c->state == QJACK_STATE_RUNNING) {
@@ -409,7 +412,7 @@ static int qjack_client_init(QJackClient *c)
 
     c->connect_ports = true;
 
-    snprintf(client_name, sizeof(client_name), "%s-%s",
+    snprintf(client_name, client_name_len, "%s-%s",
         c->out ? "out" : "in",
         c->out ? "out" : "in",
         c->opt->client_name ? c->opt->client_name : audio_application_name());
         c->opt->client_name ? c->opt->client_name : audio_application_name());
 
 
@@ -447,6 +450,9 @@ static int qjack_client_init(QJackClient *c)
           jack_get_client_name(c->client));
     }
 
+    /* Allocate working buffer for process callback */
+    c->process_buffers = g_new(float *, c->nchannels);
+
     jack_set_process_callback(c->client, qjack_process , c);
     jack_set_port_registration_callback(c->client, qjack_port_registration, c);
     jack_set_xrun_callback(c->client, qjack_xrun, c);
@@ -578,6 +584,7 @@ static void qjack_client_fini_locked(QJackClient *c)
 
         qjack_buffer_free(&c->fifo);
         g_free(c->port);
+        g_free(c->process_buffers);
 
         c->state = QJACK_STATE_DISCONNECTED;
         /* fallthrough */
@@ -638,7 +645,7 @@ static int qjack_thread_creator(jack_native_thread_t *thread,
 }
 #endif
 
-static void *qjack_init(Audiodev *dev)
+static void *qjack_init(Audiodev *dev, Error **errp)
 {
     assert(dev->driver == AUDIODEV_DRIVER_JACK);
     return dev;
@@ -669,7 +676,6 @@ static struct audio_driver jack_driver = {
     .init           = qjack_init,
     .fini           = qjack_fini,
     .pcm_ops        = &jack_pcm_ops,
-    .can_be_default = 1,
     .max_voices_out = INT_MAX,
     .max_voices_in  = INT_MAX,
     .voice_size_out = sizeof(QJackOut),

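The two jackaudio.c fixes above follow the same rule of avoiding variable-length stack arrays: the per-callback float *buffers[c->nchannels] VLA becomes a process_buffers workspace allocated once in qjack_client_init() and freed in qjack_client_fini_locked(), and the client_name VLA becomes a g_autofree heap buffer. A generic sketch of the first pattern (type and function names are illustrative, not from this diff):

    typedef struct {
        int     nchannels;
        float **scratch;                   /* one pointer per channel */
    } RtWorkspace;

    static void rt_workspace_init(RtWorkspace *w, int nchannels)
    {
        w->nchannels = nchannels;
        /* allocated up front so the realtime callback never allocates */
        w->scratch = g_new(float *, nchannels);
    }

    static void rt_workspace_fini(RtWorkspace *w)
    {
        g_free(w->scratch);
        w->scratch = NULL;
    }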
+ 7 - 6
audio/meson.build

@@ -1,15 +1,14 @@
-softmmu_ss.add([spice_headers, files('audio.c')])
-softmmu_ss.add(files(
+system_ss.add([spice_headers, files('audio.c')])
+system_ss.add(files(
   'audio-hmp-cmds.c',
-  'audio_legacy.c',
   'mixeng.c',
   'noaudio.c',
   'wavaudio.c',
   'wavcapture.c',
 ))
 
-softmmu_ss.add(when: coreaudio, if_true: files('coreaudio.m'))
-softmmu_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c'))
+system_ss.add(when: coreaudio, if_true: files('coreaudio.m'))
+system_ss.add(when: dsound, if_true: files('dsoundaudio.c', 'audio_win_int.c'))
 
 audio_modules = {}
 foreach m : [
@@ -19,6 +18,7 @@ foreach m : [
   ['sdl', sdl, files('sdlaudio.c')],
   ['jack', jack, files('jackaudio.c')],
   ['sndio', sndio, files('sndioaudio.c')],
+  ['pipewire', pipewire, files('pwaudio.c')],
   ['spice', spice, files('spiceaudio.c')]
 ]
   if m[1].found()
@@ -30,7 +30,8 @@
 
 if dbus_display
     module_ss = ss.source_set()
-    module_ss.add(when: gio, if_true: files('dbusaudio.c'))
+    module_ss.add(when: [gio, pixman],
+                  if_true: [dbus_display1, files('dbusaudio.c')])
     audio_modules += {'dbus': module_ss}
 endif
 

Too many files were changed in this diff, so some files are not shown.