stats64.c

/*
 * Atomic operations on 64-bit quantities.
 *
 * Copyright (C) 2017 Red Hat, Inc.
 *
 * Author: Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/atomic.h"
#include "qemu/stats64.h"
#include "qemu/processor.h"

#ifndef CONFIG_ATOMIC64
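/*
 * Everything below is only compiled on hosts that lack native 64-bit
 * atomics; the fast paths (presumably the inline helpers declared in
 * qemu/stats64.h) fall back to these lock-based slow paths.
 */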
static inline void stat64_rdlock(Stat64 *s)
{
    /* Keep out incoming writers to avoid them starving us. */
    qatomic_add(&s->lock, 2);

    /* If there is a concurrent writer, wait for it. */
    while (qatomic_read(&s->lock) & 1) {
        cpu_relax();
    }
}

static inline void stat64_rdunlock(Stat64 *s)
{
    qatomic_sub(&s->lock, 2);
}

static inline bool stat64_wrtrylock(Stat64 *s)
{
    return qatomic_cmpxchg(&s->lock, 0, 1) == 0;
}

static inline void stat64_wrunlock(Stat64 *s)
{
    qatomic_dec(&s->lock);
}
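/*
 * The lock word serves both roles: bit 0 is set while a writer holds the
 * lock (stat64_wrtrylock() only succeeds on a 0 -> 1 transition), and each
 * reader adds 2 before spinning until the writer bit clears.  A value of
 * 5, for example, means one writer in progress and two readers waiting
 * for it to finish.
 */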
uint64_t stat64_get(const Stat64 *s)
{
    uint32_t high, low;

    stat64_rdlock((Stat64 *)s);

    /* 64-bit writes always take the lock, so we can read in
     * any order.
     */
    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);
    stat64_rdunlock((Stat64 *)s);

    return ((uint64_t)high << 32) | low;
}
bool stat64_add32_carry(Stat64 *s, uint32_t low, uint32_t high)
{
    uint32_t old;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    /* 64-bit reads always take the lock, so they don't care about the
     * order of our update.  By updating s->low first, we can check
     * whether we have to carry into s->high.
     */
    old = qatomic_fetch_add(&s->low, low);
    high += (old + low) < old;
    qatomic_add(&s->high, high);
    stat64_wrunlock(s);
    return true;
}
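/*
 * A false return means a writer already held the lock; callers are
 * expected to retry.  A rough sketch of such a caller (the real fast
 * path lives in the stats64.h header, so this loop is illustrative,
 * not a copy of it):
 *
 *     static void add_sketch(Stat64 *s, uint64_t value)
 *     {
 *         while (!stat64_add32_carry(s, (uint32_t)value, value >> 32)) {
 *             // keep trying until the lock was free and the add happened
 *         }
 *     }
 */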
bool stat64_min_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value < orig) {
        /* We have to set low before high, just like stat64_min reads
         * high before low.  The value may become higher temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_min is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
bool stat64_max_slow(Stat64 *s, uint64_t value)
{
    uint32_t high, low;
    uint64_t orig;

    if (!stat64_wrtrylock(s)) {
        cpu_relax();
        return false;
    }

    high = qatomic_read(&s->high);
    low = qatomic_read(&s->low);

    orig = ((uint64_t)high << 32) | low;
    if (value > orig) {
        /* We have to set low before high, just like stat64_max reads
         * high before low.  The value may become lower temporarily, but
         * stat64_get does not notice (it takes the lock) and the only ill
         * effect on stat64_max is that the slow path may be triggered
         * unnecessarily.
         */
        qatomic_set(&s->low, (uint32_t)value);
        smp_wmb();
        qatomic_set(&s->high, value >> 32);
    }
    stat64_wrunlock(s);
    return true;
}
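/*
 * Like stat64_add32_carry(), the two comparison slow paths leave the
 * counter untouched and return false when another writer holds the lock;
 * the cpu_relax() before returning gives that writer time to finish
 * before the caller retries.
 */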
#endif