/*
 * Constants for memory operations
 *
 * Authors:
 *  Richard Henderson <rth@twiddle.net>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#ifndef MEMOP_H
#define MEMOP_H

#include "qemu/host-utils.h"

typedef enum MemOp {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_128   = 4,
    MO_256   = 5,
    MO_512   = 6,
    MO_1024  = 7,
    MO_SIZE  = 0x07,   /* Mask for the above. */

    MO_SIGN  = 0x08,   /* Sign-extended, otherwise zero-extended. */

    MO_BSWAP = 0x10,   /* Host reverse endian. */
#if HOST_BIG_ENDIAN
    MO_LE    = MO_BSWAP,
    MO_BE    = 0,
#else
    MO_LE    = 0,
    MO_BE    = MO_BSWAP,
#endif
#ifdef COMPILING_PER_TARGET
#if TARGET_BIG_ENDIAN
    MO_TE    = MO_BE,
#else
    MO_TE    = MO_LE,
#endif
#endif

    /*
     * MO_UNALN accesses are never checked for alignment.
     * MO_ALIGN accesses will result in a call to the CPU's
     * do_unaligned_access hook if the guest address is not aligned.
     *
     * Some architectures (e.g. ARMv8) need an address that is aligned
     * to a size larger than the size of the memory access.
     * Some architectures (e.g. SPARCv9) need an address that is aligned,
     * but less strictly than the natural alignment.
     *
     * MO_ALIGN assumes the alignment size is the size of the memory
     * access.
     *
     * There are three options:
     * - unaligned access permitted (MO_UNALN);
     * - an alignment to the size of an access (MO_ALIGN);
     * - an alignment to a specified size, which may be more or less than
     *   the access size (MO_ALIGN_x, where 'x' is a size in bytes).
     */
    MO_ASHIFT   = 5,
    MO_AMASK    = 0x7 << MO_ASHIFT,
    MO_UNALN    = 0,
    MO_ALIGN_2  = 1 << MO_ASHIFT,
    MO_ALIGN_4  = 2 << MO_ASHIFT,
    MO_ALIGN_8  = 3 << MO_ASHIFT,
    MO_ALIGN_16 = 4 << MO_ASHIFT,
    MO_ALIGN_32 = 5 << MO_ASHIFT,
    MO_ALIGN_64 = 6 << MO_ASHIFT,
    MO_ALIGN    = MO_AMASK,

    /*
     * MO_ATOM_* describes the atomicity requirements of the operation:
     * MO_ATOM_IFALIGN: the operation must be single-copy atomic if it
     *    is aligned; if unaligned there is no atomicity.
     * MO_ATOM_IFALIGN_PAIR: the entire operation may be considered to
     *    be a pair of half-sized operations which are packed together
     *    for convenience, with single-copy atomicity on each half if
     *    the half is aligned.
     *    This is the atomicity e.g. of Arm pre-FEAT_LSE2 LDP.
     * MO_ATOM_WITHIN16: the operation is single-copy atomic, even if it
     *    is unaligned, so long as it does not cross a 16-byte boundary;
     *    if it crosses a 16-byte boundary there is no atomicity.
     *    This is the atomicity e.g. of Arm FEAT_LSE2 LDR.
     * MO_ATOM_WITHIN16_PAIR: the entire operation is single-copy atomic,
     *    if it happens to be within a 16-byte boundary, otherwise it
     *    devolves to a pair of half-sized MO_ATOM_WITHIN16 operations.
     *    Depending on alignment, one or both will be single-copy atomic.
     *    This is the atomicity e.g. of Arm FEAT_LSE2 LDP.
     * MO_ATOM_SUBALIGN: the operation is single-copy atomic in parts
     *    determined by the alignment. E.g. if an 8-byte value is
     *    accessed at an address which is 0 mod 8, then the whole 8-byte
     *    access is single-copy atomic; otherwise, if it is accessed at
     *    0 mod 4 then each 4-byte subobject is single-copy atomic;
     *    otherwise, if it is accessed at 0 mod 2 then the four 2-byte
     *    subobjects are single-copy atomic.
     *    This is the atomicity e.g. of IBM Power.
     * MO_ATOM_NONE: the operation has no atomicity requirements.
     *
     * Note that the default (i.e. 0) value is single-copy atomic to the
     * size of the operation, if aligned. This retains the behaviour
     * from before this field was introduced.
     */
    MO_ATOM_SHIFT         = 8,
    MO_ATOM_IFALIGN       = 0 << MO_ATOM_SHIFT,
    MO_ATOM_IFALIGN_PAIR  = 1 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16      = 2 << MO_ATOM_SHIFT,
    MO_ATOM_WITHIN16_PAIR = 3 << MO_ATOM_SHIFT,
    MO_ATOM_SUBALIGN      = 4 << MO_ATOM_SHIFT,
    MO_ATOM_NONE          = 5 << MO_ATOM_SHIFT,
    MO_ATOM_MASK          = 7 << MO_ATOM_SHIFT,

    /* Combinations of the above, for ease of use. */
    MO_UB    = MO_8,
    MO_UW    = MO_16,
    MO_UL    = MO_32,
    MO_UQ    = MO_64,
    MO_UO    = MO_128,
    MO_SB    = MO_SIGN | MO_8,
    MO_SW    = MO_SIGN | MO_16,
    MO_SL    = MO_SIGN | MO_32,
    MO_SQ    = MO_SIGN | MO_64,
    MO_SO    = MO_SIGN | MO_128,
    MO_LEUW  = MO_LE | MO_UW,
    MO_LEUL  = MO_LE | MO_UL,
    MO_LEUQ  = MO_LE | MO_UQ,
    MO_LESW  = MO_LE | MO_SW,
    MO_LESL  = MO_LE | MO_SL,
    MO_LESQ  = MO_LE | MO_SQ,
    MO_BEUW  = MO_BE | MO_UW,
    MO_BEUL  = MO_BE | MO_UL,
    MO_BEUQ  = MO_BE | MO_UQ,
    MO_BESW  = MO_BE | MO_SW,
    MO_BESL  = MO_BE | MO_SL,
    MO_BESQ  = MO_BE | MO_SQ,
#ifdef COMPILING_PER_TARGET
    MO_TEUW  = MO_TE | MO_UW,
    MO_TEUL  = MO_TE | MO_UL,
    MO_TEUQ  = MO_TE | MO_UQ,
    MO_TEUO  = MO_TE | MO_UO,
    MO_TESW  = MO_TE | MO_SW,
    MO_TESL  = MO_TE | MO_SL,
    MO_TESQ  = MO_TE | MO_SQ,
#endif

    MO_SSIZE = MO_SIZE | MO_SIGN,
} MemOp;
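
/*
 * Illustrative sketch: composing a MemOp and pulling the fields back
 * out with the masks above ('op' is a hypothetical variable used only
 * for this example). The atomicity field is left at its default,
 * MO_ATOM_IFALIGN (0).
 *
 *     MemOp op = MO_LESL | MO_ALIGN;
 *
 *     op & MO_SIZE       == MO_32
 *     op & MO_SIGN       == MO_SIGN
 *     op & MO_AMASK      == MO_ALIGN
 *     op & MO_ATOM_MASK  == MO_ATOM_IFALIGN
 */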

/* MemOp to size in bytes. */
static inline unsigned memop_size(MemOp op)
{
    return 1 << (op & MO_SIZE);
}
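
/*
 * For example, memop_size(MO_LEUQ) == 8: only the MO_SIZE field is
 * consulted, so the endianness, sign and alignment bits do not affect
 * the result.
 */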

/* Size in bytes to MemOp. */
static inline MemOp size_memop(unsigned size)
{
#ifdef CONFIG_DEBUG_TCG
    /* Power of 2 up to 8. */
    assert((size & (size - 1)) == 0 && size >= 1 && size <= 8);
#endif
    return (MemOp)ctz32(size);
}
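
/*
 * Within the asserted range, the two helpers are inverses:
 * size_memop(4) == MO_32 and memop_size(MO_32) == 4; more generally,
 * size_memop(memop_size(op)) == (op & MO_SIZE) for sizes up to MO_64.
 */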

/**
 * memop_alignment_bits:
 * @memop: MemOp value
 *
 * Extract the alignment requirement from the memop, as the log2
 * of the required alignment in bytes.
 */
static inline unsigned memop_alignment_bits(MemOp memop)
{
    unsigned a = memop & MO_AMASK;

    if (a == MO_UNALN) {
        /* No alignment required. */
        a = 0;
    } else if (a == MO_ALIGN) {
        /* A natural alignment requirement. */
        a = memop & MO_SIZE;
    } else {
        /* A specific alignment requirement. */
        a = a >> MO_ASHIFT;
    }
    return a;
}
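
/*
 * For example: memop_alignment_bits(MO_UQ | MO_ALIGN) == 3 (natural
 * 8-byte alignment), memop_alignment_bits(MO_UQ | MO_ALIGN_2) == 1
 * (only 2-byte alignment is enforced), and memop_alignment_bits(MO_UQ)
 * == 0, since the alignment field defaults to MO_UNALN.
 */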

/**
 * memop_atomicity_bits:
 * @memop: MemOp value
 *
 * Extract the atomicity size from the memop, expressed as the log2
 * of the size in bytes.
 */
static inline unsigned memop_atomicity_bits(MemOp memop)
{
    unsigned size = memop & MO_SIZE;

    switch (memop & MO_ATOM_MASK) {
    case MO_ATOM_NONE:
        size = MO_8;
        break;
    case MO_ATOM_IFALIGN_PAIR:
    case MO_ATOM_WITHIN16_PAIR:
        size = size ? size - 1 : 0;
        break;
    default:
        break;
    }
    return size;
}
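
/*
 * For example: memop_atomicity_bits(MO_128 | MO_ATOM_IFALIGN_PAIR) == 3,
 * i.e. each 8-byte half of the 16-byte operation is the unit that must
 * be single-copy atomic, matching the pair semantics described above;
 * memop_atomicity_bits(MO_UQ | MO_ATOM_NONE) == 0 (byte granularity).
 */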

#endif