host-utils.c

/*
 * Utility compute operations used by translated code.
 *
 * Copyright (c) 2003 Fabrice Bellard
 * Copyright (c) 2007 Aurelien Jarno
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/host-utils.h"

#ifndef CONFIG_INT128
/* Long integer helpers */
static inline void mul64(uint64_t *plow, uint64_t *phigh,
                         uint64_t a, uint64_t b)
{
    typedef union {
        uint64_t ll;
        struct {
#ifdef HOST_WORDS_BIGENDIAN
            uint32_t high, low;
#else
            uint32_t low, high;
#endif
        } l;
    } LL;
    LL rl, rm, rn, rh, a0, b0;
    uint64_t c;

    a0.ll = a;
    b0.ll = b;

    rl.ll = (uint64_t)a0.l.low * b0.l.low;
    rm.ll = (uint64_t)a0.l.low * b0.l.high;
    rn.ll = (uint64_t)a0.l.high * b0.l.low;
    rh.ll = (uint64_t)a0.l.high * b0.l.high;

    c = (uint64_t)rl.l.high + rm.l.low + rn.l.low;
    rl.l.high = c;
    c >>= 32;
    c = c + rm.l.high + rn.l.high + rh.l.low;
    rh.l.low = c;
    rh.l.high += (uint32_t)(c >> 32);

    *plow = rl.ll;
    *phigh = rh.ll;
}
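
/*
 * The computation above is the schoolbook decomposition on 32-bit halves:
 *     a * b = (aH*bH << 64) + ((aH*bL + aL*bH) << 32) + aL*bL
 * where rl/rm/rn/rh hold the four partial products and c carries the
 * intermediate sums.  Worked example (values chosen for illustration,
 * not from the original file):
 *     a = 0x0000000100000002  (aH = 1, aL = 2)
 *     b = 0x0000000300000004  (bH = 3, bL = 4)
 *     partial products: aL*bL = 8, aL*bH = 6, aH*bL = 4, aH*bH = 3
 *     result: *phigh = 0x0000000000000003, *plow = 0x0000000A00000008
 */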

/* Unsigned 64x64 -> 128 multiplication */
void mulu64(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    mul64(plow, phigh, a, b);
}
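
/*
 * Example call (values are illustrative, not from the original file):
 *
 *     uint64_t lo, hi;
 *     mulu64(&lo, &hi, UINT64_MAX, UINT64_MAX);
 *     // (2^64 - 1)^2 = 2^128 - 2^65 + 1, so
 *     // hi == 0xFFFFFFFFFFFFFFFE and lo == 0x0000000000000001
 */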

/* Signed 64x64 -> 128 multiplication */
void muls64(uint64_t *plow, uint64_t *phigh, int64_t a, int64_t b)
{
    uint64_t rh;

    mul64(plow, &rh, a, b);

    /* Adjust for signs. */
    if (b < 0) {
        rh -= a;
    }
    if (a < 0) {
        rh -= b;
    }
    *phigh = rh;
}
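
/*
 * The sign adjustment works because reinterpreting a negative b as
 * unsigned adds 2^64 to it, which contributes a * 2^64 too much to the
 * product, i.e. exactly a in the high word (and symmetrically for a
 * negative a).  Illustrative call (values not from the original file):
 *
 *     uint64_t lo, hi;
 *     muls64(&lo, &hi, -2, 3);
 *     // -6 as a 128-bit value:
 *     // hi == 0xFFFFFFFFFFFFFFFF, lo == 0xFFFFFFFFFFFFFFFA
 */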

/*
 * Unsigned 128x64 division.  Returns 1 on overflow (divide by zero or
 * quotient exceeds 64 bits).  Otherwise returns the quotient via plow
 * and the remainder via phigh.
 */
int divu128(uint64_t *plow, uint64_t *phigh, uint64_t divisor)
{
    uint64_t dhi = *phigh;
    uint64_t dlo = *plow;
    unsigned i;
    uint64_t carry = 0;

    if (divisor == 0) {
        return 1;
    } else if (dhi == 0) {
        *plow = dlo / divisor;
        *phigh = dlo % divisor;
        return 0;
    } else if (dhi >= divisor) {
        /* The quotient would not fit in 64 bits. */
        return 1;
    } else {
        /* Shift-and-subtract division, one quotient bit per iteration. */
        for (i = 0; i < 64; i++) {
            carry = dhi >> 63;
            dhi = (dhi << 1) | (dlo >> 63);
            if (carry || (dhi >= divisor)) {
                dhi -= divisor;
                carry = 1;
            } else {
                carry = 0;
            }
            dlo = (dlo << 1) | carry;
        }

        *plow = dlo;
        *phigh = dhi;
        return 0;
    }
}
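
/*
 * Illustrative call (values are not from the original file): dividing
 * 2^64 (high = 1, low = 0) by 3 yields quotient 0x5555555555555555 and
 * remainder 1:
 *
 *     uint64_t lo = 0, hi = 1;
 *     int ovf = divu128(&lo, &hi, 3);
 *     // ovf == 0, lo == 0x5555555555555555 (quotient), hi == 1 (remainder)
 */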

/*
 * Signed 128x64 division.  Returns 1 on overflow, otherwise returns the
 * quotient via plow; phigh is left holding the remainder of the magnitudes.
 */
int divs128(int64_t *plow, int64_t *phigh, int64_t divisor)
{
    int sgn_dvdnd = *phigh < 0;
    int sgn_divsr = divisor < 0;
    int overflow = 0;

    if (sgn_dvdnd) {
        /* Negate the 128-bit dividend (two's complement). */
        *plow = ~(*plow);
        *phigh = ~(*phigh);
        if (*plow == (int64_t)-1) {
            *plow = 0;
            (*phigh)++;
        } else {
            (*plow)++;
        }
    }

    if (sgn_divsr) {
        divisor = 0 - divisor;
    }

    overflow = divu128((uint64_t *)plow, (uint64_t *)phigh, (uint64_t)divisor);

    if (sgn_dvdnd ^ sgn_divsr) {
        *plow = 0 - *plow;
    }

    if (!overflow) {
        /* If the sign of the quotient does not match the expected sign,
         * the result did not fit in a signed 64-bit value. */
        if ((*plow < 0) ^ (sgn_dvdnd ^ sgn_divsr)) {
            overflow = 1;
        }
    }

    return overflow;
}
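
/*
 * Illustrative call (values are not from the original file): -100 / 7.
 * The dividend is negated to 100, divided unsigned, and the quotient is
 * negated back because the signs differ:
 *
 *     int64_t lo = -100, hi = -1;     // -100 sign-extended to 128 bits
 *     int ovf = divs128(&lo, &hi, 7);
 *     // ovf == 0, lo == -14 (quotient),
 *     // hi == 2 (remainder of the magnitudes, 100 % 7)
 */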

#endif

/**
 * urshift - 128-bit Unsigned Right Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are
 * reduced modulo 128; the caller is responsible for verifying
 * the shift range and the plow/phigh pointers.
 */
void urshift(uint64_t *plow, uint64_t *phigh, int32_t shift)
{
    shift &= 127;
    if (shift == 0) {
        return;
    }

    uint64_t h = *phigh >> (shift & 63);
    if (shift >= 64) {
        *plow = h;
        *phigh = 0;
    } else {
        *plow = (*plow >> (shift & 63)) | (*phigh << (64 - (shift & 63)));
        *phigh = h;
    }
}
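
/*
 * Illustrative call (values are not from the original file): shifting
 * 2^64 (high = 1, low = 0) right by 4 gives 2^60:
 *
 *     uint64_t lo = 0, hi = 1;
 *     urshift(&lo, &hi, 4);
 *     // lo == 0x1000000000000000, hi == 0
 */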

/**
 * ulshift - 128-bit Unsigned Left Shift.
 * @plow: in/out - lower 64-bit integer.
 * @phigh: in/out - higher 64-bit integer.
 * @shift: in - bits to shift, between 0 and 127.
 * @overflow: out - set to true if any 1-bit is shifted out.
 *
 * Result is zero-extended and stored in plow/phigh, which are
 * input/output variables.  Shift values outside the range are
 * reduced modulo 128; the caller is responsible for verifying
 * the shift range and the plow/phigh pointers.
 */
void ulshift(uint64_t *plow, uint64_t *phigh, int32_t shift, bool *overflow)
{
    uint64_t low = *plow;
    uint64_t high = *phigh;

    shift &= 127;
    if (shift == 0) {
        return;
    }

    /* Check if any bit will be shifted out. */
    urshift(&low, &high, 128 - shift);
    if (low | high) {
        *overflow = true;
    }

    if (shift >= 64) {
        *phigh = *plow << (shift & 63);
        *plow = 0;
    } else {
        *phigh = (*plow >> (64 - (shift & 63))) | (*phigh << (shift & 63));
        *plow = *plow << shift;
    }
}
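
/*
 * Illustrative calls (values are not from the original file).  Note that
 * *overflow is only ever set, never cleared, so it can accumulate across
 * a sequence of shifts:
 *
 *     bool ovf = false;
 *     uint64_t lo = 0x8000000000000000ULL, hi = 0;
 *     ulshift(&lo, &hi, 1, &ovf);
 *     // lo == 0, hi == 1, ovf == false  (2^63 << 1 == 2^64, nothing lost)
 *
 *     lo = 0; hi = 0x8000000000000000ULL;
 *     ulshift(&lo, &hi, 1, &ovf);
 *     // lo == 0, hi == 0, ovf == true   (the top bit was shifted out)
 */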