/*
 * VIS op helpers
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "cpu.h"
#include "exec/helper-proto.h"
  21. /* This function uses non-native bit order */
  22. #define GET_FIELD(X, FROM, TO) \
  23. ((X) >> (63 - (TO)) & ((1ULL << ((TO) - (FROM) + 1)) - 1))
  24. /* This function uses the order in the manuals, i.e. bit 0 is 2^0 */
  25. #define GET_FIELD_SP(X, FROM, TO) \
  26. GET_FIELD(X, 63 - (TO), 63 - (FROM))
  27. target_ulong helper_array8(target_ulong pixel_addr, target_ulong cubesize)
  28. {
  29. return (GET_FIELD_SP(pixel_addr, 60, 63) << (17 + 2 * cubesize)) |
  30. (GET_FIELD_SP(pixel_addr, 39, 39 + cubesize - 1) << (17 + cubesize)) |
  31. (GET_FIELD_SP(pixel_addr, 17 + cubesize - 1, 17) << 17) |
  32. (GET_FIELD_SP(pixel_addr, 56, 59) << 13) |
  33. (GET_FIELD_SP(pixel_addr, 35, 38) << 9) |
  34. (GET_FIELD_SP(pixel_addr, 13, 16) << 5) |
  35. (((pixel_addr >> 55) & 1) << 4) |
  36. (GET_FIELD_SP(pixel_addr, 33, 34) << 2) |
  37. GET_FIELD_SP(pixel_addr, 11, 12);
  38. }
  39. #ifdef HOST_WORDS_BIGENDIAN
  40. #define VIS_B64(n) b[7 - (n)]
  41. #define VIS_W64(n) w[3 - (n)]
  42. #define VIS_SW64(n) sw[3 - (n)]
  43. #define VIS_L64(n) l[1 - (n)]
  44. #define VIS_B32(n) b[3 - (n)]
  45. #define VIS_W32(n) w[1 - (n)]
  46. #else
  47. #define VIS_B64(n) b[n]
  48. #define VIS_W64(n) w[n]
  49. #define VIS_SW64(n) sw[n]
  50. #define VIS_L64(n) l[n]
  51. #define VIS_B32(n) b[n]
  52. #define VIS_W32(n) w[n]
  53. #endif
  54. typedef union {
  55. uint8_t b[8];
  56. uint16_t w[4];
  57. int16_t sw[4];
  58. uint32_t l[2];
  59. uint64_t ll;
  60. float64 d;
  61. } VIS64;
  62. typedef union {
  63. uint8_t b[4];
  64. uint16_t w[2];
  65. uint32_t l;
  66. float32 f;
  67. } VIS32;
  68. uint64_t helper_fpmerge(uint64_t src1, uint64_t src2)
  69. {
  70. VIS64 s, d;
  71. s.ll = src1;
  72. d.ll = src2;
  73. /* Reverse calculation order to handle overlap */
  74. d.VIS_B64(7) = s.VIS_B64(3);
  75. d.VIS_B64(6) = d.VIS_B64(3);
  76. d.VIS_B64(5) = s.VIS_B64(2);
  77. d.VIS_B64(4) = d.VIS_B64(2);
  78. d.VIS_B64(3) = s.VIS_B64(1);
  79. d.VIS_B64(2) = d.VIS_B64(1);
  80. d.VIS_B64(1) = s.VIS_B64(0);
  81. /* d.VIS_B64(0) = d.VIS_B64(0); */
  82. return d.ll;
  83. }
  84. uint64_t helper_fmul8x16(uint64_t src1, uint64_t src2)
  85. {
  86. VIS64 s, d;
  87. uint32_t tmp;
  88. s.ll = src1;
  89. d.ll = src2;
  90. #define PMUL(r) \
  91. tmp = (int32_t)d.VIS_SW64(r) * (int32_t)s.VIS_B64(r); \
  92. if ((tmp & 0xff) > 0x7f) { \
  93. tmp += 0x100; \
  94. } \
  95. d.VIS_W64(r) = tmp >> 8;
  96. PMUL(0);
  97. PMUL(1);
  98. PMUL(2);
  99. PMUL(3);
  100. #undef PMUL
  101. return d.ll;
  102. }
  103. uint64_t helper_fmul8x16al(uint64_t src1, uint64_t src2)
  104. {
  105. VIS64 s, d;
  106. uint32_t tmp;
  107. s.ll = src1;
  108. d.ll = src2;
  109. #define PMUL(r) \
  110. tmp = (int32_t)d.VIS_SW64(1) * (int32_t)s.VIS_B64(r); \
  111. if ((tmp & 0xff) > 0x7f) { \
  112. tmp += 0x100; \
  113. } \
  114. d.VIS_W64(r) = tmp >> 8;
  115. PMUL(0);
  116. PMUL(1);
  117. PMUL(2);
  118. PMUL(3);
  119. #undef PMUL
  120. return d.ll;
  121. }
  122. uint64_t helper_fmul8x16au(uint64_t src1, uint64_t src2)
  123. {
  124. VIS64 s, d;
  125. uint32_t tmp;
  126. s.ll = src1;
  127. d.ll = src2;
  128. #define PMUL(r) \
  129. tmp = (int32_t)d.VIS_SW64(0) * (int32_t)s.VIS_B64(r); \
  130. if ((tmp & 0xff) > 0x7f) { \
  131. tmp += 0x100; \
  132. } \
  133. d.VIS_W64(r) = tmp >> 8;
  134. PMUL(0);
  135. PMUL(1);
  136. PMUL(2);
  137. PMUL(3);
  138. #undef PMUL
  139. return d.ll;
  140. }
  141. uint64_t helper_fmul8sux16(uint64_t src1, uint64_t src2)
  142. {
  143. VIS64 s, d;
  144. uint32_t tmp;
  145. s.ll = src1;
  146. d.ll = src2;
  147. #define PMUL(r) \
  148. tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
  149. if ((tmp & 0xff) > 0x7f) { \
  150. tmp += 0x100; \
  151. } \
  152. d.VIS_W64(r) = tmp >> 8;
  153. PMUL(0);
  154. PMUL(1);
  155. PMUL(2);
  156. PMUL(3);
  157. #undef PMUL
  158. return d.ll;
  159. }
  160. uint64_t helper_fmul8ulx16(uint64_t src1, uint64_t src2)
  161. {
  162. VIS64 s, d;
  163. uint32_t tmp;
  164. s.ll = src1;
  165. d.ll = src2;
  166. #define PMUL(r) \
  167. tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
  168. if ((tmp & 0xff) > 0x7f) { \
  169. tmp += 0x100; \
  170. } \
  171. d.VIS_W64(r) = tmp >> 8;
  172. PMUL(0);
  173. PMUL(1);
  174. PMUL(2);
  175. PMUL(3);
  176. #undef PMUL
  177. return d.ll;
  178. }
  179. uint64_t helper_fmuld8sux16(uint64_t src1, uint64_t src2)
  180. {
  181. VIS64 s, d;
  182. uint32_t tmp;
  183. s.ll = src1;
  184. d.ll = src2;
  185. #define PMUL(r) \
  186. tmp = (int32_t)d.VIS_SW64(r) * ((int32_t)s.VIS_SW64(r) >> 8); \
  187. if ((tmp & 0xff) > 0x7f) { \
  188. tmp += 0x100; \
  189. } \
  190. d.VIS_L64(r) = tmp;
  191. /* Reverse calculation order to handle overlap */
  192. PMUL(1);
  193. PMUL(0);
  194. #undef PMUL
  195. return d.ll;
  196. }
  197. uint64_t helper_fmuld8ulx16(uint64_t src1, uint64_t src2)
  198. {
  199. VIS64 s, d;
  200. uint32_t tmp;
  201. s.ll = src1;
  202. d.ll = src2;
  203. #define PMUL(r) \
  204. tmp = (int32_t)d.VIS_SW64(r) * ((uint32_t)s.VIS_B64(r * 2)); \
  205. if ((tmp & 0xff) > 0x7f) { \
  206. tmp += 0x100; \
  207. } \
  208. d.VIS_L64(r) = tmp;
  209. /* Reverse calculation order to handle overlap */
  210. PMUL(1);
  211. PMUL(0);
  212. #undef PMUL
  213. return d.ll;
  214. }
  215. uint64_t helper_fexpand(uint64_t src1, uint64_t src2)
  216. {
  217. VIS32 s;
  218. VIS64 d;
  219. s.l = (uint32_t)src1;
  220. d.ll = src2;
  221. d.VIS_W64(0) = s.VIS_B32(0) << 4;
  222. d.VIS_W64(1) = s.VIS_B32(1) << 4;
  223. d.VIS_W64(2) = s.VIS_B32(2) << 4;
  224. d.VIS_W64(3) = s.VIS_B32(3) << 4;
  225. return d.ll;
  226. }
  227. #define VIS_HELPER(name, F) \
  228. uint64_t name##16(uint64_t src1, uint64_t src2) \
  229. { \
  230. VIS64 s, d; \
  231. \
  232. s.ll = src1; \
  233. d.ll = src2; \
  234. \
  235. d.VIS_W64(0) = F(d.VIS_W64(0), s.VIS_W64(0)); \
  236. d.VIS_W64(1) = F(d.VIS_W64(1), s.VIS_W64(1)); \
  237. d.VIS_W64(2) = F(d.VIS_W64(2), s.VIS_W64(2)); \
  238. d.VIS_W64(3) = F(d.VIS_W64(3), s.VIS_W64(3)); \
  239. \
  240. return d.ll; \
  241. } \
  242. \
  243. uint32_t name##16s(uint32_t src1, uint32_t src2) \
  244. { \
  245. VIS32 s, d; \
  246. \
  247. s.l = src1; \
  248. d.l = src2; \
  249. \
  250. d.VIS_W32(0) = F(d.VIS_W32(0), s.VIS_W32(0)); \
  251. d.VIS_W32(1) = F(d.VIS_W32(1), s.VIS_W32(1)); \
  252. \
  253. return d.l; \
  254. } \
  255. \
  256. uint64_t name##32(uint64_t src1, uint64_t src2) \
  257. { \
  258. VIS64 s, d; \
  259. \
  260. s.ll = src1; \
  261. d.ll = src2; \
  262. \
  263. d.VIS_L64(0) = F(d.VIS_L64(0), s.VIS_L64(0)); \
  264. d.VIS_L64(1) = F(d.VIS_L64(1), s.VIS_L64(1)); \
  265. \
  266. return d.ll; \
  267. } \
  268. \
  269. uint32_t name##32s(uint32_t src1, uint32_t src2) \
  270. { \
  271. VIS32 s, d; \
  272. \
  273. s.l = src1; \
  274. d.l = src2; \
  275. \
  276. d.l = F(d.l, s.l); \
  277. \
  278. return d.l; \
  279. }
  280. #define FADD(a, b) ((a) + (b))
  281. #define FSUB(a, b) ((a) - (b))
  282. VIS_HELPER(helper_fpadd, FADD)
  283. VIS_HELPER(helper_fpsub, FSUB)
  284. #define VIS_CMPHELPER(name, F) \
  285. uint64_t name##16(uint64_t src1, uint64_t src2) \
  286. { \
  287. VIS64 s, d; \
  288. \
  289. s.ll = src1; \
  290. d.ll = src2; \
  291. \
  292. d.VIS_W64(0) = F(s.VIS_W64(0), d.VIS_W64(0)) ? 1 : 0; \
  293. d.VIS_W64(0) |= F(s.VIS_W64(1), d.VIS_W64(1)) ? 2 : 0; \
  294. d.VIS_W64(0) |= F(s.VIS_W64(2), d.VIS_W64(2)) ? 4 : 0; \
  295. d.VIS_W64(0) |= F(s.VIS_W64(3), d.VIS_W64(3)) ? 8 : 0; \
  296. d.VIS_W64(1) = d.VIS_W64(2) = d.VIS_W64(3) = 0; \
  297. \
  298. return d.ll; \
  299. } \
  300. \
  301. uint64_t name##32(uint64_t src1, uint64_t src2) \
  302. { \
  303. VIS64 s, d; \
  304. \
  305. s.ll = src1; \
  306. d.ll = src2; \
  307. \
  308. d.VIS_L64(0) = F(s.VIS_L64(0), d.VIS_L64(0)) ? 1 : 0; \
  309. d.VIS_L64(0) |= F(s.VIS_L64(1), d.VIS_L64(1)) ? 2 : 0; \
  310. d.VIS_L64(1) = 0; \
  311. \
  312. return d.ll; \
  313. }
  314. #define FCMPGT(a, b) ((a) > (b))
  315. #define FCMPEQ(a, b) ((a) == (b))
  316. #define FCMPLE(a, b) ((a) <= (b))
  317. #define FCMPNE(a, b) ((a) != (b))
  318. VIS_CMPHELPER(helper_fcmpgt, FCMPGT)
  319. VIS_CMPHELPER(helper_fcmpeq, FCMPEQ)
  320. VIS_CMPHELPER(helper_fcmple, FCMPLE)
  321. VIS_CMPHELPER(helper_fcmpne, FCMPNE)
  322. uint64_t helper_pdist(uint64_t sum, uint64_t src1, uint64_t src2)
  323. {
  324. int i;
  325. for (i = 0; i < 8; i++) {
  326. int s1, s2;
  327. s1 = (src1 >> (56 - (i * 8))) & 0xff;
  328. s2 = (src2 >> (56 - (i * 8))) & 0xff;
  329. /* Absolute value of difference. */
  330. s1 -= s2;
  331. if (s1 < 0) {
  332. s1 = -s1;
  333. }
  334. sum += s1;
  335. }
  336. return sum;
  337. }
  338. uint32_t helper_fpack16(uint64_t gsr, uint64_t rs2)
  339. {
  340. int scale = (gsr >> 3) & 0xf;
  341. uint32_t ret = 0;
  342. int byte;
  343. for (byte = 0; byte < 4; byte++) {
  344. uint32_t val;
  345. int16_t src = rs2 >> (byte * 16);
  346. int32_t scaled = src << scale;
  347. int32_t from_fixed = scaled >> 7;
  348. val = (from_fixed < 0 ? 0 :
  349. from_fixed > 255 ? 255 : from_fixed);
  350. ret |= val << (8 * byte);
  351. }
  352. return ret;
  353. }
  354. uint64_t helper_fpack32(uint64_t gsr, uint64_t rs1, uint64_t rs2)
  355. {
  356. int scale = (gsr >> 3) & 0x1f;
  357. uint64_t ret = 0;
  358. int word;
  359. ret = (rs1 << 8) & ~(0x000000ff000000ffULL);
  360. for (word = 0; word < 2; word++) {
  361. uint64_t val;
  362. int32_t src = rs2 >> (word * 32);
  363. int64_t scaled = (int64_t)src << scale;
  364. int64_t from_fixed = scaled >> 23;
  365. val = (from_fixed < 0 ? 0 :
  366. (from_fixed > 255) ? 255 : from_fixed);
  367. ret |= val << (32 * word);
  368. }
  369. return ret;
  370. }
  371. uint32_t helper_fpackfix(uint64_t gsr, uint64_t rs2)
  372. {
  373. int scale = (gsr >> 3) & 0x1f;
  374. uint32_t ret = 0;
  375. int word;
  376. for (word = 0; word < 2; word++) {
  377. uint32_t val;
  378. int32_t src = rs2 >> (word * 32);
  379. int64_t scaled = src << scale;
  380. int64_t from_fixed = scaled >> 16;
  381. val = (from_fixed < -32768 ? -32768 :
  382. from_fixed > 32767 ? 32767 : from_fixed);
  383. ret |= (val & 0xffff) << (word * 16);
  384. }
  385. return ret;
  386. }
  387. uint64_t helper_bshuffle(uint64_t gsr, uint64_t src1, uint64_t src2)
  388. {
  389. union {
  390. uint64_t ll[2];
  391. uint8_t b[16];
  392. } s;
  393. VIS64 r;
  394. uint32_t i, mask, host;
  395. /* Set up S such that we can index across all of the bytes. */
  396. #ifdef HOST_WORDS_BIGENDIAN
  397. s.ll[0] = src1;
  398. s.ll[1] = src2;
  399. host = 0;
  400. #else
  401. s.ll[1] = src1;
  402. s.ll[0] = src2;
  403. host = 15;
  404. #endif
  405. mask = gsr >> 32;
  406. for (i = 0; i < 8; ++i) {
  407. unsigned e = (mask >> (28 - i*4)) & 0xf;
  408. r.VIS_B64(i) = s.b[e ^ host];
  409. }
  410. return r.ll;
  411. }