/*
 * QEMU VMWARE paravirtual devices - auxiliary code
 *
 * Copyright (c) 2012 Ravello Systems LTD (http://ravellosystems.com)
 *
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * Dmitry Fleytman <dmitry@daynix.com>
 * Yan Vugenfirer <yan@daynix.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
#ifndef VMWARE_UTILS_H
#define VMWARE_UTILS_H

#include "qemu/range.h"

/* Debug trace hook: expands to a no-op unless the including file defines
 * VMW_SHPRN before including this header. */
#ifndef VMW_SHPRN
#define VMW_SHPRN(fmt, ...) do {} while (0)
#endif
/*
 * Shared memory access functions with byte-swap support.
 * Each function contains a printout for reverse-engineering needs.
 */
/*
 * Read <len> bytes of guest physical memory at <addr> into <buf>.
 */
static inline void
vmw_shmem_read(hwaddr addr, void *buf, int len)
{
    VMW_SHPRN("SHMEM r: %" PRIx64 ", len: %d to %p", addr, len, buf);
    cpu_physical_memory_read(addr, buf, len);
}
  33. static inline void
  34. vmw_shmem_write(hwaddr addr, void *buf, int len)
  35. {
  36. VMW_SHPRN("SHMEM w: %" PRIx64 ", len: %d to %p", addr, len, buf);
  37. cpu_physical_memory_write(addr, buf, len);
  38. }
/*
 * Read or write <len> bytes of guest physical memory at <addr>.
 * Direction is selected by <is_write>: non-zero copies <buf> into guest
 * memory, zero copies guest memory into <buf>.
 */
static inline void
vmw_shmem_rw(hwaddr addr, void *buf, int len, int is_write)
{
    VMW_SHPRN("SHMEM r/w: %" PRIx64 ", len: %d (to %p), is write: %d",
              addr, len, buf, is_write);
    cpu_physical_memory_rw(addr, buf, len, is_write);
}
  46. static inline void
  47. vmw_shmem_set(hwaddr addr, uint8 val, int len)
  48. {
  49. int i;
  50. VMW_SHPRN("SHMEM set: %" PRIx64 ", len: %d (value 0x%X)", addr, len, val);
  51. for (i = 0; i < len; i++) {
  52. cpu_physical_memory_write(addr + i, &val, 1);
  53. }
  54. }
  55. static inline uint32_t
  56. vmw_shmem_ld8(hwaddr addr)
  57. {
  58. uint8_t res = ldub_phys(&address_space_memory, addr);
  59. VMW_SHPRN("SHMEM load8: %" PRIx64 " (value 0x%X)", addr, res);
  60. return res;
  61. }
/*
 * Store the single byte <value> to guest physical memory at <addr>.
 */
static inline void
vmw_shmem_st8(hwaddr addr, uint8_t value)
{
    VMW_SHPRN("SHMEM store8: %" PRIx64 " (value 0x%X)", addr, value);
    stb_phys(&address_space_memory, addr, value);
}
  68. static inline uint32_t
  69. vmw_shmem_ld16(hwaddr addr)
  70. {
  71. uint16_t res = lduw_le_phys(&address_space_memory, addr);
  72. VMW_SHPRN("SHMEM load16: %" PRIx64 " (value 0x%X)", addr, res);
  73. return res;
  74. }
/*
 * Store <value> to guest physical memory at <addr> as a little-endian
 * 16-bit quantity.
 */
static inline void
vmw_shmem_st16(hwaddr addr, uint16_t value)
{
    VMW_SHPRN("SHMEM store16: %" PRIx64 " (value 0x%X)", addr, value);
    stw_le_phys(&address_space_memory, addr, value);
}
  81. static inline uint32_t
  82. vmw_shmem_ld32(hwaddr addr)
  83. {
  84. uint32_t res = ldl_le_phys(&address_space_memory, addr);
  85. VMW_SHPRN("SHMEM load32: %" PRIx64 " (value 0x%X)", addr, res);
  86. return res;
  87. }
/*
 * Store <value> to guest physical memory at <addr> as a little-endian
 * 32-bit quantity.
 */
static inline void
vmw_shmem_st32(hwaddr addr, uint32_t value)
{
    VMW_SHPRN("SHMEM store32: %" PRIx64 " (value 0x%X)", addr, value);
    stl_le_phys(&address_space_memory, addr, value);
}
  94. static inline uint64_t
  95. vmw_shmem_ld64(hwaddr addr)
  96. {
  97. uint64_t res = ldq_le_phys(&address_space_memory, addr);
  98. VMW_SHPRN("SHMEM load64: %" PRIx64 " (value %" PRIx64 ")", addr, res);
  99. return res;
  100. }
/*
 * Store <value> to guest physical memory at <addr> as a little-endian
 * 64-bit quantity.
 */
static inline void
vmw_shmem_st64(hwaddr addr, uint64_t value)
{
    VMW_SHPRN("SHMEM store64: %" PRIx64 " (value %" PRIx64 ")", addr, value);
    stq_le_phys(&address_space_memory, addr, value);
}
  107. /* Macros for simplification of operations on array-style registers */
  108. /*
  109. * Whether <addr> lies inside of array-style register defined by <base>,
  110. * number of elements (<cnt>) and element size (<regsize>)
  111. *
  112. */
  113. #define VMW_IS_MULTIREG_ADDR(addr, base, cnt, regsize) \
  114. range_covers_byte(base, cnt * regsize, addr)
/*
 * Returns index of given register (<addr>) in array-style register defined by
 * <base> and element size (<regsize>).
 */
#define VMW_MULTIREG_IDX_BY_ADDR(addr, base, regsize) \
    (((addr) - (base)) / (regsize))

#endif