/* cpu-common.h */
#ifndef CPU_COMMON_H
#define CPU_COMMON_H 1

/* CPU interfaces that are target independent. */

/* Host architectures on which word accesses must be aligned. */
#if defined(__arm__) || defined(__sparc__) || defined(__mips__) || defined(__hppa__) || defined(__ia64__)
#define WORDS_ALIGNED
#endif

#ifdef TARGET_PHYS_ADDR_BITS
#include "targphys.h"
#endif

#ifndef NEED_CPU_H
/* NOTE(review): presumably poisons target-specific symbols so they cannot
 * be used from target-independent code — confirm against poison.h. */
#include "poison.h"
#endif

#include "bswap.h"
#include "qemu-queue.h"

#if !defined(CONFIG_USER_ONLY)
/* Byte order of an emulated device's MMIO registers, relative to the
 * target CPU; passed to cpu_register_io_memory(). */
enum device_endian {
    DEVICE_NATIVE_ENDIAN,   /* same byte order as the target CPU */
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};
/* address in the RAM (different from a physical address) */
typedef unsigned long ram_addr_t;

/* memory API */

/* Callback types for MMIO handlers registered via cpu_register_io_memory(). */
typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr,
                                uint32_t value);
typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);

/* Map the guest-physical range [start_addr, start_addr + size) to
 * phys_offset.  region_offset is the offset within the backing region;
 * log_dirty requests dirty logging for the range (it is forwarded to
 * CPUPhysMemoryClient::set_memory). */
void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
                                      ram_addr_t size,
                                      ram_addr_t phys_offset,
                                      ram_addr_t region_offset,
                                      bool log_dirty);
/* Convenience wrapper around cpu_register_physical_memory_log() that
 * registers the range with dirty logging disabled (log_dirty = false). */
static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
                                                       ram_addr_t size,
                                                       ram_addr_t phys_offset,
                                                       ram_addr_t region_offset)
{
    cpu_register_physical_memory_log(start_addr, size, phys_offset,
                                     region_offset, false);
}
/* Register a physical memory range with a zero region offset and no
 * dirty logging. */
static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
                                                ram_addr_t size,
                                                ram_addr_t phys_offset)
{
    cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
}
/* Look up the phys_offset/IO descriptor for the page containing addr. */
ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);

/* RAM block management.  qemu_ram_alloc_from_ptr() registers an existing
 * host buffer as guest RAM; qemu_ram_alloc() also allocates the backing
 * storage.  Both return the ram_addr_t base of the new block. */
ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
                                   ram_addr_t size, void *host);
ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
void qemu_ram_free(ram_addr_t addr);
void qemu_ram_free_from_ptr(ram_addr_t addr);
void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should only be used for ram local to a device. */
void *qemu_get_ram_ptr(ram_addr_t addr);
void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
/* Same but slower, to use for migration, where the order of
 * RAMBlocks must not change. */
void *qemu_safe_ram_ptr(ram_addr_t addr);
void qemu_put_ram_ptr(void *addr);
/* This should not be used by devices. */
int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);

/* Register an MMIO region's read/write callback tables; returns an
 * io-memory index that is released with cpu_unregister_io_memory(). */
int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
                           CPUWriteMemoryFunc * const *mem_write,
                           void *opaque, enum device_endian endian);
void cpu_unregister_io_memory(int table_address);

/* Copy len bytes between guest physical memory at addr and the host
 * buffer buf; is_write non-zero means host -> guest. */
void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
                            int len, int is_write);
  68. static inline void cpu_physical_memory_read(target_phys_addr_t addr,
  69. void *buf, int len)
  70. {
  71. cpu_physical_memory_rw(addr, buf, len, 0);
  72. }
  73. static inline void cpu_physical_memory_write(target_phys_addr_t addr,
  74. const void *buf, int len)
  75. {
  76. cpu_physical_memory_rw(addr, (void *)buf, len, 1);
  77. }
/* Map a guest-physical range into host address space for direct access.
 * On return *plen holds the length actually mapped (may be shorter than
 * requested).  Release with cpu_physical_memory_unmap(), passing in
 * access_len the number of bytes actually accessed. */
void *cpu_physical_memory_map(target_phys_addr_t addr,
                              target_phys_addr_t *plen,
                              int is_write);
void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
                               int is_write, target_phys_addr_t access_len);

/* Register a callback and get back an opaque cookie for
 * cpu_unregister_map_client().  NOTE(review): presumably the callback
 * fires when a failed cpu_physical_memory_map() can be retried —
 * confirm against the implementation in exec.c. */
void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
void cpu_unregister_map_client(void *cookie);
struct CPUPhysMemoryClient;
typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;

/* Observer interface notified of physical-memory mapping and dirty
 * logging changes.  Instances are linked into a global list via
 * cpu_register_phys_memory_client(). */
struct CPUPhysMemoryClient {
    /* A range was (re)mapped; parameters mirror
     * cpu_register_physical_memory_log(). */
    void (*set_memory)(struct CPUPhysMemoryClient *client,
                       target_phys_addr_t start_addr,
                       ram_addr_t size,
                       ram_addr_t phys_offset,
                       bool log_dirty);
    /* Synchronize the dirty bitmap for [start_addr, end_addr). */
    int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
                             target_phys_addr_t start_addr,
                             target_phys_addr_t end_addr);
    /* Enable/disable migration logging. */
    int (*migration_log)(struct CPUPhysMemoryClient *client,
                         int enable);
    /* Start/stop dirty logging for a range. */
    int (*log_start)(struct CPUPhysMemoryClient *client,
                     target_phys_addr_t phys_addr, ram_addr_t size);
    int (*log_stop)(struct CPUPhysMemoryClient *client,
                    target_phys_addr_t phys_addr, ram_addr_t size);
    /* Linkage in the global client list. */
    QLIST_ENTRY(CPUPhysMemoryClient) list;
};

void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching which can make a major impact on performance when using
 * virtualization.
 */
void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
void qemu_flush_coalesced_mmio_buffer(void);

/* Fixed-endianness load/store helpers on guest physical addresses:
 * b = 8-bit, w = 16-bit, l = 32-bit, q = 64-bit; _le/_be select
 * little-/big-endian byte order. */
uint32_t ldub_phys(target_phys_addr_t addr);
uint32_t lduw_le_phys(target_phys_addr_t addr);
uint32_t lduw_be_phys(target_phys_addr_t addr);
uint32_t ldl_le_phys(target_phys_addr_t addr);
uint32_t ldl_be_phys(target_phys_addr_t addr);
uint64_t ldq_le_phys(target_phys_addr_t addr);
uint64_t ldq_be_phys(target_phys_addr_t addr);
void stb_phys(target_phys_addr_t addr, uint32_t val);
void stw_le_phys(target_phys_addr_t addr, uint32_t val);
void stw_be_phys(target_phys_addr_t addr, uint32_t val);
void stl_le_phys(target_phys_addr_t addr, uint32_t val);
void stl_be_phys(target_phys_addr_t addr, uint32_t val);
void stq_le_phys(target_phys_addr_t addr, uint64_t val);
void stq_be_phys(target_phys_addr_t addr, uint64_t val);

#ifdef NEED_CPU_H
/* Target-endian variants; only available in per-target compilation units. */
uint32_t lduw_phys(target_phys_addr_t addr);
uint32_t ldl_phys(target_phys_addr_t addr);
uint64_t ldq_phys(target_phys_addr_t addr);
/* NOTE(review): _notdirty apparently stores without updating the dirty
 * bitmap — inferred from the name; confirm against exec.c. */
void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
void stw_phys(target_phys_addr_t addr, uint32_t val);
void stl_phys(target_phys_addr_t addr, uint32_t val);
void stq_phys(target_phys_addr_t addr, uint64_t val);
#endif

/* Write buf into memory registered as ROM.  NOTE(review): presumably
 * bypasses the normal read-only handling (e.g. for firmware loading) —
 * confirm against the implementation. */
void cpu_physical_memory_write_rom(target_phys_addr_t addr,
                                   const uint8_t *buf, int len);
/* io-memory index encoding: the low IO_MEM_SHIFT bits carry flags
 * (IO_MEM_ROMD, IO_MEM_SUBPAGE); the remaining bits select the entry
 * in the io-memory table. */
#define IO_MEM_SHIFT 3

#define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
#define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
#define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)

/* Acts like a ROM when read and like a device when written. */
#define IO_MEM_ROMD (1)
#define IO_MEM_SUBPAGE (2)

#endif /* !CONFIG_USER_ONLY */

#endif /* !CPU_COMMON_H */