
  1. #ifndef CPU_COMMON_H
  2. #define CPU_COMMON_H 1
  3. /* CPU interfaces that are target indpendent. */
  4. #ifdef TARGET_PHYS_ADDR_BITS
  5. #include "targphys.h"
  6. #endif
  7. #ifndef NEED_CPU_H
  8. #include "poison.h"
  9. #endif
  10. #include "bswap.h"
  11. #include "qemu-queue.h"
  12. #if !defined(CONFIG_USER_ONLY)
  13. enum device_endian {
  14. DEVICE_NATIVE_ENDIAN,
  15. DEVICE_BIG_ENDIAN,
  16. DEVICE_LITTLE_ENDIAN,
  17. };
  18. /* address in the RAM (different from a physical address) */
  19. #if defined(CONFIG_XEN_BACKEND) && TARGET_PHYS_ADDR_BITS == 64
  20. typedef uint64_t ram_addr_t;
  21. # define RAM_ADDR_MAX UINT64_MAX
  22. # define RAM_ADDR_FMT "%" PRIx64
  23. #else
  24. typedef unsigned long ram_addr_t;
  25. # define RAM_ADDR_MAX ULONG_MAX
  26. # define RAM_ADDR_FMT "%lx"
  27. #endif
  28. /* memory API */
  29. typedef void CPUWriteMemoryFunc(void *opaque, target_phys_addr_t addr, uint32_t value);
  30. typedef uint32_t CPUReadMemoryFunc(void *opaque, target_phys_addr_t addr);
  31. void cpu_register_physical_memory_log(target_phys_addr_t start_addr,
  32. ram_addr_t size,
  33. ram_addr_t phys_offset,
  34. ram_addr_t region_offset,
  35. bool log_dirty);
  36. static inline void cpu_register_physical_memory_offset(target_phys_addr_t start_addr,
  37. ram_addr_t size,
  38. ram_addr_t phys_offset,
  39. ram_addr_t region_offset)
  40. {
  41. cpu_register_physical_memory_log(start_addr, size, phys_offset,
  42. region_offset, false);
  43. }
  44. static inline void cpu_register_physical_memory(target_phys_addr_t start_addr,
  45. ram_addr_t size,
  46. ram_addr_t phys_offset)
  47. {
  48. cpu_register_physical_memory_offset(start_addr, size, phys_offset, 0);
  49. }
  50. ram_addr_t cpu_get_physical_page_desc(target_phys_addr_t addr);
  51. ram_addr_t qemu_ram_alloc_from_ptr(DeviceState *dev, const char *name,
  52. ram_addr_t size, void *host);
  53. ram_addr_t qemu_ram_alloc(DeviceState *dev, const char *name, ram_addr_t size);
  54. void qemu_ram_free(ram_addr_t addr);
  55. void qemu_ram_free_from_ptr(ram_addr_t addr);
  56. void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
  57. /* This should only be used for ram local to a device. */
  58. void *qemu_get_ram_ptr(ram_addr_t addr);
  59. void *qemu_ram_ptr_length(ram_addr_t addr, ram_addr_t *size);
  60. /* Same but slower, to use for migration, where the order of
  61. * RAMBlocks must not change. */
  62. void *qemu_safe_ram_ptr(ram_addr_t addr);
  63. void qemu_put_ram_ptr(void *addr);
  64. /* This should not be used by devices. */
  65. int qemu_ram_addr_from_host(void *ptr, ram_addr_t *ram_addr);
  66. ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
  67. int cpu_register_io_memory(CPUReadMemoryFunc * const *mem_read,
  68. CPUWriteMemoryFunc * const *mem_write,
  69. void *opaque, enum device_endian endian);
  70. void cpu_unregister_io_memory(int table_address);
  71. void cpu_physical_memory_rw(target_phys_addr_t addr, uint8_t *buf,
  72. int len, int is_write);
  73. static inline void cpu_physical_memory_read(target_phys_addr_t addr,
  74. void *buf, int len)
  75. {
  76. cpu_physical_memory_rw(addr, buf, len, 0);
  77. }
  78. static inline void cpu_physical_memory_write(target_phys_addr_t addr,
  79. const void *buf, int len)
  80. {
  81. cpu_physical_memory_rw(addr, (void *)buf, len, 1);
  82. }
  83. void *cpu_physical_memory_map(target_phys_addr_t addr,
  84. target_phys_addr_t *plen,
  85. int is_write);
  86. void cpu_physical_memory_unmap(void *buffer, target_phys_addr_t len,
  87. int is_write, target_phys_addr_t access_len);
  88. void *cpu_register_map_client(void *opaque, void (*callback)(void *opaque));
  89. void cpu_unregister_map_client(void *cookie);
  90. struct CPUPhysMemoryClient;
  91. typedef struct CPUPhysMemoryClient CPUPhysMemoryClient;
  92. struct CPUPhysMemoryClient {
  93. void (*set_memory)(struct CPUPhysMemoryClient *client,
  94. target_phys_addr_t start_addr,
  95. ram_addr_t size,
  96. ram_addr_t phys_offset,
  97. bool log_dirty);
  98. int (*sync_dirty_bitmap)(struct CPUPhysMemoryClient *client,
  99. target_phys_addr_t start_addr,
  100. target_phys_addr_t end_addr);
  101. int (*migration_log)(struct CPUPhysMemoryClient *client,
  102. int enable);
  103. int (*log_start)(struct CPUPhysMemoryClient *client,
  104. target_phys_addr_t phys_addr, ram_addr_t size);
  105. int (*log_stop)(struct CPUPhysMemoryClient *client,
  106. target_phys_addr_t phys_addr, ram_addr_t size);
  107. QLIST_ENTRY(CPUPhysMemoryClient) list;
  108. };
  109. void cpu_register_phys_memory_client(CPUPhysMemoryClient *);
  110. void cpu_unregister_phys_memory_client(CPUPhysMemoryClient *);
  111. /* Coalesced MMIO regions are areas where write operations can be reordered.
  112. * This usually implies that write operations are side-effect free. This allows
  113. * batching which can make a major impact on performance when using
  114. * virtualization.
  115. */
  116. void qemu_register_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
  117. void qemu_unregister_coalesced_mmio(target_phys_addr_t addr, ram_addr_t size);
  118. void qemu_flush_coalesced_mmio_buffer(void);
  119. uint32_t ldub_phys(target_phys_addr_t addr);
  120. uint32_t lduw_le_phys(target_phys_addr_t addr);
  121. uint32_t lduw_be_phys(target_phys_addr_t addr);
  122. uint32_t ldl_le_phys(target_phys_addr_t addr);
  123. uint32_t ldl_be_phys(target_phys_addr_t addr);
  124. uint64_t ldq_le_phys(target_phys_addr_t addr);
  125. uint64_t ldq_be_phys(target_phys_addr_t addr);
  126. void stb_phys(target_phys_addr_t addr, uint32_t val);
  127. void stw_le_phys(target_phys_addr_t addr, uint32_t val);
  128. void stw_be_phys(target_phys_addr_t addr, uint32_t val);
  129. void stl_le_phys(target_phys_addr_t addr, uint32_t val);
  130. void stl_be_phys(target_phys_addr_t addr, uint32_t val);
  131. void stq_le_phys(target_phys_addr_t addr, uint64_t val);
  132. void stq_be_phys(target_phys_addr_t addr, uint64_t val);
  133. #ifdef NEED_CPU_H
  134. uint32_t lduw_phys(target_phys_addr_t addr);
  135. uint32_t ldl_phys(target_phys_addr_t addr);
  136. uint64_t ldq_phys(target_phys_addr_t addr);
  137. void stl_phys_notdirty(target_phys_addr_t addr, uint32_t val);
  138. void stq_phys_notdirty(target_phys_addr_t addr, uint64_t val);
  139. void stw_phys(target_phys_addr_t addr, uint32_t val);
  140. void stl_phys(target_phys_addr_t addr, uint32_t val);
  141. void stq_phys(target_phys_addr_t addr, uint64_t val);
  142. #endif
  143. void cpu_physical_memory_write_rom(target_phys_addr_t addr,
  144. const uint8_t *buf, int len);
  145. #define IO_MEM_SHIFT 3
  146. #define IO_MEM_RAM (0 << IO_MEM_SHIFT) /* hardcoded offset */
  147. #define IO_MEM_ROM (1 << IO_MEM_SHIFT) /* hardcoded offset */
  148. #define IO_MEM_UNASSIGNED (2 << IO_MEM_SHIFT)
  149. #define IO_MEM_NOTDIRTY (3 << IO_MEM_SHIFT)
  150. #define IO_MEM_SUBPAGE_RAM (4 << IO_MEM_SHIFT)
  151. /* Acts like a ROM when read and like a device when written. */
  152. #define IO_MEM_ROMD (1)
  153. #define IO_MEM_SUBPAGE (2)
  154. #endif
  155. #endif /* !CPU_COMMON_H */