/*
 * QEMU NVM Express
 *
 * Copyright (c) 2012 Intel Corporation
 * Copyright (c) 2021 Minwoo Im
 * Copyright (c) 2021 Samsung Electronics Co., Ltd.
 *
 * Authors:
 *   Keith Busch            <kbusch@kernel.org>
 *   Klaus Jensen           <k.jensen@samsung.com>
 *   Gollu Appalanaidu      <anaidu.gollu@samsung.com>
 *   Dmitry Fomichev        <dmitry.fomichev@wdc.com>
 *   Minwoo Im              <minwoo.im.dev@gmail.com>
 *
 * This code is licensed under the GNU GPL v2 or later.
 */

#ifndef HW_NVME_NVME_H
#define HW_NVME_NVME_H

#include "qemu/uuid.h"
#include "hw/pci/pci_device.h"
#include "hw/block/block.h"

#include "block/nvme.h"

#define NVME_MAX_CONTROLLERS 256
#define NVME_MAX_NAMESPACES 256
#define NVME_EUI64_DEFAULT ((uint64_t)0x5254000000000000)
#define NVME_FDP_MAX_EVENTS 63
#define NVME_FDP_MAXPIDS 128

/*
 * The controller only supports Submission and Completion Queue Entry Sizes
 * of 64 and 16 bytes respectively.
 */
#define NVME_SQES 6
#define NVME_CQES 4
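/*
 * Illustrative note (not part of the original header): these are log2
 * values, so 1 << NVME_SQES == 64 bytes per SQ entry and
 * 1 << NVME_CQES == 16 bytes per CQ entry, the same encoding used by the
 * CC.IOSQES/IOCQES register fields and the Identify Controller SQES/CQES
 * bytes.
 */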
QEMU_BUILD_BUG_ON(NVME_MAX_NAMESPACES > NVME_NSID_BROADCAST - 1);

typedef struct NvmeCtrl NvmeCtrl;
typedef struct NvmeNamespace NvmeNamespace;

#define TYPE_NVME_BUS "nvme-bus"
OBJECT_DECLARE_SIMPLE_TYPE(NvmeBus, NVME_BUS)

typedef struct NvmeBus {
    BusState parent_bus;
} NvmeBus;

#define TYPE_NVME_SUBSYS "nvme-subsys"
#define NVME_SUBSYS(obj) \
    OBJECT_CHECK(NvmeSubsystem, (obj), TYPE_NVME_SUBSYS)
#define SUBSYS_SLOT_RSVD (void *)0xFFFF

typedef struct NvmeReclaimUnit {
    uint64_t ruamw;
} NvmeReclaimUnit;

typedef struct NvmeRuHandle {
    uint8_t  ruht;
    uint8_t  ruha;
    uint64_t event_filter;
    uint8_t  lbafi;
    uint64_t ruamw;

    /* reclaim units indexed by reclaim group */
    NvmeReclaimUnit *rus;
} NvmeRuHandle;

typedef struct NvmeFdpEventBuffer {
    NvmeFdpEvent events[NVME_FDP_MAX_EVENTS];
    unsigned int nelems;
    unsigned int start;
    unsigned int next;
} NvmeFdpEventBuffer;

typedef struct NvmeEnduranceGroup {
    uint8_t event_conf;

    struct {
        NvmeFdpEventBuffer host_events, ctrl_events;

        uint16_t nruh;
        uint16_t nrg;
        uint8_t  rgif;
        uint64_t runs;

        uint64_t hbmw;
        uint64_t mbmw;
        uint64_t mbe;

        bool enabled;

        NvmeRuHandle *ruhs;
    } fdp;
} NvmeEnduranceGroup;

typedef struct NvmeSubsystem {
    DeviceState parent_obj;
    NvmeBus     bus;
    uint8_t     subnqn[256];
    char        *serial;

    NvmeCtrl           *ctrls[NVME_MAX_CONTROLLERS];
    NvmeNamespace      *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeEnduranceGroup endgrp;

    struct {
        char *nqn;

        struct {
            bool     enabled;
            uint64_t runs;
            uint16_t nruh;
            uint32_t nrg;
        } fdp;
    } params;
} NvmeSubsystem;

int nvme_subsys_register_ctrl(NvmeCtrl *n, Error **errp);
void nvme_subsys_unregister_ctrl(NvmeSubsystem *subsys, NvmeCtrl *n);

static inline NvmeCtrl *nvme_subsys_ctrl(NvmeSubsystem *subsys,
                                         uint32_t cntlid)
{
    if (!subsys || cntlid >= NVME_MAX_CONTROLLERS) {
        return NULL;
    }

    if (subsys->ctrls[cntlid] == SUBSYS_SLOT_RSVD) {
        return NULL;
    }

    return subsys->ctrls[cntlid];
}

static inline NvmeNamespace *nvme_subsys_ns(NvmeSubsystem *subsys,
                                            uint32_t nsid)
{
    if (!subsys || !nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return subsys->namespaces[nsid];
}
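/*
 * Usage sketch (illustrative only, not part of the original header): walk
 * every controller attached to a subsystem. nvme_subsys_ctrl() hides the
 * SUBSYS_SLOT_RSVD sentinel, so callers only ever see real NvmeCtrl
 * pointers or NULL:
 *
 *     for (int cntlid = 0; cntlid < NVME_MAX_CONTROLLERS; cntlid++) {
 *         NvmeCtrl *ctrl = nvme_subsys_ctrl(subsys, cntlid);
 *         if (ctrl) {
 *             ... operate on ctrl ...
 *         }
 *     }
 */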
#define TYPE_NVME_NS "nvme-ns"
#define NVME_NS(obj) \
    OBJECT_CHECK(NvmeNamespace, (obj), TYPE_NVME_NS)

typedef struct NvmeZone {
    NvmeZoneDescr   d;
    uint64_t        w_ptr;
    QTAILQ_ENTRY(NvmeZone) entry;
} NvmeZone;

#define FDP_EVT_MAX 0xff
#define NVME_FDP_MAX_NS_RUHS 32u
#define FDPVSS 0

static const uint8_t nvme_fdp_evf_shifts[FDP_EVT_MAX] = {
    /* Host events */
    [FDP_EVT_RU_NOT_FULLY_WRITTEN]      = 0,
    [FDP_EVT_RU_ATL_EXCEEDED]           = 1,
    [FDP_EVT_CTRL_RESET_RUH]            = 2,
    [FDP_EVT_INVALID_PID]               = 3,
    /* CTRL events */
    [FDP_EVT_MEDIA_REALLOC]             = 32,
    [FDP_EVT_RUH_IMPLICIT_RU_CHANGE]    = 33,
};
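/*
 * Note (added for clarity, not in the original header): each shift selects
 * the bit for that event type in the 64-bit reclaim unit handle event
 * filter; judging by the table above, host events occupy the low 32 bits
 * and controller events the high 32 bits, e.g.:
 *
 *     uint64_t mask = 1ULL << nvme_fdp_evf_shifts[FDP_EVT_MEDIA_REALLOC];
 */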
#define NGUID_LEN 16

typedef struct {
    uint8_t data[NGUID_LEN];
} NvmeNGUID;

bool nvme_nguid_is_null(const NvmeNGUID *nguid);

extern const PropertyInfo qdev_prop_nguid;

#define DEFINE_PROP_NGUID_NODEFAULT(_name, _state, _field) \
    DEFINE_PROP(_name, _state, _field, qdev_prop_nguid, NvmeNGUID)

typedef struct NvmeNamespaceParams {
    bool     detached;
    bool     shared;
    uint32_t nsid;
    QemuUUID uuid;
    NvmeNGUID nguid;
    uint64_t eui64;
    bool     eui64_default;

    uint16_t ms;
    uint8_t  mset;
    uint8_t  pi;
    uint8_t  pil;
    uint8_t  pif;

    uint16_t mssrl;
    uint32_t mcl;
    uint8_t  msrc;

    bool     zoned;
    bool     cross_zone_read;
    uint64_t zone_size_bs;
    uint64_t zone_cap_bs;
    uint32_t max_active_zones;
    uint32_t max_open_zones;
    uint32_t zd_extension_size;

    uint32_t numzrwa;
    uint64_t zrwas;
    uint64_t zrwafg;

    struct {
        char *ruhs;
    } fdp;
} NvmeNamespaceParams;

typedef struct NvmeAtomic {
    uint32_t atomic_max_write_size;
    bool     atomic_writes;
} NvmeAtomic;

typedef struct NvmeNamespace {
    DeviceState  parent_obj;
    BlockConf    blkconf;
    int32_t      bootindex;
    int64_t      size;
    int64_t      moff;
    NvmeIdNs     id_ns;
    NvmeIdNsNvm  id_ns_nvm;
    NvmeIdNsInd  id_ns_ind;
    NvmeLBAF     lbaf;
    unsigned int nlbaf;
    size_t       lbasz;
    uint8_t      csi;
    uint16_t     status;
    int          attached;
    uint8_t      pif;

    struct {
        uint16_t zrwas;
        uint16_t zrwafg;
        uint32_t numzrwa;
    } zns;

    QTAILQ_ENTRY(NvmeNamespace) entry;

    NvmeIdNsZoned   *id_ns_zoned;
    NvmeZone        *zone_array;
    QTAILQ_HEAD(, NvmeZone) exp_open_zones;
    QTAILQ_HEAD(, NvmeZone) imp_open_zones;
    QTAILQ_HEAD(, NvmeZone) closed_zones;
    QTAILQ_HEAD(, NvmeZone) full_zones;
    uint32_t        num_zones;
    uint64_t        zone_size;
    uint64_t        zone_capacity;
    uint32_t        zone_size_log2;
    uint8_t         *zd_extensions;
    int32_t         nr_open_zones;
    int32_t         nr_active_zones;

    NvmeNamespaceParams params;
    NvmeSubsystem       *subsys;
    NvmeEnduranceGroup  *endgrp;

    /* NULL for shared namespaces; set to specific controller if private */
    NvmeCtrl *ctrl;

    struct {
        uint32_t err_rec;
    } features;

    struct {
        uint16_t nphs;
        /* reclaim unit handle identifiers indexed by placement handle */
        uint16_t *phs;
    } fdp;
} NvmeNamespace;

static inline uint32_t nvme_nsid(NvmeNamespace *ns)
{
    if (ns) {
        return ns->params.nsid;
    }

    return 0;
}

static inline size_t nvme_l2b(NvmeNamespace *ns, uint64_t lba)
{
    return lba << ns->lbaf.ds;
}

static inline size_t nvme_m2b(NvmeNamespace *ns, uint64_t lba)
{
    return ns->lbaf.ms * lba;
}

static inline int64_t nvme_moff(NvmeNamespace *ns, uint64_t lba)
{
    return ns->moff + nvme_m2b(ns, lba);
}
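/*
 * Worked example (illustrative, not from the original header): for a
 * namespace formatted with 4096-byte data blocks (lbaf.ds == 12) and 16
 * bytes of separate metadata per block (lbaf.ms == 16):
 *
 *     nvme_l2b(ns, 8)  == 8 << 12  == 32768 data bytes
 *     nvme_m2b(ns, 8)  == 16 * 8   == 128 metadata bytes
 *     nvme_moff(ns, 8) == ns->moff + 128, the metadata byte offset of LBA 8
 */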
static inline bool nvme_ns_ext(NvmeNamespace *ns)
{
    return !!NVME_ID_NS_FLBAS_EXTENDED(ns->id_ns.flbas);
}

static inline NvmeZoneState nvme_get_zone_state(NvmeZone *zone)
{
    return zone->d.zs >> 4;
}

static inline void nvme_set_zone_state(NvmeZone *zone, NvmeZoneState state)
{
    zone->d.zs = state << 4;
}
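/*
 * Note (added for clarity, not in the original header): the Zone State
 * field of the zone descriptor lives in the upper nibble of the ZS byte
 * per the NVMe Zoned Namespace specification, hence the shifts by 4 above.
 */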
static inline uint64_t nvme_zone_rd_boundary(NvmeNamespace *ns, NvmeZone *zone)
{
    return zone->d.zslba + ns->zone_size;
}

static inline uint64_t nvme_zone_wr_boundary(NvmeZone *zone)
{
    return zone->d.zslba + zone->d.zcap;
}

static inline bool nvme_wp_is_valid(NvmeZone *zone)
{
    uint8_t st = nvme_get_zone_state(zone);

    return st != NVME_ZONE_STATE_FULL &&
           st != NVME_ZONE_STATE_READ_ONLY &&
           st != NVME_ZONE_STATE_OFFLINE;
}

static inline uint8_t *nvme_get_zd_extension(NvmeNamespace *ns,
                                             uint32_t zone_idx)
{
    return &ns->zd_extensions[zone_idx * ns->params.zd_extension_size];
}
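/*
 * Note (added for clarity, not in the original header): zone descriptor
 * extensions are kept in one flat allocation of fixed-size records, so the
 * extension for zone i starts at byte i * params.zd_extension_size.
 */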
static inline void nvme_aor_inc_open(NvmeNamespace *ns)
{
    assert(ns->nr_open_zones >= 0);
    if (ns->params.max_open_zones) {
        ns->nr_open_zones++;
        assert(ns->nr_open_zones <= ns->params.max_open_zones);
    }
}

static inline void nvme_aor_dec_open(NvmeNamespace *ns)
{
    if (ns->params.max_open_zones) {
        assert(ns->nr_open_zones > 0);
        ns->nr_open_zones--;
    }
    assert(ns->nr_open_zones >= 0);
}

static inline void nvme_aor_inc_active(NvmeNamespace *ns)
{
    assert(ns->nr_active_zones >= 0);
    if (ns->params.max_active_zones) {
        ns->nr_active_zones++;
        assert(ns->nr_active_zones <= ns->params.max_active_zones);
    }
}

static inline void nvme_aor_dec_active(NvmeNamespace *ns)
{
    if (ns->params.max_active_zones) {
        assert(ns->nr_active_zones > 0);
        ns->nr_active_zones--;
        assert(ns->nr_active_zones >= ns->nr_open_zones);
    }
    assert(ns->nr_active_zones >= 0);
}
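/*
 * Usage sketch (illustrative only): open zones are a subset of active
 * zones, as the nr_active_zones >= nr_open_zones assertion above implies,
 * so the counters are adjusted in subset order, e.g. when moving a zone
 * from Empty to Explicitly Opened:
 *
 *     nvme_aor_inc_active(ns);
 *     nvme_aor_inc_open(ns);
 *
 * and in the reverse order when the zone is closed or finished.
 */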
static inline void nvme_fdp_stat_inc(uint64_t *a, uint64_t b)
{
    uint64_t ret = *a + b;
    *a = ret < *a ? UINT64_MAX : ret;
}
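/*
 * Note (added for clarity, not in the original header): this is a
 * saturating add. Unsigned overflow wraps around, so ret < *a detects the
 * wrap and pins the statistic at UINT64_MAX instead; e.g. incrementing
 * UINT64_MAX - 1 by 2 yields UINT64_MAX, not 0.
 */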
void nvme_ns_init_format(NvmeNamespace *ns);
int nvme_ns_setup(NvmeNamespace *ns, Error **errp);
void nvme_ns_drain(NvmeNamespace *ns);
void nvme_ns_shutdown(NvmeNamespace *ns);
void nvme_ns_cleanup(NvmeNamespace *ns);

typedef struct NvmeAsyncEvent {
    QTAILQ_ENTRY(NvmeAsyncEvent) entry;
    NvmeAerResult result;
} NvmeAsyncEvent;

enum {
    NVME_SG_ALLOC = 1 << 0,
    NVME_SG_DMA   = 1 << 1,
};

typedef struct NvmeSg {
    int flags;

    union {
        QEMUSGList   qsg;
        QEMUIOVector iov;
    };
} NvmeSg;

typedef enum NvmeTxDirection {
    NVME_TX_DIRECTION_TO_DEVICE   = 0,
    NVME_TX_DIRECTION_FROM_DEVICE = 1,
} NvmeTxDirection;

typedef struct NvmeRequest {
    struct NvmeSQueue    *sq;
    struct NvmeNamespace *ns;
    BlockAIOCB           *aiocb;
    uint16_t             status;
    void                 *opaque;
    NvmeCqe              cqe;
    NvmeCmd              cmd;
    BlockAcctCookie      acct;
    NvmeSg               sg;
    bool                 atomic_write;
    QTAILQ_ENTRY(NvmeRequest) entry;
} NvmeRequest;

typedef struct NvmeBounceContext {
    NvmeRequest *req;

    struct {
        QEMUIOVector iov;
        uint8_t *bounce;
    } data, mdata;
} NvmeBounceContext;

static inline const char *nvme_adm_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_ADM_CMD_DELETE_SQ:        return "NVME_ADM_CMD_DELETE_SQ";
    case NVME_ADM_CMD_CREATE_SQ:        return "NVME_ADM_CMD_CREATE_SQ";
    case NVME_ADM_CMD_GET_LOG_PAGE:     return "NVME_ADM_CMD_GET_LOG_PAGE";
    case NVME_ADM_CMD_DELETE_CQ:        return "NVME_ADM_CMD_DELETE_CQ";
    case NVME_ADM_CMD_CREATE_CQ:        return "NVME_ADM_CMD_CREATE_CQ";
    case NVME_ADM_CMD_IDENTIFY:         return "NVME_ADM_CMD_IDENTIFY";
    case NVME_ADM_CMD_ABORT:            return "NVME_ADM_CMD_ABORT";
    case NVME_ADM_CMD_SET_FEATURES:     return "NVME_ADM_CMD_SET_FEATURES";
    case NVME_ADM_CMD_GET_FEATURES:     return "NVME_ADM_CMD_GET_FEATURES";
    case NVME_ADM_CMD_ASYNC_EV_REQ:     return "NVME_ADM_CMD_ASYNC_EV_REQ";
    case NVME_ADM_CMD_NS_ATTACHMENT:    return "NVME_ADM_CMD_NS_ATTACHMENT";
    case NVME_ADM_CMD_DIRECTIVE_SEND:   return "NVME_ADM_CMD_DIRECTIVE_SEND";
    case NVME_ADM_CMD_VIRT_MNGMT:       return "NVME_ADM_CMD_VIRT_MNGMT";
    case NVME_ADM_CMD_DIRECTIVE_RECV:   return "NVME_ADM_CMD_DIRECTIVE_RECV";
    case NVME_ADM_CMD_DBBUF_CONFIG:     return "NVME_ADM_CMD_DBBUF_CONFIG";
    case NVME_ADM_CMD_FORMAT_NVM:       return "NVME_ADM_CMD_FORMAT_NVM";
    default:                            return "NVME_ADM_CMD_UNKNOWN";
    }
}

static inline const char *nvme_io_opc_str(uint8_t opc)
{
    switch (opc) {
    case NVME_CMD_FLUSH:            return "NVME_NVM_CMD_FLUSH";
    case NVME_CMD_WRITE:            return "NVME_NVM_CMD_WRITE";
    case NVME_CMD_READ:             return "NVME_NVM_CMD_READ";
    case NVME_CMD_COMPARE:          return "NVME_NVM_CMD_COMPARE";
    case NVME_CMD_WRITE_ZEROES:     return "NVME_NVM_CMD_WRITE_ZEROES";
    case NVME_CMD_DSM:              return "NVME_NVM_CMD_DSM";
    case NVME_CMD_VERIFY:           return "NVME_NVM_CMD_VERIFY";
    case NVME_CMD_COPY:             return "NVME_NVM_CMD_COPY";
    case NVME_CMD_ZONE_MGMT_SEND:   return "NVME_ZONED_CMD_MGMT_SEND";
    case NVME_CMD_ZONE_MGMT_RECV:   return "NVME_ZONED_CMD_MGMT_RECV";
    case NVME_CMD_ZONE_APPEND:      return "NVME_ZONED_CMD_ZONE_APPEND";
    default:                        return "NVME_NVM_CMD_UNKNOWN";
    }
}
typedef struct NvmeSQueue {
    struct NvmeCtrl *ctrl;
    uint16_t    sqid;
    uint16_t    cqid;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    size;
    uint64_t    dma_addr;
    uint64_t    db_addr;
    uint64_t    ei_addr;
    QEMUBH      *bh;
    EventNotifier notifier;
    bool        ioeventfd_enabled;
    NvmeRequest *io_req;
    QTAILQ_HEAD(, NvmeRequest) req_list;
    QTAILQ_HEAD(, NvmeRequest) out_req_list;
    QTAILQ_ENTRY(NvmeSQueue) entry;
} NvmeSQueue;

typedef struct NvmeCQueue {
    struct NvmeCtrl *ctrl;
    uint8_t     phase;
    uint16_t    cqid;
    uint16_t    irq_enabled;
    uint32_t    head;
    uint32_t    tail;
    uint32_t    vector;
    uint32_t    size;
    uint64_t    dma_addr;
    uint64_t    db_addr;
    uint64_t    ei_addr;
    QEMUBH      *bh;
    EventNotifier notifier;
    bool        ioeventfd_enabled;
    QTAILQ_HEAD(, NvmeSQueue) sq_list;
    QTAILQ_HEAD(, NvmeRequest) req_list;
} NvmeCQueue;

#define TYPE_NVME "nvme"
#define NVME(obj) \
    OBJECT_CHECK(NvmeCtrl, (obj), TYPE_NVME)

typedef struct NvmeParams {
    char     *serial;
    uint32_t num_queues; /* deprecated since 5.1 */
    uint32_t max_ioqpairs;
    uint16_t msix_qsize;
    uint16_t mqes;
    uint32_t cmb_size_mb;
    uint8_t  aerl;
    uint32_t aer_max_queued;
    uint8_t  mdts;
    uint8_t  vsl;
    bool     use_intel_id;
    uint8_t  zasl;
    bool     auto_transition_zones;
    bool     legacy_cmb;
    bool     ioeventfd;
    bool     dbcs;
    uint16_t sriov_max_vfs;
    uint16_t sriov_vq_flexible;
    uint16_t sriov_vi_flexible;
    uint32_t sriov_max_vq_per_vf;
    uint32_t sriov_max_vi_per_vf;
    bool     msix_exclusive_bar;
    bool     ocp;

    struct {
        bool mem;
    } ctratt;

    uint16_t atomic_awun;
    uint16_t atomic_awupf;
    bool     atomic_dn;
} NvmeParams;
typedef struct NvmeCtrl {
    PCIDevice    parent_obj;
    MemoryRegion bar0;
    MemoryRegion iomem;
    NvmeBar      bar;
    NvmeParams   params;
    NvmeBus      bus;

    uint16_t    cntlid;
    bool        qs_created;
    uint32_t    page_size;
    uint16_t    page_bits;
    uint16_t    max_prp_ents;
    uint32_t    max_q_ents;
    uint8_t     outstanding_aers;
    uint32_t    irq_status;
    int         cq_pending;
    uint64_t    host_timestamp;              /* Timestamp sent by the host */
    uint64_t    timestamp_set_qemu_clock_ms; /* QEMU clock time */
    uint64_t    starttime_ms;
    uint16_t    temperature;
    uint8_t     smart_critical_warning;
    uint32_t    conf_msix_qsize;
    uint32_t    conf_ioqpairs;
    uint64_t    dbbuf_dbs;
    uint64_t    dbbuf_eis;
    bool        dbbuf_enabled;

    struct {
        uint32_t acs[256];

        struct {
            uint32_t nvm[256];
            uint32_t zoned[256];
        } iocs;
    } cse;

    struct {
        MemoryRegion mem;
        uint8_t      *buf;
        bool         cmse;
        hwaddr       cba;
    } cmb;

    struct {
        HostMemoryBackend *dev;
        bool              cmse;
        hwaddr            cba;
    } pmr;

    uint8_t     aer_mask;
    NvmeRequest **aer_reqs;
    QTAILQ_HEAD(, NvmeAsyncEvent) aer_queue;
    int         aer_queued;

    uint32_t    dmrsl;

    /* Namespace IDs start at 1, so the bitmap must be 1-based */
#define NVME_CHANGED_NSID_SIZE (NVME_MAX_NAMESPACES + 1)
    DECLARE_BITMAP(changed_nsids, NVME_CHANGED_NSID_SIZE);

    NvmeSubsystem   *subsys;

    NvmeNamespace   namespace;
    NvmeNamespace   *namespaces[NVME_MAX_NAMESPACES + 1];
    NvmeSQueue      **sq;
    NvmeCQueue      **cq;
    NvmeSQueue      admin_sq;
    NvmeCQueue      admin_cq;
    NvmeIdCtrl      id_ctrl;

    struct {
        struct {
            uint16_t temp_thresh_hi;
            uint16_t temp_thresh_low;
        };

        uint32_t                async_config;
        NvmeHostBehaviorSupport hbs;
    } features;

    NvmePriCtrlCap   pri_ctrl_cap;
    uint32_t         nr_sec_ctrls;
    NvmeSecCtrlEntry *sec_ctrl_list;

    struct {
        uint16_t vqrfap;
        uint16_t virfap;
    } next_pri_ctrl_cap; /* These override pri_ctrl_cap after reset */

    uint32_t   dn; /* Disable Normal */
    NvmeAtomic atomic;
} NvmeCtrl;
typedef enum NvmeResetType {
    NVME_RESET_FUNCTION   = 0,
    NVME_RESET_CONTROLLER = 1,
} NvmeResetType;

static inline NvmeNamespace *nvme_ns(NvmeCtrl *n, uint32_t nsid)
{
    if (!nsid || nsid > NVME_MAX_NAMESPACES) {
        return NULL;
    }

    return n->namespaces[nsid];
}

static inline NvmeCQueue *nvme_cq(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;
    NvmeCtrl *n = sq->ctrl;

    return n->cq[sq->cqid];
}

static inline NvmeCtrl *nvme_ctrl(NvmeRequest *req)
{
    NvmeSQueue *sq = req->sq;

    return sq->ctrl;
}

static inline uint16_t nvme_cid(NvmeRequest *req)
{
    if (!req) {
        return 0xffff;
    }

    return le16_to_cpu(req->cqe.cid);
}

static inline NvmeSecCtrlEntry *nvme_sctrl(NvmeCtrl *n)
{
    PCIDevice *pci_dev = &n->parent_obj;
    NvmeCtrl *pf = NVME(pcie_sriov_get_pf(pci_dev));

    if (pci_is_vf(pci_dev)) {
        return &pf->sec_ctrl_list[pcie_sriov_vf_number(pci_dev)];
    }

    return NULL;
}

static inline NvmeSecCtrlEntry *nvme_sctrl_for_cntlid(NvmeCtrl *n,
                                                      uint16_t cntlid)
{
    NvmeSecCtrlEntry *list = n->sec_ctrl_list;
    uint8_t i;

    for (i = 0; i < n->nr_sec_ctrls; i++) {
        if (le16_to_cpu(list[i].scid) == cntlid) {
            return &list[i];
        }
    }

    return NULL;
}

void nvme_attach_ns(NvmeCtrl *n, NvmeNamespace *ns);
uint16_t nvme_bounce_data(NvmeCtrl *n, void *ptr, uint32_t len,
                          NvmeTxDirection dir, NvmeRequest *req);
uint16_t nvme_bounce_mdata(NvmeCtrl *n, void *ptr, uint32_t len,
                           NvmeTxDirection dir, NvmeRequest *req);
void nvme_rw_complete_cb(void *opaque, int ret);
uint16_t nvme_map_dptr(NvmeCtrl *n, NvmeSg *sg, size_t len,
                       NvmeCmd *cmd);

#endif /* HW_NVME_NVME_H */