rocker.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518
  1. /*
  2. * QEMU rocker switch emulation - PCI device
  3. *
  4. * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
  5. * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include "qemu/osdep.h"
  18. #include "hw/pci/pci_device.h"
  19. #include "hw/qdev-properties.h"
  20. #include "hw/qdev-properties-system.h"
  21. #include "migration/vmstate.h"
  22. #include "hw/pci/msix.h"
  23. #include "net/net.h"
  24. #include "net/eth.h"
  25. #include "qapi/error.h"
  26. #include "qapi/qapi-commands-rocker.h"
  27. #include "qemu/iov.h"
  28. #include "qemu/module.h"
  29. #include "qemu/bitops.h"
  30. #include "qemu/log.h"
  31. #include "rocker.h"
  32. #include "rocker_hw.h"
  33. #include "rocker_fp.h"
  34. #include "rocker_desc.h"
  35. #include "rocker_tlv.h"
  36. #include "rocker_world.h"
  37. #include "rocker_of_dpa.h"
/*
 * Per-device state for one emulated rocker switch.  One instance is
 * created per -device rocker; all live instances are linked on the
 * file-scope "rockers" list via the "next" entry.
 */
struct rocker {
    /* private */
    PCIDevice parent_obj;
    /* public */

    MemoryRegion mmio;          /* register BAR backing */
    MemoryRegion msix_bar;      /* MSI-X table/PBA BAR */

    /* switch configuration */
    char *name;                 /* switch name */
    char *world_name;           /* world name */
    uint32_t fp_ports;          /* front-panel port count */
    NICPeers *fp_ports_peers;   /* netdev peers for the front-panel ports */
    MACAddr fp_start_macaddr;   /* front-panel port 0 mac addr */
    uint64_t switch_id;         /* switch id */

    /* front-panel ports */
    FpPort *fp_port[ROCKER_FP_PORTS_MAX];

    /* register backings */
    uint32_t test_reg;
    uint64_t test_reg64;
    dma_addr_t test_dma_addr;   /* DMA test buffer guest address */
    uint32_t test_dma_size;     /* DMA test buffer length (16-bit masked) */
    uint64_t lower32; /* lower 32-bit val in 2-part 64-bit access */

    /* desc rings: [0] cmd, [1] event, then tx/rx pair per port */
    DescRing **rings;

    /* switch worlds */
    World *worlds[ROCKER_WORLD_TYPE_MAX];
    World *world_dflt;          /* default world for ports */

    QLIST_ENTRY(rocker) next;
};
  66. static QLIST_HEAD(, rocker) rockers;
  67. Rocker *rocker_find(const char *name)
  68. {
  69. Rocker *r;
  70. QLIST_FOREACH(r, &rockers, next)
  71. if (strcmp(r->name, name) == 0) {
  72. return r;
  73. }
  74. return NULL;
  75. }
  76. World *rocker_get_world(Rocker *r, enum rocker_world_type type)
  77. {
  78. if (type < ROCKER_WORLD_TYPE_MAX) {
  79. return r->worlds[type];
  80. }
  81. return NULL;
  82. }
  83. RockerSwitch *qmp_query_rocker(const char *name, Error **errp)
  84. {
  85. RockerSwitch *rocker;
  86. Rocker *r;
  87. r = rocker_find(name);
  88. if (!r) {
  89. error_setg(errp, "rocker %s not found", name);
  90. return NULL;
  91. }
  92. rocker = g_new0(RockerSwitch, 1);
  93. rocker->name = g_strdup(r->name);
  94. rocker->id = r->switch_id;
  95. rocker->ports = r->fp_ports;
  96. return rocker;
  97. }
  98. RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
  99. {
  100. RockerPortList *list = NULL;
  101. Rocker *r;
  102. int i;
  103. r = rocker_find(name);
  104. if (!r) {
  105. error_setg(errp, "rocker %s not found", name);
  106. return NULL;
  107. }
  108. for (i = r->fp_ports - 1; i >= 0; i--) {
  109. QAPI_LIST_PREPEND(list, fp_port_get_info(r->fp_port[i]));
  110. }
  111. return list;
  112. }
  113. uint32_t rocker_fp_ports(Rocker *r)
  114. {
  115. return r->fp_ports;
  116. }
/*
 * Map a tx descriptor ring back to its physical port number (pport).
 *
 * Ring layout (see rocker_pci_ring_count): ring 0 is the command ring,
 * ring 1 the event ring, then each port contributes a tx/rx pair, so
 * tx rings sit at even indices starting at 2.  pports are 1-based,
 * hence the trailing "+ 1".
 */
static uint32_t rocker_get_pport_by_tx_ring(Rocker *r,
                                            DescRing *ring)
{
    return (desc_ring_index(ring) - 2) / 2 + 1;
}
  122. static int tx_consume(Rocker *r, DescInfo *info)
  123. {
  124. PCIDevice *dev = PCI_DEVICE(r);
  125. char *buf = desc_get_buf(info, true);
  126. RockerTlv *tlv_frag;
  127. RockerTlv *tlvs[ROCKER_TLV_TX_MAX + 1];
  128. struct iovec iov[ROCKER_TX_FRAGS_MAX] = { { 0, }, };
  129. uint32_t pport;
  130. uint32_t port;
  131. uint16_t tx_offload = ROCKER_TX_OFFLOAD_NONE;
  132. uint16_t tx_l3_csum_off = 0;
  133. uint16_t tx_tso_mss = 0;
  134. uint16_t tx_tso_hdr_len = 0;
  135. int iovcnt = 0;
  136. int err = ROCKER_OK;
  137. int rem;
  138. int i;
  139. if (!buf) {
  140. return -ROCKER_ENXIO;
  141. }
  142. rocker_tlv_parse(tlvs, ROCKER_TLV_TX_MAX, buf, desc_tlv_size(info));
  143. if (!tlvs[ROCKER_TLV_TX_FRAGS]) {
  144. return -ROCKER_EINVAL;
  145. }
  146. pport = rocker_get_pport_by_tx_ring(r, desc_get_ring(info));
  147. if (!fp_port_from_pport(pport, &port)) {
  148. return -ROCKER_EINVAL;
  149. }
  150. if (tlvs[ROCKER_TLV_TX_OFFLOAD]) {
  151. tx_offload = rocker_tlv_get_u8(tlvs[ROCKER_TLV_TX_OFFLOAD]);
  152. }
  153. switch (tx_offload) {
  154. case ROCKER_TX_OFFLOAD_L3_CSUM:
  155. if (!tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
  156. return -ROCKER_EINVAL;
  157. }
  158. break;
  159. case ROCKER_TX_OFFLOAD_TSO:
  160. if (!tlvs[ROCKER_TLV_TX_TSO_MSS] ||
  161. !tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
  162. return -ROCKER_EINVAL;
  163. }
  164. break;
  165. }
  166. if (tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
  167. tx_l3_csum_off = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]);
  168. qemu_log_mask(LOG_UNIMP, "rocker %s: L3 not implemented"
  169. " (cksum off: %u)\n",
  170. __func__, tx_l3_csum_off);
  171. }
  172. if (tlvs[ROCKER_TLV_TX_TSO_MSS]) {
  173. tx_tso_mss = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_MSS]);
  174. qemu_log_mask(LOG_UNIMP, "rocker %s: TSO not implemented (MSS: %u)\n",
  175. __func__, tx_tso_mss);
  176. }
  177. if (tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
  178. tx_tso_hdr_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]);
  179. qemu_log_mask(LOG_UNIMP, "rocker %s: TSO not implemented"
  180. " (hdr length: %u)\n",
  181. __func__, tx_tso_hdr_len);
  182. }
  183. rocker_tlv_for_each_nested(tlv_frag, tlvs[ROCKER_TLV_TX_FRAGS], rem) {
  184. hwaddr frag_addr;
  185. uint16_t frag_len;
  186. if (rocker_tlv_type(tlv_frag) != ROCKER_TLV_TX_FRAG) {
  187. err = -ROCKER_EINVAL;
  188. goto err_bad_attr;
  189. }
  190. rocker_tlv_parse_nested(tlvs, ROCKER_TLV_TX_FRAG_ATTR_MAX, tlv_frag);
  191. if (!tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
  192. !tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) {
  193. err = -ROCKER_EINVAL;
  194. goto err_bad_attr;
  195. }
  196. frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
  197. frag_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
  198. if (iovcnt >= ROCKER_TX_FRAGS_MAX) {
  199. goto err_too_many_frags;
  200. }
  201. iov[iovcnt].iov_len = frag_len;
  202. iov[iovcnt].iov_base = g_malloc(frag_len);
  203. pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base,
  204. iov[iovcnt].iov_len);
  205. iovcnt++;
  206. }
  207. err = fp_port_eg(r->fp_port[port], iov, iovcnt);
  208. err_too_many_frags:
  209. err_bad_attr:
  210. for (i = 0; i < ROCKER_TX_FRAGS_MAX; i++) {
  211. g_free(iov[i].iov_base);
  212. }
  213. return err;
  214. }
/*
 * Handle ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS: encode the current
 * settings of the port named by the PPORT attr back into the command
 * descriptor's buffer as a nested CMD_INFO TLV.
 *
 * Returns ROCKER_OK or a negative ROCKER_* error code.
 */
static int cmd_get_port_settings(Rocker *r,
                                 DescInfo *info, char *buf,
                                 RockerTlv *cmd_info_tlv)
{
    RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
    RockerTlv *nest;
    FpPort *fp_port;
    uint32_t pport;
    uint32_t port;
    uint32_t speed;
    uint8_t duplex;
    uint8_t autoneg;
    uint8_t learning;
    char *phys_name;
    MACAddr macaddr;
    enum rocker_world_type mode;
    size_t tlv_size;
    int pos;
    int err;

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                            cmd_info_tlv);

    /* PPORT selects the port and is the only mandatory attr. */
    if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) {
        return -ROCKER_EINVAL;
    }

    pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]);
    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];

    err = fp_port_get_settings(fp_port, &speed, &duplex, &autoneg);
    if (err) {
        return err;
    }

    fp_port_get_macaddr(fp_port, &macaddr);
    mode = world_type(fp_port_get_world(fp_port));
    learning = fp_port_get_learning(fp_port);
    /* NOTE(review): not freed here — presumably fp_port_get_name()
     * returns a borrowed pointer; confirm against rocker_fp.c. */
    phys_name = fp_port_get_name(fp_port);

    /* Pre-compute encoded size; must match the puts below exactly. */
    tlv_size = rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   pport */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   speed */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   duplex */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   autoneg */
               rocker_tlv_total_size(sizeof(macaddr.a)) + /*   macaddr */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   mode */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   learning */
               rocker_tlv_total_size(strlen(phys_name));

    if (tlv_size > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    pos = 0;
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_CMD_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, pport);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, speed);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, duplex);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, autoneg);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
                   sizeof(macaddr.a), macaddr.a);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MODE, mode);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
                      learning);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,
                   strlen(phys_name), phys_name);
    rocker_tlv_nest_end(buf, &pos, nest);

    return desc_set_buf(info, tlv_size);
}
/*
 * Handle ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS: apply the attrs
 * present in the nested CMD_INFO TLV to the selected port.  Attrs are
 * optional and applied independently, in order; an error part-way
 * through leaves the earlier settings already applied.
 *
 * Returns ROCKER_OK or a negative ROCKER_* error code.
 */
static int cmd_set_port_settings(Rocker *r,
                                 RockerTlv *cmd_info_tlv)
{
    RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
    FpPort *fp_port;
    uint32_t pport;
    uint32_t port;
    uint32_t speed;
    uint8_t duplex;
    uint8_t autoneg;
    uint8_t learning;
    MACAddr macaddr;
    enum rocker_world_type mode;
    int err;

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                            cmd_info_tlv);

    /* PPORT selects the port and is the only mandatory attr. */
    if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) {
        return -ROCKER_EINVAL;
    }

    pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]);
    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];

    /* Link settings only change as a complete speed/duplex/autoneg set. */
    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] &&
        tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] &&
        tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) {

        speed = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
        duplex = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
        autoneg = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

        err = fp_port_set_settings(fp_port, speed, duplex, autoneg);
        if (err) {
            return err;
        }
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) {
        /* Reject anything that is not exactly a 6-byte MAC. */
        if (rocker_tlv_len(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) !=
            sizeof(macaddr.a)) {
            return -ROCKER_EINVAL;
        }
        memcpy(macaddr.a,
               rocker_tlv_data(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]),
               sizeof(macaddr.a));
        fp_port_set_macaddr(fp_port, &macaddr);
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]) {
        mode = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
        if (mode >= ROCKER_WORLD_TYPE_MAX) {
            return -ROCKER_EINVAL;
        }
        /* We don't support world change. */
        if (!fp_port_check_world(fp_port, r->worlds[mode])) {
            return -ROCKER_EINVAL;
        }
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]) {
        learning =
            rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]);
        fp_port_set_learning(fp_port, learning);
    }

    return ROCKER_OK;
}
  342. static int cmd_consume(Rocker *r, DescInfo *info)
  343. {
  344. char *buf = desc_get_buf(info, false);
  345. RockerTlv *tlvs[ROCKER_TLV_CMD_MAX + 1];
  346. RockerTlv *info_tlv;
  347. World *world;
  348. uint16_t cmd;
  349. int err;
  350. if (!buf) {
  351. return -ROCKER_ENXIO;
  352. }
  353. rocker_tlv_parse(tlvs, ROCKER_TLV_CMD_MAX, buf, desc_tlv_size(info));
  354. if (!tlvs[ROCKER_TLV_CMD_TYPE] || !tlvs[ROCKER_TLV_CMD_INFO]) {
  355. return -ROCKER_EINVAL;
  356. }
  357. cmd = rocker_tlv_get_le16(tlvs[ROCKER_TLV_CMD_TYPE]);
  358. info_tlv = tlvs[ROCKER_TLV_CMD_INFO];
  359. /* This might be reworked to something like this:
  360. * Every world will have an array of command handlers from
  361. * ROCKER_TLV_CMD_TYPE_UNSPEC to ROCKER_TLV_CMD_TYPE_MAX. There is
  362. * up to each world to implement whatever command it want.
  363. * It can reference "generic" commands as cmd_set_port_settings or
  364. * cmd_get_port_settings
  365. */
  366. switch (cmd) {
  367. case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
  368. case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
  369. case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
  370. case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
  371. case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
  372. case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
  373. case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
  374. case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
  375. world = r->worlds[ROCKER_WORLD_TYPE_OF_DPA];
  376. err = world_do_cmd(world, info, buf, cmd, info_tlv);
  377. break;
  378. case ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS:
  379. err = cmd_get_port_settings(r, info, buf, info_tlv);
  380. break;
  381. case ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS:
  382. err = cmd_set_port_settings(r, info_tlv);
  383. break;
  384. default:
  385. err = -ROCKER_EINVAL;
  386. break;
  387. }
  388. return err;
  389. }
  390. static void rocker_msix_irq(Rocker *r, unsigned vector)
  391. {
  392. PCIDevice *dev = PCI_DEVICE(r);
  393. DPRINTF("MSI-X notify request for vector %d\n", vector);
  394. if (vector >= ROCKER_MSIX_VEC_COUNT(r->fp_ports)) {
  395. DPRINTF("incorrect vector %d\n", vector);
  396. return;
  397. }
  398. msix_notify(dev, vector);
  399. }
/*
 * Post a LINK_CHANGED event for @pport on the event ring and, if the
 * ring accepted the descriptor, raise the event MSI-X vector.
 *
 * Returns ROCKER_OK or a negative ROCKER_* error code; the error code
 * is also posted back through the descriptor so the guest sees it.
 */
int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up)
{
    DescRing *ring = r->rings[ROCKER_RING_EVENT];
    DescInfo *info = desc_ring_fetch_desc(ring);
    RockerTlv *nest;
    char *buf;
    size_t tlv_size;
    int pos;
    int err;

    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    /* Pre-compute encoded size; must match the puts below exactly. */
    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* event type */
               rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   pport */
               rocker_tlv_total_size(sizeof(uint8_t));    /*   link up */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto err_too_big;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENOMEM;
        goto err_no_mem;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE,
                        ROCKER_TLV_EVENT_TYPE_LINK_CHANGED);
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, pport);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP,
                      link_up ? 1 : 0);
    rocker_tlv_nest_end(buf, &pos, nest);

    err = desc_set_buf(info, tlv_size);

err_too_big:
err_no_mem:
    /* The descriptor is posted even on error so the guest gets err. */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT);
    }

    return err;
}
/*
 * Post a MAC_VLAN_SEEN (address-learning) event on the event ring for
 * a (pport, mac, vlan) tuple.  Quietly succeeds without posting when
 * learning is disabled on the port.
 *
 * Returns ROCKER_OK or a negative ROCKER_* error code.
 */
int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr,
                               uint16_t vlan_id)
{
    DescRing *ring = r->rings[ROCKER_RING_EVENT];
    DescInfo *info;
    FpPort *fp_port;
    uint32_t port;
    RockerTlv *nest;
    char *buf;
    size_t tlv_size;
    int pos;
    int err;

    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];
    /* No event when the port isn't learning addresses. */
    if (!fp_port_get_learning(fp_port)) {
        return ROCKER_OK;
    }

    info = desc_ring_fetch_desc(ring);
    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    /* Pre-compute encoded size; must match the puts below exactly. */
    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* event type */
               rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   pport */
               rocker_tlv_total_size(ETH_ALEN) +          /*   mac addr */
               rocker_tlv_total_size(sizeof(uint16_t));   /*   vlan_id */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto err_too_big;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENOMEM;
        goto err_no_mem;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE,
                        ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN);
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_PPORT, pport);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_MAC, ETH_ALEN, addr);
    rocker_tlv_put_u16(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, vlan_id);
    rocker_tlv_nest_end(buf, &pos, nest);

    err = desc_set_buf(info, tlv_size);

err_too_big:
err_no_mem:
    /* The descriptor is posted even on error so the guest gets err. */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT);
    }

    return err;
}
/*
 * Map a 1-based physical port number to its rx descriptor ring.
 *
 * Ring layout: ring 0 is cmd, ring 1 is event, then tx/rx pairs per
 * port — so port N's rx ring sits at index (N - 1) * 2 + 3.
 */
static DescRing *rocker_get_rx_ring_by_pport(Rocker *r,
                                             uint32_t pport)
{
    return r->rings[(pport - 1) * 2 + 3];
}
/*
 * Deliver a received frame to the guest: DMA the packet data into the
 * guest-provided rx fragment buffer, then rewrite the descriptor's TLV
 * buffer with the rx metadata (flags, csum, frag addr/len).
 *
 * Returns ROCKER_OK or a negative ROCKER_* error code; the descriptor
 * is posted (and the port's rx MSI-X vector raised) in either case.
 */
int rx_produce(World *world, uint32_t pport,
               const struct iovec *iov, int iovcnt, uint8_t copy_to_cpu)
{
    Rocker *r = world_rocker(world);
    PCIDevice *dev = (PCIDevice *)r;
    DescRing *ring = rocker_get_rx_ring_by_pport(r, pport);
    DescInfo *info = desc_ring_fetch_desc(ring);
    char *data;
    size_t data_size = iov_size(iov, iovcnt);
    char *buf;
    uint16_t rx_flags = 0;
    uint16_t rx_csum = 0;
    size_t tlv_size;
    RockerTlv *tlvs[ROCKER_TLV_RX_MAX + 1];
    hwaddr frag_addr;
    uint16_t frag_max_len;
    int pos;
    int err;

    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENXIO;
        goto out;
    }

    /* The guest pre-loaded the descriptor with its rx buffer info. */
    rocker_tlv_parse(tlvs, ROCKER_TLV_RX_MAX, buf, desc_tlv_size(info));

    if (!tlvs[ROCKER_TLV_RX_FRAG_ADDR] ||
        !tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]) {
        err = -ROCKER_EINVAL;
        goto out;
    }

    frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_RX_FRAG_ADDR]);
    frag_max_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]);

    if (data_size > frag_max_len) {
        err = -ROCKER_EMSGSIZE;
        goto out;
    }

    if (copy_to_cpu) {
        rx_flags |= ROCKER_RX_FLAGS_FWD_OFFLOAD;
    }

    /* XXX calc rx flags/csum */

    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* flags */
               rocker_tlv_total_size(sizeof(uint16_t)) +  /* csum */
               rocker_tlv_total_size(sizeof(uint64_t)) +  /* frag addr */
               rocker_tlv_total_size(sizeof(uint16_t)) +  /* frag max len */
               rocker_tlv_total_size(sizeof(uint16_t));   /* frag len */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto out;
    }

    /* TODO:
     * iov dma write can be optimized in similar way e1000 does it in
     * e1000_receive_iov. But maybe if would make sense to introduce
     * generic helper iov_dma_write.
     */
    data = g_malloc(data_size);
    iov_to_buf(iov, iovcnt, 0, data, data_size);
    pci_dma_write(dev, frag_addr, data, data_size);
    g_free(data);

    /* Rewrite the descriptor buffer with the rx result metadata. */
    pos = 0;
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FLAGS, rx_flags);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_CSUM, rx_csum);
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_RX_FRAG_ADDR, frag_addr);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_MAX_LEN, frag_max_len);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_LEN, data_size);

    err = desc_set_buf(info, tlv_size);

out:
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_RX(pport - 1));
    }

    return err;
}
  572. int rocker_port_eg(Rocker *r, uint32_t pport,
  573. const struct iovec *iov, int iovcnt)
  574. {
  575. FpPort *fp_port;
  576. uint32_t port;
  577. if (!fp_port_from_pport(pport, &port)) {
  578. return -ROCKER_EINVAL;
  579. }
  580. fp_port = r->fp_port[port];
  581. return fp_port_eg(fp_port, iov, iovcnt);
  582. }
  583. static void rocker_test_dma_ctrl(Rocker *r, uint32_t val)
  584. {
  585. PCIDevice *dev = PCI_DEVICE(r);
  586. char *buf;
  587. int i;
  588. buf = g_malloc(r->test_dma_size);
  589. switch (val) {
  590. case ROCKER_TEST_DMA_CTRL_CLEAR:
  591. memset(buf, 0, r->test_dma_size);
  592. break;
  593. case ROCKER_TEST_DMA_CTRL_FILL:
  594. memset(buf, 0x96, r->test_dma_size);
  595. break;
  596. case ROCKER_TEST_DMA_CTRL_INVERT:
  597. pci_dma_read(dev, r->test_dma_addr, buf, r->test_dma_size);
  598. for (i = 0; i < r->test_dma_size; i++) {
  599. buf[i] = ~buf[i];
  600. }
  601. break;
  602. default:
  603. DPRINTF("not test dma control val=0x%08x\n", val);
  604. goto err_out;
  605. }
  606. pci_dma_write(dev, r->test_dma_addr, buf, r->test_dma_size);
  607. rocker_msix_irq(r, ROCKER_MSIX_VEC_TEST);
  608. err_out:
  609. g_free(buf);
  610. }
  611. static void rocker_reset(DeviceState *dev);
  612. static void rocker_control(Rocker *r, uint32_t val)
  613. {
  614. if (val & ROCKER_CONTROL_RESET) {
  615. rocker_reset(DEVICE(r));
  616. }
  617. }
  618. static int rocker_pci_ring_count(Rocker *r)
  619. {
  620. /* There are:
  621. * - command ring
  622. * - event ring
  623. * - tx and rx ring per each port
  624. */
  625. return 2 + (2 * r->fp_ports);
  626. }
  627. static bool rocker_addr_is_desc_reg(Rocker *r, hwaddr addr)
  628. {
  629. hwaddr start = ROCKER_DMA_DESC_BASE;
  630. hwaddr end = start + (ROCKER_DMA_DESC_SIZE * rocker_pci_ring_count(r));
  631. return addr >= start && addr < end;
  632. }
  633. static void rocker_port_phys_enable_write(Rocker *r, uint64_t new)
  634. {
  635. int i;
  636. bool old_enabled;
  637. bool new_enabled;
  638. FpPort *fp_port;
  639. for (i = 0; i < r->fp_ports; i++) {
  640. fp_port = r->fp_port[i];
  641. old_enabled = fp_port_enabled(fp_port);
  642. new_enabled = (new >> (i + 1)) & 0x1;
  643. if (new_enabled == old_enabled) {
  644. continue;
  645. }
  646. if (new_enabled) {
  647. fp_port_enable(r->fp_port[i]);
  648. } else {
  649. fp_port_disable(r->fp_port[i]);
  650. }
  651. }
  652. }
/*
 * Handle a 32-bit guest write to the register BAR.
 *
 * Writes inside the descriptor-ring window are dispatched to the ring
 * selected by the address.  64-bit registers written as two 32-bit
 * halves stage the low word in r->lower32 until the high word (the
 * "+ 4" offset) arrives and completes the value.
 */
static void rocker_io_writel(void *opaque, hwaddr addr, uint32_t val)
{
    Rocker *r = opaque;

    if (rocker_addr_is_desc_reg(r, addr)) {
        unsigned index = ROCKER_RING_INDEX(addr);
        unsigned offset = addr & ROCKER_DMA_DESC_MASK;

        switch (offset) {
        case ROCKER_DMA_DESC_ADDR_OFFSET:
            /* low half of the 64-bit ring base address */
            r->lower32 = (uint64_t)val;
            break;
        case ROCKER_DMA_DESC_ADDR_OFFSET + 4:
            /* high half completes the base address */
            desc_ring_set_base_addr(r->rings[index],
                                    ((uint64_t)val) << 32 | r->lower32);
            r->lower32 = 0;
            break;
        case ROCKER_DMA_DESC_SIZE_OFFSET:
            desc_ring_set_size(r->rings[index], val);
            break;
        case ROCKER_DMA_DESC_HEAD_OFFSET:
            /* a head move may complete work; raise the ring's vector */
            if (desc_ring_set_head(r->rings[index], val)) {
                rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index]));
            }
            break;
        case ROCKER_DMA_DESC_CTRL_OFFSET:
            desc_ring_set_ctrl(r->rings[index], val);
            break;
        case ROCKER_DMA_DESC_CREDITS_OFFSET:
            /* returning credits may also raise the ring's vector */
            if (desc_ring_ret_credits(r->rings[index], val)) {
                rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index]));
            }
            break;
        default:
            DPRINTF("not implemented dma reg write(l) addr=0x" TARGET_FMT_plx
                    " val=0x%08x (ring %d, addr=0x%02x)\n",
                    addr, val, index, offset);
            break;
        }
        return;
    }

    switch (addr) {
    case ROCKER_TEST_REG:
        r->test_reg = val;
        break;
    case ROCKER_TEST_REG64:
    case ROCKER_TEST_DMA_ADDR:
    case ROCKER_PORT_PHYS_ENABLE:
        /* low half of a 64-bit register written in two parts */
        r->lower32 = (uint64_t)val;
        break;
    case ROCKER_TEST_REG64 + 4:
        r->test_reg64 = ((uint64_t)val) << 32 | r->lower32;
        r->lower32 = 0;
        break;
    case ROCKER_TEST_IRQ:
        rocker_msix_irq(r, val);
        break;
    case ROCKER_TEST_DMA_SIZE:
        r->test_dma_size = val & 0xFFFF;
        break;
    case ROCKER_TEST_DMA_ADDR + 4:
        r->test_dma_addr = ((uint64_t)val) << 32 | r->lower32;
        r->lower32 = 0;
        break;
    case ROCKER_TEST_DMA_CTRL:
        rocker_test_dma_ctrl(r, val);
        break;
    case ROCKER_CONTROL:
        rocker_control(r, val);
        break;
    case ROCKER_PORT_PHYS_ENABLE + 4:
        rocker_port_phys_enable_write(r, ((uint64_t)val) << 32 | r->lower32);
        r->lower32 = 0;
        break;
    default:
        DPRINTF("not implemented write(l) addr=0x" TARGET_FMT_plx
                " val=0x%08x\n", addr, val);
        break;
    }
}
/*
 * Handle a 64-bit guest write to the register BAR: the single-access
 * counterpart of the two-part 32-bit path in rocker_io_writel().
 */
static void rocker_io_writeq(void *opaque, hwaddr addr, uint64_t val)
{
    Rocker *r = opaque;

    if (rocker_addr_is_desc_reg(r, addr)) {
        unsigned index = ROCKER_RING_INDEX(addr);
        unsigned offset = addr & ROCKER_DMA_DESC_MASK;

        switch (offset) {
        case ROCKER_DMA_DESC_ADDR_OFFSET:
            /* whole 64-bit ring base address in one write */
            desc_ring_set_base_addr(r->rings[index], val);
            break;
        default:
            DPRINTF("not implemented dma reg write(q) addr=0x" TARGET_FMT_plx
                    " val=0x" TARGET_FMT_plx " (ring %d, offset=0x%02x)\n",
                    addr, val, index, offset);
            break;
        }
        return;
    }

    switch (addr) {
    case ROCKER_TEST_REG64:
        r->test_reg64 = val;
        break;
    case ROCKER_TEST_DMA_ADDR:
        r->test_dma_addr = val;
        break;
    case ROCKER_PORT_PHYS_ENABLE:
        rocker_port_phys_enable_write(r, val);
        break;
    default:
        DPRINTF("not implemented write(q) addr=0x" TARGET_FMT_plx
                " val=0x" TARGET_FMT_plx "\n", addr, val);
        break;
    }
}
  765. #ifdef DEBUG_ROCKER
  766. #define regname(reg) case (reg): return #reg
  767. static const char *rocker_reg_name(void *opaque, hwaddr addr)
  768. {
  769. Rocker *r = opaque;
  770. if (rocker_addr_is_desc_reg(r, addr)) {
  771. unsigned index = ROCKER_RING_INDEX(addr);
  772. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  773. static char buf[100];
  774. char ring_name[10];
  775. switch (index) {
  776. case 0:
  777. sprintf(ring_name, "cmd");
  778. break;
  779. case 1:
  780. sprintf(ring_name, "event");
  781. break;
  782. default:
  783. sprintf(ring_name, "%s-%d", index % 2 ? "rx" : "tx",
  784. (index - 2) / 2);
  785. }
  786. switch (offset) {
  787. case ROCKER_DMA_DESC_ADDR_OFFSET:
  788. sprintf(buf, "Ring[%s] ADDR", ring_name);
  789. return buf;
  790. case ROCKER_DMA_DESC_ADDR_OFFSET+4:
  791. sprintf(buf, "Ring[%s] ADDR+4", ring_name);
  792. return buf;
  793. case ROCKER_DMA_DESC_SIZE_OFFSET:
  794. sprintf(buf, "Ring[%s] SIZE", ring_name);
  795. return buf;
  796. case ROCKER_DMA_DESC_HEAD_OFFSET:
  797. sprintf(buf, "Ring[%s] HEAD", ring_name);
  798. return buf;
  799. case ROCKER_DMA_DESC_TAIL_OFFSET:
  800. sprintf(buf, "Ring[%s] TAIL", ring_name);
  801. return buf;
  802. case ROCKER_DMA_DESC_CTRL_OFFSET:
  803. sprintf(buf, "Ring[%s] CTRL", ring_name);
  804. return buf;
  805. case ROCKER_DMA_DESC_CREDITS_OFFSET:
  806. sprintf(buf, "Ring[%s] CREDITS", ring_name);
  807. return buf;
  808. default:
  809. sprintf(buf, "Ring[%s] ???", ring_name);
  810. return buf;
  811. }
  812. } else {
  813. switch (addr) {
  814. regname(ROCKER_BOGUS_REG0);
  815. regname(ROCKER_BOGUS_REG1);
  816. regname(ROCKER_BOGUS_REG2);
  817. regname(ROCKER_BOGUS_REG3);
  818. regname(ROCKER_TEST_REG);
  819. regname(ROCKER_TEST_REG64);
  820. regname(ROCKER_TEST_REG64+4);
  821. regname(ROCKER_TEST_IRQ);
  822. regname(ROCKER_TEST_DMA_ADDR);
  823. regname(ROCKER_TEST_DMA_ADDR+4);
  824. regname(ROCKER_TEST_DMA_SIZE);
  825. regname(ROCKER_TEST_DMA_CTRL);
  826. regname(ROCKER_CONTROL);
  827. regname(ROCKER_PORT_PHYS_COUNT);
  828. regname(ROCKER_PORT_PHYS_LINK_STATUS);
  829. regname(ROCKER_PORT_PHYS_LINK_STATUS+4);
  830. regname(ROCKER_PORT_PHYS_ENABLE);
  831. regname(ROCKER_PORT_PHYS_ENABLE+4);
  832. regname(ROCKER_SWITCH_ID);
  833. regname(ROCKER_SWITCH_ID+4);
  834. }
  835. }
  836. return "???";
  837. }
  838. #else
  839. static const char *rocker_reg_name(void *opaque, hwaddr addr)
  840. {
  841. return NULL;
  842. }
  843. #endif
  844. static void rocker_mmio_write(void *opaque, hwaddr addr, uint64_t val,
  845. unsigned size)
  846. {
  847. DPRINTF("Write %s addr " TARGET_FMT_plx
  848. ", size %u, val " TARGET_FMT_plx "\n",
  849. rocker_reg_name(opaque, addr), addr, size, val);
  850. switch (size) {
  851. case 4:
  852. rocker_io_writel(opaque, addr, val);
  853. break;
  854. case 8:
  855. rocker_io_writeq(opaque, addr, val);
  856. break;
  857. }
  858. }
  859. static uint64_t rocker_port_phys_link_status(Rocker *r)
  860. {
  861. int i;
  862. uint64_t status = 0;
  863. for (i = 0; i < r->fp_ports; i++) {
  864. FpPort *port = r->fp_port[i];
  865. if (fp_port_get_link_up(port)) {
  866. status |= 1ULL << (i + 1);
  867. }
  868. }
  869. return status;
  870. }
  871. static uint64_t rocker_port_phys_enable_read(Rocker *r)
  872. {
  873. int i;
  874. uint64_t ret = 0;
  875. for (i = 0; i < r->fp_ports; i++) {
  876. FpPort *port = r->fp_port[i];
  877. if (fp_port_enabled(port)) {
  878. ret |= 1ULL << (i + 1);
  879. }
  880. }
  881. return ret;
  882. }
  883. static uint32_t rocker_io_readl(void *opaque, hwaddr addr)
  884. {
  885. Rocker *r = opaque;
  886. uint32_t ret;
  887. if (rocker_addr_is_desc_reg(r, addr)) {
  888. unsigned index = ROCKER_RING_INDEX(addr);
  889. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  890. switch (offset) {
  891. case ROCKER_DMA_DESC_ADDR_OFFSET:
  892. ret = (uint32_t)desc_ring_get_base_addr(r->rings[index]);
  893. break;
  894. case ROCKER_DMA_DESC_ADDR_OFFSET + 4:
  895. ret = (uint32_t)(desc_ring_get_base_addr(r->rings[index]) >> 32);
  896. break;
  897. case ROCKER_DMA_DESC_SIZE_OFFSET:
  898. ret = desc_ring_get_size(r->rings[index]);
  899. break;
  900. case ROCKER_DMA_DESC_HEAD_OFFSET:
  901. ret = desc_ring_get_head(r->rings[index]);
  902. break;
  903. case ROCKER_DMA_DESC_TAIL_OFFSET:
  904. ret = desc_ring_get_tail(r->rings[index]);
  905. break;
  906. case ROCKER_DMA_DESC_CREDITS_OFFSET:
  907. ret = desc_ring_get_credits(r->rings[index]);
  908. break;
  909. default:
  910. DPRINTF("not implemented dma reg read(l) addr=0x" TARGET_FMT_plx
  911. " (ring %d, addr=0x%02x)\n", addr, index, offset);
  912. ret = 0;
  913. break;
  914. }
  915. return ret;
  916. }
  917. switch (addr) {
  918. case ROCKER_BOGUS_REG0:
  919. case ROCKER_BOGUS_REG1:
  920. case ROCKER_BOGUS_REG2:
  921. case ROCKER_BOGUS_REG3:
  922. ret = 0xDEADBABE;
  923. break;
  924. case ROCKER_TEST_REG:
  925. ret = r->test_reg * 2;
  926. break;
  927. case ROCKER_TEST_REG64:
  928. ret = (uint32_t)(r->test_reg64 * 2);
  929. break;
  930. case ROCKER_TEST_REG64 + 4:
  931. ret = (uint32_t)((r->test_reg64 * 2) >> 32);
  932. break;
  933. case ROCKER_TEST_DMA_SIZE:
  934. ret = r->test_dma_size;
  935. break;
  936. case ROCKER_TEST_DMA_ADDR:
  937. ret = (uint32_t)r->test_dma_addr;
  938. break;
  939. case ROCKER_TEST_DMA_ADDR + 4:
  940. ret = (uint32_t)(r->test_dma_addr >> 32);
  941. break;
  942. case ROCKER_PORT_PHYS_COUNT:
  943. ret = r->fp_ports;
  944. break;
  945. case ROCKER_PORT_PHYS_LINK_STATUS:
  946. ret = (uint32_t)rocker_port_phys_link_status(r);
  947. break;
  948. case ROCKER_PORT_PHYS_LINK_STATUS + 4:
  949. ret = (uint32_t)(rocker_port_phys_link_status(r) >> 32);
  950. break;
  951. case ROCKER_PORT_PHYS_ENABLE:
  952. ret = (uint32_t)rocker_port_phys_enable_read(r);
  953. break;
  954. case ROCKER_PORT_PHYS_ENABLE + 4:
  955. ret = (uint32_t)(rocker_port_phys_enable_read(r) >> 32);
  956. break;
  957. case ROCKER_SWITCH_ID:
  958. ret = (uint32_t)r->switch_id;
  959. break;
  960. case ROCKER_SWITCH_ID + 4:
  961. ret = (uint32_t)(r->switch_id >> 32);
  962. break;
  963. default:
  964. DPRINTF("not implemented read(l) addr=0x" TARGET_FMT_plx "\n", addr);
  965. ret = 0;
  966. break;
  967. }
  968. return ret;
  969. }
  970. static uint64_t rocker_io_readq(void *opaque, hwaddr addr)
  971. {
  972. Rocker *r = opaque;
  973. uint64_t ret;
  974. if (rocker_addr_is_desc_reg(r, addr)) {
  975. unsigned index = ROCKER_RING_INDEX(addr);
  976. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  977. switch (addr & ROCKER_DMA_DESC_MASK) {
  978. case ROCKER_DMA_DESC_ADDR_OFFSET:
  979. ret = desc_ring_get_base_addr(r->rings[index]);
  980. break;
  981. default:
  982. DPRINTF("not implemented dma reg read(q) addr=0x" TARGET_FMT_plx
  983. " (ring %d, addr=0x%02x)\n", addr, index, offset);
  984. ret = 0;
  985. break;
  986. }
  987. return ret;
  988. }
  989. switch (addr) {
  990. case ROCKER_BOGUS_REG0:
  991. case ROCKER_BOGUS_REG2:
  992. ret = 0xDEADBABEDEADBABEULL;
  993. break;
  994. case ROCKER_TEST_REG64:
  995. ret = r->test_reg64 * 2;
  996. break;
  997. case ROCKER_TEST_DMA_ADDR:
  998. ret = r->test_dma_addr;
  999. break;
  1000. case ROCKER_PORT_PHYS_LINK_STATUS:
  1001. ret = rocker_port_phys_link_status(r);
  1002. break;
  1003. case ROCKER_PORT_PHYS_ENABLE:
  1004. ret = rocker_port_phys_enable_read(r);
  1005. break;
  1006. case ROCKER_SWITCH_ID:
  1007. ret = r->switch_id;
  1008. break;
  1009. default:
  1010. DPRINTF("not implemented read(q) addr=0x" TARGET_FMT_plx "\n", addr);
  1011. ret = 0;
  1012. break;
  1013. }
  1014. return ret;
  1015. }
  1016. static uint64_t rocker_mmio_read(void *opaque, hwaddr addr, unsigned size)
  1017. {
  1018. DPRINTF("Read %s addr " TARGET_FMT_plx ", size %u\n",
  1019. rocker_reg_name(opaque, addr), addr, size);
  1020. switch (size) {
  1021. case 4:
  1022. return rocker_io_readl(opaque, addr);
  1023. case 8:
  1024. return rocker_io_readq(opaque, addr);
  1025. }
  1026. return -1;
  1027. }
/* BAR0 MMIO ops: little-endian; both guest-visible accesses (.valid) and
 * internal dispatch (.impl) are restricted to 4- or 8-byte widths, which
 * is why rocker_mmio_read/write only handle sizes 4 and 8. */
static const MemoryRegionOps rocker_mmio_ops = {
    .read = rocker_mmio_read,
    .write = rocker_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
  1041. static void rocker_msix_vectors_unuse(Rocker *r,
  1042. unsigned int num_vectors)
  1043. {
  1044. PCIDevice *dev = PCI_DEVICE(r);
  1045. int i;
  1046. for (i = 0; i < num_vectors; i++) {
  1047. msix_vector_unuse(dev, i);
  1048. }
  1049. }
  1050. static void rocker_msix_vectors_use(Rocker *r, unsigned int num_vectors)
  1051. {
  1052. PCIDevice *dev = PCI_DEVICE(r);
  1053. int i;
  1054. for (i = 0; i < num_vectors; i++) {
  1055. msix_vector_use(dev, i);
  1056. }
  1057. }
  1058. static int rocker_msix_init(Rocker *r, Error **errp)
  1059. {
  1060. PCIDevice *dev = PCI_DEVICE(r);
  1061. int err;
  1062. err = msix_init(dev, ROCKER_MSIX_VEC_COUNT(r->fp_ports),
  1063. &r->msix_bar,
  1064. ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_TABLE_OFFSET,
  1065. &r->msix_bar,
  1066. ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_PBA_OFFSET,
  1067. 0, errp);
  1068. if (err) {
  1069. return err;
  1070. }
  1071. rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));
  1072. return 0;
  1073. }
  1074. static void rocker_msix_uninit(Rocker *r)
  1075. {
  1076. PCIDevice *dev = PCI_DEVICE(r);
  1077. msix_uninit(dev, &r->msix_bar, &r->msix_bar);
  1078. rocker_msix_vectors_unuse(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));
  1079. }
  1080. static World *rocker_world_type_by_name(Rocker *r, const char *name)
  1081. {
  1082. int i;
  1083. for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
  1084. if (strcmp(name, world_name(r->worlds[i])) == 0) {
  1085. return r->worlds[i];
  1086. }
  1087. }
  1088. return NULL;
  1089. }
/*
 * PCI realize: bring up one rocker switch instance.
 *
 * Allocates the worlds, registers BAR0 (MMIO) and the MSI-X BAR,
 * initializes MSI-X, validates user-supplied properties (name, start
 * MAC, switch id, port count), then allocates descriptor rings and
 * front-panel ports.  On failure, unwinds through the err_* labels in
 * reverse order of setup and reports the error via @errp.
 */
static void pci_rocker_realize(PCIDevice *dev, Error **errp)
{
    Rocker *r = ROCKER(dev);
    const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };
    const MACAddr dflt = { .a = { 0x52, 0x54, 0x00, 0x12, 0x35, 0x01 } };
    static int sw_index; /* per-process instance counter; keeps default MACs unique */
    int i, err = 0;

    /* allocate worlds */

    r->worlds[ROCKER_WORLD_TYPE_OF_DPA] = of_dpa_world_alloc(r);

    if (!r->world_name) {
        /* no "world" property given: default to the OF-DPA world's name */
        r->world_name = g_strdup(world_name(r->worlds[ROCKER_WORLD_TYPE_OF_DPA]));
    }

    r->world_dflt = rocker_world_type_by_name(r, r->world_name);
    if (!r->world_dflt) {
        error_setg(errp,
                   "invalid argument requested world %s does not exist",
                   r->world_name);
        goto err_world_type_by_name;
    }

    /* set up memory-mapped region at BAR0 */

    memory_region_init_io(&r->mmio, OBJECT(r), &rocker_mmio_ops, r,
                          "rocker-mmio", ROCKER_PCI_BAR0_SIZE);
    pci_register_bar(dev, ROCKER_PCI_BAR0_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &r->mmio);

    /* set up memory-mapped region for MSI-X */

    memory_region_init(&r->msix_bar, OBJECT(r), "rocker-msix-bar",
                       ROCKER_PCI_MSIX_BAR_SIZE);
    pci_register_bar(dev, ROCKER_PCI_MSIX_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &r->msix_bar);

    /* MSI-X init */

    err = rocker_msix_init(r, errp);
    if (err) {
        goto err_msix_init;
    }

    /* validate switch properties */

    if (!r->name) {
        r->name = g_strdup(TYPE_ROCKER);
    }

    /* switch names must be unique across all rocker instances */
    if (rocker_find(r->name)) {
        error_setg(errp, "%s already exists", r->name);
        goto err_duplicate;
    }

    /* Rocker name is passed in port name requests to OS with the intention
     * that the name is used in interface names. Limit the length of the
     * rocker name to avoid naming problems in the OS. Also, adding the
     * port number as p# and unganged breakout b#, where # is at most 2
     * digits, so leave room for it too (-1 for string terminator, -3 for
     * p# and -3 for b#)
     */
#define ROCKER_IFNAMSIZ 16
#define MAX_ROCKER_NAME_LEN (ROCKER_IFNAMSIZ - 1 - 3 - 3)
    if (strlen(r->name) > MAX_ROCKER_NAME_LEN) {
        error_setg(errp,
                   "name too long; please shorten to at most %d chars",
                   MAX_ROCKER_NAME_LEN);
        goto err_name_too_long;
    }

    if (memcmp(&r->fp_start_macaddr, &zero, sizeof(zero)) == 0) {
        /* no MAC property given: use the default, bumped per instance */
        memcpy(&r->fp_start_macaddr, &dflt, sizeof(dflt));
        r->fp_start_macaddr.a[4] += (sw_index++);
    }

    if (!r->switch_id) {
        /* default switch id is derived from the start MAC address */
        memcpy(&r->switch_id, &r->fp_start_macaddr,
               sizeof(r->fp_start_macaddr));
    }

    /* silently clamp the port count to the supported maximum */
    if (r->fp_ports > ROCKER_FP_PORTS_MAX) {
        r->fp_ports = ROCKER_FP_PORTS_MAX;
    }

    r->rings = g_new(DescRing *, rocker_pci_ring_count(r));

    /* Rings are ordered like this:
     * - command ring
     * - event ring
     * - port0 tx ring
     * - port0 rx ring
     * - port1 tx ring
     * - port1 rx ring
     * .....
     */

    for (i = 0; i < rocker_pci_ring_count(r); i++) {
        DescRing *ring = desc_ring_alloc(r, i);

        if (i == ROCKER_RING_CMD) {
            desc_ring_set_consume(ring, cmd_consume, ROCKER_MSIX_VEC_CMD);
        } else if (i == ROCKER_RING_EVENT) {
            /* event ring is producer-only from the device side */
            desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_EVENT);
        } else if (i % 2 == 0) {
            desc_ring_set_consume(ring, tx_consume,
                                  ROCKER_MSIX_VEC_TX((i - 2) / 2));
        } else if (i % 2 == 1) {
            desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_RX((i - 3) / 2));
        }

        r->rings[i] = ring;
    }

    for (i = 0; i < r->fp_ports; i++) {
        FpPort *port =
            fp_port_alloc(r, r->name, &r->fp_start_macaddr,
                          i, &r->fp_ports_peers[i]);

        r->fp_port[i] = port;
        fp_port_set_world(port, r->world_dflt);
    }

    QLIST_INSERT_HEAD(&rockers, r, next);

    return;

err_name_too_long:
err_duplicate:
    rocker_msix_uninit(r);
err_msix_init:
    object_unparent(OBJECT(&r->msix_bar));
    object_unparent(OBJECT(&r->mmio));
err_world_type_by_name:
    for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
        if (r->worlds[i]) {
            world_free(r->worlds[i]);
        }
    }
}
/*
 * PCI exit: tear the device down roughly in reverse order of realize —
 * ports, rings, MSI-X, memory regions, worlds, then the netdev peers array.
 */
static void pci_rocker_uninit(PCIDevice *dev)
{
    Rocker *r = ROCKER(dev);
    int i;

    /* remove from the global list of rocker switches */
    QLIST_REMOVE(r, next);

    for (i = 0; i < r->fp_ports; i++) {
        FpPort *port = r->fp_port[i];

        fp_port_free(port);
        r->fp_port[i] = NULL;
    }

    for (i = 0; i < rocker_pci_ring_count(r); i++) {
        if (r->rings[i]) {
            desc_ring_free(r->rings[i]);
        }
    }
    g_free(r->rings);

    rocker_msix_uninit(r);
    object_unparent(OBJECT(&r->msix_bar));
    object_unparent(OBJECT(&r->mmio));

    for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
        if (r->worlds[i]) {
            world_free(r->worlds[i]);
        }
    }
    g_free(r->fp_ports_peers);
}
  1230. static void rocker_reset(DeviceState *dev)
  1231. {
  1232. Rocker *r = ROCKER(dev);
  1233. int i;
  1234. for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
  1235. if (r->worlds[i]) {
  1236. world_reset(r->worlds[i]);
  1237. }
  1238. }
  1239. for (i = 0; i < r->fp_ports; i++) {
  1240. fp_port_reset(r->fp_port[i]);
  1241. fp_port_set_world(r->fp_port[i], r->world_dflt);
  1242. }
  1243. r->test_reg = 0;
  1244. r->test_reg64 = 0;
  1245. r->test_dma_addr = 0;
  1246. r->test_dma_size = 0;
  1247. for (i = 0; i < rocker_pci_ring_count(r); i++) {
  1248. desc_ring_reset(r->rings[i]);
  1249. }
  1250. DPRINTF("Reset done\n");
  1251. }
/* User-configurable device properties (-device rocker,...). */
static Property rocker_properties[] = {
    DEFINE_PROP_STRING("name", Rocker, name),           /* switch name; defaults to "rocker" */
    DEFINE_PROP_STRING("world", Rocker, world_name),    /* default world for all ports */
    DEFINE_PROP_MACADDR("fp_start_macaddr", Rocker,
                        fp_start_macaddr),              /* base MAC for front-panel ports */
    DEFINE_PROP_UINT64("switch_id", Rocker,
                       switch_id, 0),                   /* 0 => derived from start MAC */
    DEFINE_PROP_ARRAY("ports", Rocker, fp_ports,
                      fp_ports_peers, qdev_prop_netdev, NICPeers),
    DEFINE_PROP_END_OF_LIST(),
};
/* No migration support: mark the device unmigratable. */
static const VMStateDescription rocker_vmsd = {
    .name = TYPE_ROCKER,
    .unmigratable = 1,
};
/* Class init: wire up PCI identity, lifecycle hooks, reset and properties. */
static void rocker_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_rocker_realize;
    k->exit = pci_rocker_uninit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT;
    k->device_id = PCI_DEVICE_ID_REDHAT_ROCKER;
    k->revision = ROCKER_PCI_REVISION;
    k->class_id = PCI_CLASS_NETWORK_OTHER;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Rocker Switch";
    dc->reset = rocker_reset;
    device_class_set_props(dc, rocker_properties);
    dc->vmsd = &rocker_vmsd;
}
/* QOM type info: a conventional (non-express) PCI device. */
static const TypeInfo rocker_info = {
    .name = TYPE_ROCKER,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(Rocker),
    .class_init = rocker_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
/* Register the rocker device type with QOM at startup. */
static void rocker_register_types(void)
{
    type_register_static(&rocker_info);
}

type_init(rocker_register_types)