  1. /*
  2. * QEMU rocker switch emulation - PCI device
  3. *
  4. * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
  5. * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include "qemu/osdep.h"
  18. #include "hw/pci/pci_device.h"
  19. #include "hw/qdev-properties.h"
  20. #include "hw/qdev-properties-system.h"
  21. #include "migration/vmstate.h"
  22. #include "hw/pci/msix.h"
  23. #include "net/net.h"
  24. #include "net/eth.h"
  25. #include "qapi/error.h"
  26. #include "qapi/qapi-commands-rocker.h"
  27. #include "qemu/iov.h"
  28. #include "qemu/module.h"
  29. #include "qemu/bitops.h"
  30. #include "qemu/log.h"
  31. #include "rocker.h"
  32. #include "rocker_hw.h"
  33. #include "rocker_fp.h"
  34. #include "rocker_desc.h"
  35. #include "rocker_tlv.h"
  36. #include "rocker_world.h"
  37. #include "rocker_of_dpa.h"
/*
 * Per-device state for one emulated rocker switch.  One instance exists
 * per "-device rocker" and is linked into the global 'rockers' list so
 * QMP queries can look a switch up by name.
 */
struct rocker {
    /* private */
    PCIDevice parent_obj;
    /* public */
    MemoryRegion mmio;      /* BAR for device registers */
    MemoryRegion msix_bar;  /* BAR backing the MSI-X tables */
    /* switch configuration */
    char *name;                   /* switch name */
    char *world_name;             /* world name */
    uint32_t fp_ports;            /* front-panel port count */
    NICPeers *fp_ports_peers;     /* netdev backends, one per port */
    MACAddr fp_start_macaddr;     /* front-panel port 0 mac addr */
    uint64_t switch_id;           /* switch id */
    /* front-panel ports */
    FpPort *fp_port[ROCKER_FP_PORTS_MAX];
    /* register backings */
    uint32_t test_reg;
    uint64_t test_reg64;
    dma_addr_t test_dma_addr;     /* guest buffer used by DMA self-test */
    uint32_t test_dma_size;
    uint64_t lower32; /* lower 32-bit val in 2-part 64-bit access */
    /* desc rings: cmd, event, then tx/rx pair per port */
    DescRing **rings;
    /* switch worlds */
    World *worlds[ROCKER_WORLD_TYPE_MAX];
    World *world_dflt;            /* world new ports start in */
    QLIST_ENTRY(rocker) next;
};

/* All instantiated rocker switches, for lookup by name. */
static QLIST_HEAD(, rocker) rockers;
  67. Rocker *rocker_find(const char *name)
  68. {
  69. Rocker *r;
  70. QLIST_FOREACH(r, &rockers, next)
  71. if (strcmp(r->name, name) == 0) {
  72. return r;
  73. }
  74. return NULL;
  75. }
  76. World *rocker_get_world(Rocker *r, enum rocker_world_type type)
  77. {
  78. if (type < ROCKER_WORLD_TYPE_MAX) {
  79. return r->worlds[type];
  80. }
  81. return NULL;
  82. }
  83. RockerSwitch *qmp_query_rocker(const char *name, Error **errp)
  84. {
  85. RockerSwitch *rocker;
  86. Rocker *r;
  87. r = rocker_find(name);
  88. if (!r) {
  89. error_setg(errp, "rocker %s not found", name);
  90. return NULL;
  91. }
  92. rocker = g_new0(RockerSwitch, 1);
  93. rocker->name = g_strdup(r->name);
  94. rocker->id = r->switch_id;
  95. rocker->ports = r->fp_ports;
  96. return rocker;
  97. }
  98. RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
  99. {
  100. RockerPortList *list = NULL;
  101. Rocker *r;
  102. int i;
  103. r = rocker_find(name);
  104. if (!r) {
  105. error_setg(errp, "rocker %s not found", name);
  106. return NULL;
  107. }
  108. for (i = r->fp_ports - 1; i >= 0; i--) {
  109. QAPI_LIST_PREPEND(list, fp_port_get_info(r->fp_port[i]));
  110. }
  111. return list;
  112. }
  113. static uint32_t rocker_get_pport_by_tx_ring(Rocker *r,
  114. DescRing *ring)
  115. {
  116. return (desc_ring_index(ring) - 2) / 2 + 1;
  117. }
  118. static int tx_consume(Rocker *r, DescInfo *info)
  119. {
  120. PCIDevice *dev = PCI_DEVICE(r);
  121. char *buf = desc_get_buf(info, true);
  122. RockerTlv *tlv_frag;
  123. RockerTlv *tlvs[ROCKER_TLV_TX_MAX + 1];
  124. struct iovec iov[ROCKER_TX_FRAGS_MAX] = { { 0, }, };
  125. uint32_t pport;
  126. uint32_t port;
  127. uint16_t tx_offload = ROCKER_TX_OFFLOAD_NONE;
  128. uint16_t tx_l3_csum_off = 0;
  129. uint16_t tx_tso_mss = 0;
  130. uint16_t tx_tso_hdr_len = 0;
  131. int iovcnt = 0;
  132. int err = ROCKER_OK;
  133. int rem;
  134. int i;
  135. if (!buf) {
  136. return -ROCKER_ENXIO;
  137. }
  138. rocker_tlv_parse(tlvs, ROCKER_TLV_TX_MAX, buf, desc_tlv_size(info));
  139. if (!tlvs[ROCKER_TLV_TX_FRAGS]) {
  140. return -ROCKER_EINVAL;
  141. }
  142. pport = rocker_get_pport_by_tx_ring(r, desc_get_ring(info));
  143. if (!fp_port_from_pport(pport, &port)) {
  144. return -ROCKER_EINVAL;
  145. }
  146. if (tlvs[ROCKER_TLV_TX_OFFLOAD]) {
  147. tx_offload = rocker_tlv_get_u8(tlvs[ROCKER_TLV_TX_OFFLOAD]);
  148. }
  149. switch (tx_offload) {
  150. case ROCKER_TX_OFFLOAD_L3_CSUM:
  151. if (!tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
  152. return -ROCKER_EINVAL;
  153. }
  154. break;
  155. case ROCKER_TX_OFFLOAD_TSO:
  156. if (!tlvs[ROCKER_TLV_TX_TSO_MSS] ||
  157. !tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
  158. return -ROCKER_EINVAL;
  159. }
  160. break;
  161. }
  162. if (tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
  163. tx_l3_csum_off = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]);
  164. qemu_log_mask(LOG_UNIMP, "rocker %s: L3 not implemented"
  165. " (cksum off: %u)\n",
  166. __func__, tx_l3_csum_off);
  167. }
  168. if (tlvs[ROCKER_TLV_TX_TSO_MSS]) {
  169. tx_tso_mss = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_MSS]);
  170. qemu_log_mask(LOG_UNIMP, "rocker %s: TSO not implemented (MSS: %u)\n",
  171. __func__, tx_tso_mss);
  172. }
  173. if (tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
  174. tx_tso_hdr_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]);
  175. qemu_log_mask(LOG_UNIMP, "rocker %s: TSO not implemented"
  176. " (hdr length: %u)\n",
  177. __func__, tx_tso_hdr_len);
  178. }
  179. rocker_tlv_for_each_nested(tlv_frag, tlvs[ROCKER_TLV_TX_FRAGS], rem) {
  180. hwaddr frag_addr;
  181. uint16_t frag_len;
  182. if (rocker_tlv_type(tlv_frag) != ROCKER_TLV_TX_FRAG) {
  183. err = -ROCKER_EINVAL;
  184. goto err_bad_attr;
  185. }
  186. rocker_tlv_parse_nested(tlvs, ROCKER_TLV_TX_FRAG_ATTR_MAX, tlv_frag);
  187. if (!tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
  188. !tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) {
  189. err = -ROCKER_EINVAL;
  190. goto err_bad_attr;
  191. }
  192. frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
  193. frag_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
  194. if (iovcnt >= ROCKER_TX_FRAGS_MAX) {
  195. goto err_too_many_frags;
  196. }
  197. iov[iovcnt].iov_len = frag_len;
  198. iov[iovcnt].iov_base = g_malloc(frag_len);
  199. pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base,
  200. iov[iovcnt].iov_len);
  201. iovcnt++;
  202. }
  203. err = fp_port_eg(r->fp_port[port], iov, iovcnt);
  204. err_too_many_frags:
  205. err_bad_attr:
  206. for (i = 0; i < ROCKER_TX_FRAGS_MAX; i++) {
  207. g_free(iov[i].iov_base);
  208. }
  209. return err;
  210. }
/*
 * Handle CMD_TYPE_GET_PORT_SETTINGS: read the named port's settings and
 * write them back into the descriptor buffer as a nested TLV.
 *
 * Returns ROCKER_OK or a negative ROCKER_* error code.
 */
static int cmd_get_port_settings(Rocker *r,
                                 DescInfo *info, char *buf,
                                 RockerTlv *cmd_info_tlv)
{
    RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
    RockerTlv *nest;
    FpPort *fp_port;
    uint32_t pport;
    uint32_t port;
    uint32_t speed;
    uint8_t duplex;
    uint8_t autoneg;
    uint8_t learning;
    char *phys_name;
    MACAddr macaddr;
    enum rocker_world_type mode;
    size_t tlv_size;
    int pos;
    int err;

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                            cmd_info_tlv);

    /* The physical port attr is mandatory. */
    if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) {
        return -ROCKER_EINVAL;
    }

    pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]);
    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];

    err = fp_port_get_settings(fp_port, &speed, &duplex, &autoneg);
    if (err) {
        return err;
    }

    fp_port_get_macaddr(fp_port, &macaddr);
    mode = world_type(fp_port_get_world(fp_port));
    learning = fp_port_get_learning(fp_port);
    phys_name = fp_port_get_name(fp_port);

    /*
     * Pre-compute the response size; each term must match one put below
     * or desc_set_buf() will report the wrong TLV length to the guest.
     */
    tlv_size = rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   pport */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   speed */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   duplex */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   autoneg */
               rocker_tlv_total_size(sizeof(macaddr.a)) + /*   macaddr */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   mode */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   learning */
               rocker_tlv_total_size(strlen(phys_name));

    if (tlv_size > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    /* Serialize the settings into the descriptor buffer. */
    pos = 0;
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_CMD_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, pport);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, speed);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, duplex);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, autoneg);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
                   sizeof(macaddr.a), macaddr.a);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MODE, mode);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
                      learning);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,
                   strlen(phys_name), phys_name);
    rocker_tlv_nest_end(buf, &pos, nest);

    return desc_set_buf(info, tlv_size);
}
/*
 * Handle CMD_TYPE_SET_PORT_SETTINGS: apply whichever settings attrs are
 * present in the nested TLV to the named port.  Attrs not supplied are
 * left unchanged.  Returns ROCKER_OK or a negative ROCKER_* error code.
 */
static int cmd_set_port_settings(Rocker *r,
                                 RockerTlv *cmd_info_tlv)
{
    RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
    FpPort *fp_port;
    uint32_t pport;
    uint32_t port;
    uint32_t speed;
    uint8_t duplex;
    uint8_t autoneg;
    uint8_t learning;
    MACAddr macaddr;
    enum rocker_world_type mode;
    int err;

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                            cmd_info_tlv);

    /* The physical port attr is mandatory. */
    if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) {
        return -ROCKER_EINVAL;
    }

    pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]);
    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];

    /* speed/duplex/autoneg are only applied as a complete triple. */
    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] &&
        tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] &&
        tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) {

        speed = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
        duplex = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
        autoneg = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

        err = fp_port_set_settings(fp_port, speed, duplex, autoneg);
        if (err) {
            return err;
        }
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) {
        /* Reject anything that isn't exactly a 6-byte MAC address. */
        if (rocker_tlv_len(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) !=
            sizeof(macaddr.a)) {
            return -ROCKER_EINVAL;
        }
        memcpy(macaddr.a,
               rocker_tlv_data(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]),
               sizeof(macaddr.a));
        fp_port_set_macaddr(fp_port, &macaddr);
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]) {
        mode = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
        if (mode >= ROCKER_WORLD_TYPE_MAX) {
            return -ROCKER_EINVAL;
        }
        /* We don't support world change. */
        if (!fp_port_check_world(fp_port, r->worlds[mode])) {
            return -ROCKER_EINVAL;
        }
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]) {
        learning =
            rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]);
        fp_port_set_learning(fp_port, learning);
    }

    return ROCKER_OK;
}
/*
 * Consume one command-ring descriptor: decode the command TLV and
 * dispatch to either the OF-DPA world or a generic port-settings
 * handler.  Returns ROCKER_OK or a negative ROCKER_* error code.
 */
static int cmd_consume(Rocker *r, DescInfo *info)
{
    char *buf = desc_get_buf(info, false);
    RockerTlv *tlvs[ROCKER_TLV_CMD_MAX + 1];
    RockerTlv *info_tlv;
    World *world;
    uint16_t cmd;
    int err;

    if (!buf) {
        return -ROCKER_ENXIO;
    }

    rocker_tlv_parse(tlvs, ROCKER_TLV_CMD_MAX, buf, desc_tlv_size(info));

    /* Both the command type and its payload are mandatory. */
    if (!tlvs[ROCKER_TLV_CMD_TYPE] || !tlvs[ROCKER_TLV_CMD_INFO]) {
        return -ROCKER_EINVAL;
    }

    cmd = rocker_tlv_get_le16(tlvs[ROCKER_TLV_CMD_TYPE]);
    info_tlv = tlvs[ROCKER_TLV_CMD_INFO];

    /* This might be reworked to something like this:
     * Every world will have an array of command handlers from
     * ROCKER_TLV_CMD_TYPE_UNSPEC to ROCKER_TLV_CMD_TYPE_MAX. There is
     * up to each world to implement whatever command it want.
     * It can reference "generic" commands as cmd_set_port_settings or
     * cmd_get_port_settings
     */

    switch (cmd) {
    /* All OF-DPA flow/group commands route to the OF-DPA world. */
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        world = r->worlds[ROCKER_WORLD_TYPE_OF_DPA];
        err = world_do_cmd(world, info, buf, cmd, info_tlv);
        break;
    case ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS:
        err = cmd_get_port_settings(r, info, buf, info_tlv);
        break;
    case ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS:
        err = cmd_set_port_settings(r, info_tlv);
        break;
    default:
        err = -ROCKER_EINVAL;
        break;
    }

    return err;
}
  386. static void rocker_msix_irq(Rocker *r, unsigned vector)
  387. {
  388. PCIDevice *dev = PCI_DEVICE(r);
  389. DPRINTF("MSI-X notify request for vector %d\n", vector);
  390. if (vector >= ROCKER_MSIX_VEC_COUNT(r->fp_ports)) {
  391. DPRINTF("incorrect vector %d\n", vector);
  392. return;
  393. }
  394. msix_notify(dev, vector);
  395. }
/*
 * Post a LINK_CHANGED event for pport onto the event ring and raise the
 * event MSI-X vector.  Returns ROCKER_OK or a negative ROCKER_* code.
 */
int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up)
{
    DescRing *ring = r->rings[ROCKER_RING_EVENT];
    DescInfo *info = desc_ring_fetch_desc(ring);
    RockerTlv *nest;
    char *buf;
    size_t tlv_size;
    int pos;
    int err;

    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    /* Size must match the put sequence below exactly. */
    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* event type */
               rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   pport */
               rocker_tlv_total_size(sizeof(uint8_t));    /*   link up */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto err_too_big;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENOMEM;
        goto err_no_mem;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE,
                        ROCKER_TLV_EVENT_TYPE_LINK_CHANGED);
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, pport);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP,
                      link_up ? 1 : 0);
    rocker_tlv_nest_end(buf, &pos, nest);

    err = desc_set_buf(info, tlv_size);

err_too_big:
err_no_mem:
    /* Post the descriptor (with err as status) and notify the guest. */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT);
    }

    return err;
}
/*
 * Post a MAC_VLAN_SEEN (learning) event onto the event ring and raise
 * the event MSI-X vector.  Silently succeeds (ROCKER_OK) when learning
 * is disabled on the port; otherwise returns ROCKER_OK or a negative
 * ROCKER_* error code.
 */
int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr,
                               uint16_t vlan_id)
{
    DescRing *ring = r->rings[ROCKER_RING_EVENT];
    DescInfo *info;
    FpPort *fp_port;
    uint32_t port;
    RockerTlv *nest;
    char *buf;
    size_t tlv_size;
    int pos;
    int err;

    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];
    /* Only learning ports generate MAC/VLAN events. */
    if (!fp_port_get_learning(fp_port)) {
        return ROCKER_OK;
    }

    info = desc_ring_fetch_desc(ring);
    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    /* Size must match the put sequence below exactly. */
    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* event type */
               rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   pport */
               rocker_tlv_total_size(ETH_ALEN) +          /*   mac addr */
               rocker_tlv_total_size(sizeof(uint16_t));   /*   vlan_id */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto err_too_big;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENOMEM;
        goto err_no_mem;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE,
                        ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN);
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_PPORT, pport);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_MAC, ETH_ALEN, addr);
    rocker_tlv_put_u16(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, vlan_id);
    rocker_tlv_nest_end(buf, &pos, nest);

    err = desc_set_buf(info, tlv_size);

err_too_big:
err_no_mem:
    /* Post the descriptor (with err as status) and notify the guest. */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT);
    }

    return err;
}
  490. static DescRing *rocker_get_rx_ring_by_pport(Rocker *r,
  491. uint32_t pport)
  492. {
  493. return r->rings[(pport - 1) * 2 + 3];
  494. }
/*
 * Deliver a received frame to the guest: DMA the frame into the guest's
 * pre-posted rx fragment buffer, then rewrite the descriptor's TLVs to
 * describe what was received, post the descriptor, and raise the port's
 * rx MSI-X vector.  Returns ROCKER_OK or a negative ROCKER_* code.
 */
int rx_produce(World *world, uint32_t pport,
               const struct iovec *iov, int iovcnt, uint8_t copy_to_cpu)
{
    Rocker *r = world_rocker(world);
    PCIDevice *dev = (PCIDevice *)r;
    DescRing *ring = rocker_get_rx_ring_by_pport(r, pport);
    DescInfo *info = desc_ring_fetch_desc(ring);
    char *data;
    size_t data_size = iov_size(iov, iovcnt);
    char *buf;
    uint16_t rx_flags = 0;
    uint16_t rx_csum = 0;
    size_t tlv_size;
    RockerTlv *tlvs[ROCKER_TLV_RX_MAX + 1];
    hwaddr frag_addr;
    uint16_t frag_max_len;
    int pos;
    int err;

    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENXIO;
        goto out;
    }

    /* The guest posted the frag buffer address/size as TLVs. */
    rocker_tlv_parse(tlvs, ROCKER_TLV_RX_MAX, buf, desc_tlv_size(info));

    if (!tlvs[ROCKER_TLV_RX_FRAG_ADDR] ||
        !tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]) {
        err = -ROCKER_EINVAL;
        goto out;
    }

    frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_RX_FRAG_ADDR]);
    frag_max_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]);

    if (data_size > frag_max_len) {
        err = -ROCKER_EMSGSIZE;
        goto out;
    }

    if (copy_to_cpu) {
        rx_flags |= ROCKER_RX_FLAGS_FWD_OFFLOAD;
    }

    /* XXX calc rx flags/csum */

    /* Size must match the put sequence below exactly. */
    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) + /* flags */
               rocker_tlv_total_size(sizeof(uint16_t)) + /* scum */
               rocker_tlv_total_size(sizeof(uint64_t)) + /* frag addr */
               rocker_tlv_total_size(sizeof(uint16_t)) + /* frag max len */
               rocker_tlv_total_size(sizeof(uint16_t));  /* frag len */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto out;
    }

    /* TODO:
     * iov dma write can be optimized in similar way e1000 does it in
     * e1000_receive_iov. But maybe if would make sense to introduce
     * generic helper iov_dma_write.
     */

    /* Flatten the iov and DMA it into the guest's frag buffer. */
    data = g_malloc(data_size);
    iov_to_buf(iov, iovcnt, 0, data, data_size);
    pci_dma_write(dev, frag_addr, data, data_size);
    g_free(data);

    /* Rewrite the descriptor buffer with the completed rx TLVs. */
    pos = 0;
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FLAGS, rx_flags);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_CSUM, rx_csum);
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_RX_FRAG_ADDR, frag_addr);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_MAX_LEN, frag_max_len);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_LEN, data_size);

    err = desc_set_buf(info, tlv_size);

out:
    /* Post the descriptor (with err as status) and notify the guest. */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_RX(pport - 1));
    }

    return err;
}
  568. int rocker_port_eg(Rocker *r, uint32_t pport,
  569. const struct iovec *iov, int iovcnt)
  570. {
  571. FpPort *fp_port;
  572. uint32_t port;
  573. if (!fp_port_from_pport(pport, &port)) {
  574. return -ROCKER_EINVAL;
  575. }
  576. fp_port = r->fp_port[port];
  577. return fp_port_eg(fp_port, iov, iovcnt);
  578. }
/*
 * Handle a write to the TEST_DMA_CTRL register: run one DMA self-test
 * op (clear/fill/invert) against the guest buffer previously described
 * by TEST_DMA_ADDR/TEST_DMA_SIZE, then raise the test MSI-X vector.
 */
static void rocker_test_dma_ctrl(Rocker *r, uint32_t val)
{
    PCIDevice *dev = PCI_DEVICE(r);
    char *buf;
    int i;

    buf = g_malloc(r->test_dma_size);

    switch (val) {
    case ROCKER_TEST_DMA_CTRL_CLEAR:
        memset(buf, 0, r->test_dma_size);
        break;
    case ROCKER_TEST_DMA_CTRL_FILL:
        /* 0x96 is an arbitrary recognizable fill pattern. */
        memset(buf, 0x96, r->test_dma_size);
        break;
    case ROCKER_TEST_DMA_CTRL_INVERT:
        /* Read the guest buffer, flip every bit, write it back below. */
        pci_dma_read(dev, r->test_dma_addr, buf, r->test_dma_size);
        for (i = 0; i < r->test_dma_size; i++) {
            buf[i] = ~buf[i];
        }
        break;
    default:
        /* Unknown op: no DMA write-back and no interrupt. */
        DPRINTF("not test dma control val=0x%08x\n", val);
        goto err_out;
    }
    pci_dma_write(dev, r->test_dma_addr, buf, r->test_dma_size);
    rocker_msix_irq(r, ROCKER_MSIX_VEC_TEST);

err_out:
    g_free(buf);
}
  607. static void rocker_reset(DeviceState *dev);
  608. static void rocker_control(Rocker *r, uint32_t val)
  609. {
  610. if (val & ROCKER_CONTROL_RESET) {
  611. rocker_reset(DEVICE(r));
  612. }
  613. }
  614. static int rocker_pci_ring_count(Rocker *r)
  615. {
  616. /* There are:
  617. * - command ring
  618. * - event ring
  619. * - tx and rx ring per each port
  620. */
  621. return 2 + (2 * r->fp_ports);
  622. }
  623. static bool rocker_addr_is_desc_reg(Rocker *r, hwaddr addr)
  624. {
  625. hwaddr start = ROCKER_DMA_DESC_BASE;
  626. hwaddr end = start + (ROCKER_DMA_DESC_SIZE * rocker_pci_ring_count(r));
  627. return addr >= start && addr < end;
  628. }
  629. static void rocker_port_phys_enable_write(Rocker *r, uint64_t new)
  630. {
  631. int i;
  632. bool old_enabled;
  633. bool new_enabled;
  634. FpPort *fp_port;
  635. for (i = 0; i < r->fp_ports; i++) {
  636. fp_port = r->fp_port[i];
  637. old_enabled = fp_port_enabled(fp_port);
  638. new_enabled = (new >> (i + 1)) & 0x1;
  639. if (new_enabled == old_enabled) {
  640. continue;
  641. }
  642. if (new_enabled) {
  643. fp_port_enable(r->fp_port[i]);
  644. } else {
  645. fp_port_disable(r->fp_port[i]);
  646. }
  647. }
  648. }
/*
 * 32-bit MMIO write handler.  Descriptor-window addresses are decoded
 * into (ring index, register offset); everything else is a flat device
 * register.  64-bit registers written as two 32-bit halves use
 * r->lower32 to stage the low half until the high half arrives.
 */
static void rocker_io_writel(void *opaque, hwaddr addr, uint32_t val)
{
    Rocker *r = opaque;

    if (rocker_addr_is_desc_reg(r, addr)) {
        unsigned index = ROCKER_RING_INDEX(addr);
        unsigned offset = addr & ROCKER_DMA_DESC_MASK;

        switch (offset) {
        case ROCKER_DMA_DESC_ADDR_OFFSET:
            /* Low half of the ring base address; held until the high half. */
            r->lower32 = (uint64_t)val;
            break;
        case ROCKER_DMA_DESC_ADDR_OFFSET + 4:
            desc_ring_set_base_addr(r->rings[index],
                                    ((uint64_t)val) << 32 | r->lower32);
            r->lower32 = 0;
            break;
        case ROCKER_DMA_DESC_SIZE_OFFSET:
            desc_ring_set_size(r->rings[index], val);
            break;
        case ROCKER_DMA_DESC_HEAD_OFFSET:
            /* Head advance may complete descriptors -> interrupt. */
            if (desc_ring_set_head(r->rings[index], val)) {
                rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index]));
            }
            break;
        case ROCKER_DMA_DESC_CTRL_OFFSET:
            desc_ring_set_ctrl(r->rings[index], val);
            break;
        case ROCKER_DMA_DESC_CREDITS_OFFSET:
            /* Returning credits may unblock the ring -> interrupt. */
            if (desc_ring_ret_credits(r->rings[index], val)) {
                rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index]));
            }
            break;
        default:
            DPRINTF("not implemented dma reg write(l) addr=0x" HWADDR_FMT_plx
                    " val=0x%08x (ring %d, addr=0x%02x)\n",
                    addr, val, index, offset);
            break;
        }
        return;
    }

    switch (addr) {
    case ROCKER_TEST_REG:
        r->test_reg = val;
        break;
    case ROCKER_TEST_REG64:
    case ROCKER_TEST_DMA_ADDR:
    case ROCKER_PORT_PHYS_ENABLE:
        /* Low half of a 64-bit register; held until the high half. */
        r->lower32 = (uint64_t)val;
        break;
    case ROCKER_TEST_REG64 + 4:
        r->test_reg64 = ((uint64_t)val) << 32 | r->lower32;
        r->lower32 = 0;
        break;
    case ROCKER_TEST_IRQ:
        /* Writing a vector number fires that MSI-X vector (self-test). */
        rocker_msix_irq(r, val);
        break;
    case ROCKER_TEST_DMA_SIZE:
        r->test_dma_size = val & 0xFFFF;
        break;
    case ROCKER_TEST_DMA_ADDR + 4:
        r->test_dma_addr = ((uint64_t)val) << 32 | r->lower32;
        r->lower32 = 0;
        break;
    case ROCKER_TEST_DMA_CTRL:
        rocker_test_dma_ctrl(r, val);
        break;
    case ROCKER_CONTROL:
        rocker_control(r, val);
        break;
    case ROCKER_PORT_PHYS_ENABLE + 4:
        rocker_port_phys_enable_write(r, ((uint64_t)val) << 32 | r->lower32);
        r->lower32 = 0;
        break;
    default:
        DPRINTF("not implemented write(l) addr=0x" HWADDR_FMT_plx
                " val=0x%08x\n", addr, val);
        break;
    }
}
/*
 * 64-bit MMIO write handler: the full-width path for registers that
 * rocker_io_writel() otherwise assembles from two 32-bit halves.
 */
static void rocker_io_writeq(void *opaque, hwaddr addr, uint64_t val)
{
    Rocker *r = opaque;

    if (rocker_addr_is_desc_reg(r, addr)) {
        unsigned index = ROCKER_RING_INDEX(addr);
        unsigned offset = addr & ROCKER_DMA_DESC_MASK;

        switch (offset) {
        case ROCKER_DMA_DESC_ADDR_OFFSET:
            desc_ring_set_base_addr(r->rings[index], val);
            break;
        default:
            DPRINTF("not implemented dma reg write(q) addr=0x" HWADDR_FMT_plx
                    " val=0x" HWADDR_FMT_plx " (ring %d, offset=0x%02x)\n",
                    addr, val, index, offset);
            break;
        }
        return;
    }

    switch (addr) {
    case ROCKER_TEST_REG64:
        r->test_reg64 = val;
        break;
    case ROCKER_TEST_DMA_ADDR:
        r->test_dma_addr = val;
        break;
    case ROCKER_PORT_PHYS_ENABLE:
        rocker_port_phys_enable_write(r, val);
        break;
    default:
        DPRINTF("not implemented write(q) addr=0x" HWADDR_FMT_plx
                " val=0x" HWADDR_FMT_plx "\n", addr, val);
        break;
    }
}
#ifdef DEBUG_ROCKER
/* Expand a register constant into a matching case returning its name. */
#define regname(reg) case (reg): return #reg
/*
 * Translate an MMIO address into a human-readable register name for
 * debug tracing.  Descriptor-window addresses are formatted into a
 * static buffer (not thread-safe, debug-only); flat registers map via
 * the regname() cases.  Returns "???" for unknown addresses.
 */
static const char *rocker_reg_name(void *opaque, hwaddr addr)
{
    Rocker *r = opaque;

    if (rocker_addr_is_desc_reg(r, addr)) {
        unsigned index = ROCKER_RING_INDEX(addr);
        unsigned offset = addr & ROCKER_DMA_DESC_MASK;
        static char buf[100];
        char ring_name[10];

        /* Rings 0/1 are cmd/event; the rest alternate tx/rx per port. */
        switch (index) {
        case 0:
            sprintf(ring_name, "cmd");
            break;
        case 1:
            sprintf(ring_name, "event");
            break;
        default:
            sprintf(ring_name, "%s-%d", index % 2 ? "rx" : "tx",
                    (index - 2) / 2);
        }

        switch (offset) {
        case ROCKER_DMA_DESC_ADDR_OFFSET:
            sprintf(buf, "Ring[%s] ADDR", ring_name);
            return buf;
        case ROCKER_DMA_DESC_ADDR_OFFSET+4:
            sprintf(buf, "Ring[%s] ADDR+4", ring_name);
            return buf;
        case ROCKER_DMA_DESC_SIZE_OFFSET:
            sprintf(buf, "Ring[%s] SIZE", ring_name);
            return buf;
        case ROCKER_DMA_DESC_HEAD_OFFSET:
            sprintf(buf, "Ring[%s] HEAD", ring_name);
            return buf;
        case ROCKER_DMA_DESC_TAIL_OFFSET:
            sprintf(buf, "Ring[%s] TAIL", ring_name);
            return buf;
        case ROCKER_DMA_DESC_CTRL_OFFSET:
            sprintf(buf, "Ring[%s] CTRL", ring_name);
            return buf;
        case ROCKER_DMA_DESC_CREDITS_OFFSET:
            sprintf(buf, "Ring[%s] CREDITS", ring_name);
            return buf;
        default:
            sprintf(buf, "Ring[%s] ???", ring_name);
            return buf;
        }
    } else {
        switch (addr) {
            regname(ROCKER_BOGUS_REG0);
            regname(ROCKER_BOGUS_REG1);
            regname(ROCKER_BOGUS_REG2);
            regname(ROCKER_BOGUS_REG3);
            regname(ROCKER_TEST_REG);
            regname(ROCKER_TEST_REG64);
            regname(ROCKER_TEST_REG64+4);
            regname(ROCKER_TEST_IRQ);
            regname(ROCKER_TEST_DMA_ADDR);
            regname(ROCKER_TEST_DMA_ADDR+4);
            regname(ROCKER_TEST_DMA_SIZE);
            regname(ROCKER_TEST_DMA_CTRL);
            regname(ROCKER_CONTROL);
            regname(ROCKER_PORT_PHYS_COUNT);
            regname(ROCKER_PORT_PHYS_LINK_STATUS);
            regname(ROCKER_PORT_PHYS_LINK_STATUS+4);
            regname(ROCKER_PORT_PHYS_ENABLE);
            regname(ROCKER_PORT_PHYS_ENABLE+4);
            regname(ROCKER_SWITCH_ID);
            regname(ROCKER_SWITCH_ID+4);
        }
    }
    return "???";
}
#else
/* Non-debug builds: register names are not tracked. */
static const char *rocker_reg_name(void *opaque, hwaddr addr)
{
    return NULL;
}
#endif
  840. static void rocker_mmio_write(void *opaque, hwaddr addr, uint64_t val,
  841. unsigned size)
  842. {
  843. DPRINTF("Write %s addr " HWADDR_FMT_plx
  844. ", size %u, val " HWADDR_FMT_plx "\n",
  845. rocker_reg_name(opaque, addr), addr, size, val);
  846. switch (size) {
  847. case 4:
  848. rocker_io_writel(opaque, addr, val);
  849. break;
  850. case 8:
  851. rocker_io_writeq(opaque, addr, val);
  852. break;
  853. }
  854. }
  855. static uint64_t rocker_port_phys_link_status(Rocker *r)
  856. {
  857. int i;
  858. uint64_t status = 0;
  859. for (i = 0; i < r->fp_ports; i++) {
  860. FpPort *port = r->fp_port[i];
  861. if (fp_port_get_link_up(port)) {
  862. status |= 1ULL << (i + 1);
  863. }
  864. }
  865. return status;
  866. }
  867. static uint64_t rocker_port_phys_enable_read(Rocker *r)
  868. {
  869. int i;
  870. uint64_t ret = 0;
  871. for (i = 0; i < r->fp_ports; i++) {
  872. FpPort *port = r->fp_port[i];
  873. if (fp_port_enabled(port)) {
  874. ret |= 1ULL << (i + 1);
  875. }
  876. }
  877. return ret;
  878. }
  879. static uint32_t rocker_io_readl(void *opaque, hwaddr addr)
  880. {
  881. Rocker *r = opaque;
  882. uint32_t ret;
  883. if (rocker_addr_is_desc_reg(r, addr)) {
  884. unsigned index = ROCKER_RING_INDEX(addr);
  885. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  886. switch (offset) {
  887. case ROCKER_DMA_DESC_ADDR_OFFSET:
  888. ret = (uint32_t)desc_ring_get_base_addr(r->rings[index]);
  889. break;
  890. case ROCKER_DMA_DESC_ADDR_OFFSET + 4:
  891. ret = (uint32_t)(desc_ring_get_base_addr(r->rings[index]) >> 32);
  892. break;
  893. case ROCKER_DMA_DESC_SIZE_OFFSET:
  894. ret = desc_ring_get_size(r->rings[index]);
  895. break;
  896. case ROCKER_DMA_DESC_HEAD_OFFSET:
  897. ret = desc_ring_get_head(r->rings[index]);
  898. break;
  899. case ROCKER_DMA_DESC_TAIL_OFFSET:
  900. ret = desc_ring_get_tail(r->rings[index]);
  901. break;
  902. case ROCKER_DMA_DESC_CREDITS_OFFSET:
  903. ret = desc_ring_get_credits(r->rings[index]);
  904. break;
  905. default:
  906. DPRINTF("not implemented dma reg read(l) addr=0x" HWADDR_FMT_plx
  907. " (ring %d, addr=0x%02x)\n", addr, index, offset);
  908. ret = 0;
  909. break;
  910. }
  911. return ret;
  912. }
  913. switch (addr) {
  914. case ROCKER_BOGUS_REG0:
  915. case ROCKER_BOGUS_REG1:
  916. case ROCKER_BOGUS_REG2:
  917. case ROCKER_BOGUS_REG3:
  918. ret = 0xDEADBABE;
  919. break;
  920. case ROCKER_TEST_REG:
  921. ret = r->test_reg * 2;
  922. break;
  923. case ROCKER_TEST_REG64:
  924. ret = (uint32_t)(r->test_reg64 * 2);
  925. break;
  926. case ROCKER_TEST_REG64 + 4:
  927. ret = (uint32_t)((r->test_reg64 * 2) >> 32);
  928. break;
  929. case ROCKER_TEST_DMA_SIZE:
  930. ret = r->test_dma_size;
  931. break;
  932. case ROCKER_TEST_DMA_ADDR:
  933. ret = (uint32_t)r->test_dma_addr;
  934. break;
  935. case ROCKER_TEST_DMA_ADDR + 4:
  936. ret = (uint32_t)(r->test_dma_addr >> 32);
  937. break;
  938. case ROCKER_PORT_PHYS_COUNT:
  939. ret = r->fp_ports;
  940. break;
  941. case ROCKER_PORT_PHYS_LINK_STATUS:
  942. ret = (uint32_t)rocker_port_phys_link_status(r);
  943. break;
  944. case ROCKER_PORT_PHYS_LINK_STATUS + 4:
  945. ret = (uint32_t)(rocker_port_phys_link_status(r) >> 32);
  946. break;
  947. case ROCKER_PORT_PHYS_ENABLE:
  948. ret = (uint32_t)rocker_port_phys_enable_read(r);
  949. break;
  950. case ROCKER_PORT_PHYS_ENABLE + 4:
  951. ret = (uint32_t)(rocker_port_phys_enable_read(r) >> 32);
  952. break;
  953. case ROCKER_SWITCH_ID:
  954. ret = (uint32_t)r->switch_id;
  955. break;
  956. case ROCKER_SWITCH_ID + 4:
  957. ret = (uint32_t)(r->switch_id >> 32);
  958. break;
  959. default:
  960. DPRINTF("not implemented read(l) addr=0x" HWADDR_FMT_plx "\n", addr);
  961. ret = 0;
  962. break;
  963. }
  964. return ret;
  965. }
  966. static uint64_t rocker_io_readq(void *opaque, hwaddr addr)
  967. {
  968. Rocker *r = opaque;
  969. uint64_t ret;
  970. if (rocker_addr_is_desc_reg(r, addr)) {
  971. unsigned index = ROCKER_RING_INDEX(addr);
  972. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  973. switch (addr & ROCKER_DMA_DESC_MASK) {
  974. case ROCKER_DMA_DESC_ADDR_OFFSET:
  975. ret = desc_ring_get_base_addr(r->rings[index]);
  976. break;
  977. default:
  978. DPRINTF("not implemented dma reg read(q) addr=0x" HWADDR_FMT_plx
  979. " (ring %d, addr=0x%02x)\n", addr, index, offset);
  980. ret = 0;
  981. break;
  982. }
  983. return ret;
  984. }
  985. switch (addr) {
  986. case ROCKER_BOGUS_REG0:
  987. case ROCKER_BOGUS_REG2:
  988. ret = 0xDEADBABEDEADBABEULL;
  989. break;
  990. case ROCKER_TEST_REG64:
  991. ret = r->test_reg64 * 2;
  992. break;
  993. case ROCKER_TEST_DMA_ADDR:
  994. ret = r->test_dma_addr;
  995. break;
  996. case ROCKER_PORT_PHYS_LINK_STATUS:
  997. ret = rocker_port_phys_link_status(r);
  998. break;
  999. case ROCKER_PORT_PHYS_ENABLE:
  1000. ret = rocker_port_phys_enable_read(r);
  1001. break;
  1002. case ROCKER_SWITCH_ID:
  1003. ret = r->switch_id;
  1004. break;
  1005. default:
  1006. DPRINTF("not implemented read(q) addr=0x" HWADDR_FMT_plx "\n", addr);
  1007. ret = 0;
  1008. break;
  1009. }
  1010. return ret;
  1011. }
  1012. static uint64_t rocker_mmio_read(void *opaque, hwaddr addr, unsigned size)
  1013. {
  1014. DPRINTF("Read %s addr " HWADDR_FMT_plx ", size %u\n",
  1015. rocker_reg_name(opaque, addr), addr, size);
  1016. switch (size) {
  1017. case 4:
  1018. return rocker_io_readl(opaque, addr);
  1019. case 8:
  1020. return rocker_io_readq(opaque, addr);
  1021. }
  1022. return -1;
  1023. }
/*
 * BAR0 register-space access ops.  Registers are little-endian; both
 * guest-visible (.valid) and implementation (.impl) access sizes are
 * constrained to 4..8 bytes -- the handlers themselves implement only
 * 4- and 8-byte accesses.
 */
static const MemoryRegionOps rocker_mmio_ops = {
    .read = rocker_mmio_read,
    .write = rocker_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
  1037. static void rocker_msix_vectors_unuse(Rocker *r,
  1038. unsigned int num_vectors)
  1039. {
  1040. PCIDevice *dev = PCI_DEVICE(r);
  1041. int i;
  1042. for (i = 0; i < num_vectors; i++) {
  1043. msix_vector_unuse(dev, i);
  1044. }
  1045. }
  1046. static void rocker_msix_vectors_use(Rocker *r, unsigned int num_vectors)
  1047. {
  1048. PCIDevice *dev = PCI_DEVICE(r);
  1049. int i;
  1050. for (i = 0; i < num_vectors; i++) {
  1051. msix_vector_use(dev, i);
  1052. }
  1053. }
/*
 * Initialize the MSI-X capability with ROCKER_MSIX_VEC_COUNT(fp_ports)
 * vectors (cmd, event, and tx/rx vectors per port -- see the ring
 * setup in pci_rocker_realize()).  The vector table and PBA both live
 * in the dedicated MSI-X BAR, at their respective fixed offsets.
 *
 * Returns 0 on success; otherwise returns msix_init()'s error code
 * with *errp set.  On success all vectors are marked in use.
 */
static int rocker_msix_init(Rocker *r, Error **errp)
{
    PCIDevice *dev = PCI_DEVICE(r);
    int err;

    err = msix_init(dev, ROCKER_MSIX_VEC_COUNT(r->fp_ports),
                    &r->msix_bar,
                    ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_TABLE_OFFSET,
                    &r->msix_bar,
                    ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_PBA_OFFSET,
                    0, errp);
    if (err) {
        return err;
    }

    rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));

    return 0;
}
/*
 * Tear down the MSI-X capability set up by rocker_msix_init().
 *
 * NOTE(review): vectors are unused *after* msix_uninit(), the reverse
 * of the init order (init -> use) -- confirm msix_vector_unuse() is
 * safe to call on an already-uninitialized capability.
 */
static void rocker_msix_uninit(Rocker *r)
{
    PCIDevice *dev = PCI_DEVICE(r);

    msix_uninit(dev, &r->msix_bar, &r->msix_bar);
    rocker_msix_vectors_unuse(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));
}
  1076. static World *rocker_world_type_by_name(Rocker *r, const char *name)
  1077. {
  1078. int i;
  1079. for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
  1080. if (strcmp(name, world_name(r->worlds[i])) == 0) {
  1081. return r->worlds[i];
  1082. }
  1083. }
  1084. return NULL;
  1085. }
/*
 * PCI realize handler: allocate worlds, register BAR0 (register space)
 * and the MSI-X BAR, validate the switch properties, then create the
 * descriptor rings and front-panel ports.  On failure, resources are
 * released via the goto ladder at the bottom, in reverse setup order.
 */
static void pci_rocker_realize(PCIDevice *dev, Error **errp)
{
    Rocker *r = ROCKER(dev);
    const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };
    const MACAddr dflt = { .a = { 0x52, 0x54, 0x00, 0x12, 0x35, 0x01 } };
    /* per-process instance counter; bumps the default MAC base so
     * multiple rocker devices don't collide */
    static int sw_index;
    int i, err = 0;

    /* allocate worlds */

    r->worlds[ROCKER_WORLD_TYPE_OF_DPA] = of_dpa_world_alloc(r);

    /* default to the OF-DPA world when no "world" property was given */
    if (!r->world_name) {
        r->world_name = g_strdup(world_name(r->worlds[ROCKER_WORLD_TYPE_OF_DPA]));
    }

    r->world_dflt = rocker_world_type_by_name(r, r->world_name);
    if (!r->world_dflt) {
        error_setg(errp,
                   "invalid argument requested world %s does not exist",
                   r->world_name);
        goto err_world_type_by_name;
    }

    /* set up memory-mapped region at BAR0 */

    memory_region_init_io(&r->mmio, OBJECT(r), &rocker_mmio_ops, r,
                          "rocker-mmio", ROCKER_PCI_BAR0_SIZE);
    pci_register_bar(dev, ROCKER_PCI_BAR0_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &r->mmio);

    /* set up memory-mapped region for MSI-X */

    memory_region_init(&r->msix_bar, OBJECT(r), "rocker-msix-bar",
                       ROCKER_PCI_MSIX_BAR_SIZE);
    pci_register_bar(dev, ROCKER_PCI_MSIX_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &r->msix_bar);

    /* MSI-X init */

    err = rocker_msix_init(r, errp);
    if (err) {
        goto err_msix_init;
    }

    /* validate switch properties */

    if (!r->name) {
        r->name = g_strdup(TYPE_ROCKER);
    }

    /* switch names must be unique across all rocker instances */
    if (rocker_find(r->name)) {
        error_setg(errp, "%s already exists", r->name);
        goto err_duplicate;
    }

    /* Rocker name is passed in port name requests to OS with the intention
     * that the name is used in interface names. Limit the length of the
     * rocker name to avoid naming problems in the OS. Also, adding the
     * port number as p# and unganged breakout b#, where # is at most 2
     * digits, so leave room for it too (-1 for string terminator, -3 for
     * p# and -3 for b#)
     */
#define ROCKER_IFNAMSIZ 16
#define MAX_ROCKER_NAME_LEN (ROCKER_IFNAMSIZ - 1 - 3 - 3)
    if (strlen(r->name) > MAX_ROCKER_NAME_LEN) {
        error_setg(errp,
                   "name too long; please shorten to at most %d chars",
                   MAX_ROCKER_NAME_LEN);
        goto err_name_too_long;
    }

    /* no fp_start_macaddr property given: derive one from the default
     * base MAC, with byte 4 offset per instance */
    if (memcmp(&r->fp_start_macaddr, &zero, sizeof(zero)) == 0) {
        memcpy(&r->fp_start_macaddr, &dflt, sizeof(dflt));
        r->fp_start_macaddr.a[4] += (sw_index++);
    }

    /* default switch_id: first 6 bytes taken from the start MAC */
    if (!r->switch_id) {
        memcpy(&r->switch_id, &r->fp_start_macaddr,
               sizeof(r->fp_start_macaddr));
    }

    /* silently clamp, rather than reject, an oversized port count */
    if (r->fp_ports > ROCKER_FP_PORTS_MAX) {
        r->fp_ports = ROCKER_FP_PORTS_MAX;
    }

    r->rings = g_new(DescRing *, rocker_pci_ring_count(r));

    /* Rings are ordered like this:
     * - command ring
     * - event ring
     * - port0 tx ring
     * - port0 rx ring
     * - port1 tx ring
     * - port1 rx ring
     * .....
     */

    for (i = 0; i < rocker_pci_ring_count(r); i++) {
        DescRing *ring = desc_ring_alloc(r, i);

        if (i == ROCKER_RING_CMD) {
            desc_ring_set_consume(ring, cmd_consume, ROCKER_MSIX_VEC_CMD);
        } else if (i == ROCKER_RING_EVENT) {
            /* event and rx rings get no consume callback (NULL) */
            desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_EVENT);
        } else if (i % 2 == 0) {
            /* even ring index (>= 2): tx ring for port (i - 2) / 2 */
            desc_ring_set_consume(ring, tx_consume,
                                  ROCKER_MSIX_VEC_TX((i - 2) / 2));
        } else if (i % 2 == 1) {
            /* odd ring index (>= 3): rx ring for port (i - 3) / 2 */
            desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_RX((i - 3) / 2));
        }

        r->rings[i] = ring;
    }

    for (i = 0; i < r->fp_ports; i++) {
        FpPort *port =
            fp_port_alloc(r, r->name, &r->fp_start_macaddr,
                          i, &r->fp_ports_peers[i]);

        r->fp_port[i] = port;
        fp_port_set_world(port, r->world_dflt);
    }

    /* make this instance findable by rocker_find() */
    QLIST_INSERT_HEAD(&rockers, r, next);

    return;

err_name_too_long:
err_duplicate:
    rocker_msix_uninit(r);
err_msix_init:
    object_unparent(OBJECT(&r->msix_bar));
    object_unparent(OBJECT(&r->mmio));
err_world_type_by_name:
    for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
        if (r->worlds[i]) {
            world_free(r->worlds[i]);
        }
    }
}
/*
 * PCI exit handler: release everything pci_rocker_realize() created --
 * ports, rings, MSI-X, memory regions, worlds, and peers.
 */
static void pci_rocker_uninit(PCIDevice *dev)
{
    Rocker *r = ROCKER(dev);
    int i;

    /* drop from the global list so rocker_find() can't return us */
    QLIST_REMOVE(r, next);

    for (i = 0; i < r->fp_ports; i++) {
        FpPort *port = r->fp_port[i];

        fp_port_free(port);
        r->fp_port[i] = NULL;
    }

    for (i = 0; i < rocker_pci_ring_count(r); i++) {
        if (r->rings[i]) {
            desc_ring_free(r->rings[i]);
        }
    }
    g_free(r->rings);

    rocker_msix_uninit(r);
    object_unparent(OBJECT(&r->msix_bar));
    object_unparent(OBJECT(&r->mmio));

    for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
        if (r->worlds[i]) {
            world_free(r->worlds[i]);
        }
    }
    g_free(r->fp_ports_peers);
}
  1226. static void rocker_reset(DeviceState *dev)
  1227. {
  1228. Rocker *r = ROCKER(dev);
  1229. int i;
  1230. for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
  1231. if (r->worlds[i]) {
  1232. world_reset(r->worlds[i]);
  1233. }
  1234. }
  1235. for (i = 0; i < r->fp_ports; i++) {
  1236. fp_port_reset(r->fp_port[i]);
  1237. fp_port_set_world(r->fp_port[i], r->world_dflt);
  1238. }
  1239. r->test_reg = 0;
  1240. r->test_reg64 = 0;
  1241. r->test_dma_addr = 0;
  1242. r->test_dma_size = 0;
  1243. for (i = 0; i < rocker_pci_ring_count(r); i++) {
  1244. desc_ring_reset(r->rings[i]);
  1245. }
  1246. DPRINTF("Reset done\n");
  1247. }
/* -device rocker,... properties. */
static Property rocker_properties[] = {
    /* switch name; realize() rejects duplicates and over-long names */
    DEFINE_PROP_STRING("name", Rocker, name),
    /* default world for ports; must match an allocated world's name */
    DEFINE_PROP_STRING("world", Rocker, world_name),
    /* base MAC address; defaults to 52:54:00:12:35:01 (+instance) */
    DEFINE_PROP_MACADDR("fp_start_macaddr", Rocker,
                        fp_start_macaddr),
    /* 64-bit switch id; 0 means derive from fp_start_macaddr */
    DEFINE_PROP_UINT64("switch_id", Rocker,
                       switch_id, 0),
    /* netdev backends, one per front-panel port */
    DEFINE_PROP_ARRAY("ports", Rocker, fp_ports,
                      fp_ports_peers, qdev_prop_netdev, NICPeers),
    DEFINE_PROP_END_OF_LIST(),
};
/* Migration is not supported; mark the device unmigratable. */
static const VMStateDescription rocker_vmsd = {
    .name = TYPE_ROCKER,
    .unmigratable = 1,
};
/*
 * Class init: set the PCI identity (Red Hat vendor, rocker device id,
 * generic-network class) and wire up realize/exit/reset/properties.
 */
static void rocker_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);

    k->realize = pci_rocker_realize;
    k->exit = pci_rocker_uninit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT;
    k->device_id = PCI_DEVICE_ID_REDHAT_ROCKER;
    k->revision = ROCKER_PCI_REVISION;
    k->class_id = PCI_CLASS_NETWORK_OTHER;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    dc->desc = "Rocker Switch";
    device_class_set_legacy_reset(dc, rocker_reset);
    device_class_set_props(dc, rocker_properties);
    dc->vmsd = &rocker_vmsd;
}
/* QOM type: a conventional (non-express) PCI device. */
static const TypeInfo rocker_info = {
    .name = TYPE_ROCKER,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(Rocker),
    .class_init = rocker_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
/* Register the rocker device type with QOM at module init time. */
static void rocker_register_types(void)
{
    type_register_static(&rocker_info);
}

type_init(rocker_register_types)