2
0

rocker.c 43 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542
  1. /*
  2. * QEMU rocker switch emulation - PCI device
  3. *
  4. * Copyright (c) 2014 Scott Feldman <sfeldma@gmail.com>
  5. * Copyright (c) 2014 Jiri Pirko <jiri@resnulli.us>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; either version 2 of the License, or
  10. * (at your option) any later version.
  11. *
  12. * This program is distributed in the hope that it will be useful,
  13. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  15. * GNU General Public License for more details.
  16. */
  17. #include "qemu/osdep.h"
  18. #include "hw/pci/pci.h"
  19. #include "hw/qdev-properties.h"
  20. #include "migration/vmstate.h"
  21. #include "hw/pci/msix.h"
  22. #include "net/net.h"
  23. #include "net/eth.h"
  24. #include "qapi/error.h"
  25. #include "qapi/qapi-commands-rocker.h"
  26. #include "qemu/iov.h"
  27. #include "qemu/module.h"
  28. #include "qemu/bitops.h"
  29. #include "rocker.h"
  30. #include "rocker_hw.h"
  31. #include "rocker_fp.h"
  32. #include "rocker_desc.h"
  33. #include "rocker_tlv.h"
  34. #include "rocker_world.h"
  35. #include "rocker_of_dpa.h"
/*
 * Per-device state for one emulated rocker switch.  Instances are kept
 * on the module-global "rockers" list so they can be looked up by name
 * (see rocker_find()).
 */
struct rocker {
    /* private */
    PCIDevice parent_obj;
    /* public */
    MemoryRegion mmio;      /* register space */
    MemoryRegion msix_bar;  /* MSI-X vector table/PBA BAR */
    /* switch configuration */
    char *name;                  /* switch name */
    char *world_name;            /* world name */
    uint32_t fp_ports;           /* front-panel port count */
    NICPeers *fp_ports_peers;    /* backend peers for the front-panel ports */
    MACAddr fp_start_macaddr;    /* front-panel port 0 mac addr */
    uint64_t switch_id;          /* switch id */
    /* front-panel ports */
    FpPort *fp_port[ROCKER_FP_PORTS_MAX];
    /* register backings */
    uint32_t test_reg;
    uint64_t test_reg64;
    dma_addr_t test_dma_addr;
    uint32_t test_dma_size;
    uint64_t lower32; /* lower 32-bit val in 2-part 64-bit access */
    /* desc rings */
    DescRing **rings;
    /* switch worlds */
    World *worlds[ROCKER_WORLD_TYPE_MAX];
    World *world_dflt; /* default world */
    QLIST_ENTRY(rocker) next; /* link on the global "rockers" list */
};
#define TYPE_ROCKER "rocker"
#define ROCKER(obj) \
    OBJECT_CHECK(Rocker, (obj), TYPE_ROCKER)

/* All rocker switch instances, for lookup by name via rocker_find() */
static QLIST_HEAD(, rocker) rockers;
  68. Rocker *rocker_find(const char *name)
  69. {
  70. Rocker *r;
  71. QLIST_FOREACH(r, &rockers, next)
  72. if (strcmp(r->name, name) == 0) {
  73. return r;
  74. }
  75. return NULL;
  76. }
  77. World *rocker_get_world(Rocker *r, enum rocker_world_type type)
  78. {
  79. if (type < ROCKER_WORLD_TYPE_MAX) {
  80. return r->worlds[type];
  81. }
  82. return NULL;
  83. }
  84. RockerSwitch *qmp_query_rocker(const char *name, Error **errp)
  85. {
  86. RockerSwitch *rocker;
  87. Rocker *r;
  88. r = rocker_find(name);
  89. if (!r) {
  90. error_setg(errp, "rocker %s not found", name);
  91. return NULL;
  92. }
  93. rocker = g_new0(RockerSwitch, 1);
  94. rocker->name = g_strdup(r->name);
  95. rocker->id = r->switch_id;
  96. rocker->ports = r->fp_ports;
  97. return rocker;
  98. }
  99. RockerPortList *qmp_query_rocker_ports(const char *name, Error **errp)
  100. {
  101. RockerPortList *list = NULL;
  102. Rocker *r;
  103. int i;
  104. r = rocker_find(name);
  105. if (!r) {
  106. error_setg(errp, "rocker %s not found", name);
  107. return NULL;
  108. }
  109. for (i = r->fp_ports - 1; i >= 0; i--) {
  110. RockerPortList *info = g_malloc0(sizeof(*info));
  111. info->value = g_malloc0(sizeof(*info->value));
  112. struct fp_port *port = r->fp_port[i];
  113. fp_port_get_info(port, info);
  114. info->next = list;
  115. list = info;
  116. }
  117. return list;
  118. }
/* Number of front-panel ports on this switch. */
uint32_t rocker_fp_ports(Rocker *r)
{
    return r->fp_ports;
}
/*
 * Map a tx descriptor ring back to its physical port number (1-based).
 * Ring layout: index 0 is the cmd ring, index 1 the event ring, then
 * each port owns a tx/rx ring pair starting at index 2 (see
 * rocker_get_rx_ring_by_pport() and rocker_pci_ring_count()).
 * The 'r' parameter is unused but kept for symmetry with the other
 * ring helpers.
 */
static uint32_t rocker_get_pport_by_tx_ring(Rocker *r,
                                            DescRing *ring)
{
    return (desc_ring_index(ring) - 2) / 2 + 1;
}
  128. static int tx_consume(Rocker *r, DescInfo *info)
  129. {
  130. PCIDevice *dev = PCI_DEVICE(r);
  131. char *buf = desc_get_buf(info, true);
  132. RockerTlv *tlv_frag;
  133. RockerTlv *tlvs[ROCKER_TLV_TX_MAX + 1];
  134. struct iovec iov[ROCKER_TX_FRAGS_MAX] = { { 0, }, };
  135. uint32_t pport;
  136. uint32_t port;
  137. uint16_t tx_offload = ROCKER_TX_OFFLOAD_NONE;
  138. uint16_t tx_l3_csum_off = 0;
  139. uint16_t tx_tso_mss = 0;
  140. uint16_t tx_tso_hdr_len = 0;
  141. int iovcnt = 0;
  142. int err = ROCKER_OK;
  143. int rem;
  144. int i;
  145. if (!buf) {
  146. return -ROCKER_ENXIO;
  147. }
  148. rocker_tlv_parse(tlvs, ROCKER_TLV_TX_MAX, buf, desc_tlv_size(info));
  149. if (!tlvs[ROCKER_TLV_TX_FRAGS]) {
  150. return -ROCKER_EINVAL;
  151. }
  152. pport = rocker_get_pport_by_tx_ring(r, desc_get_ring(info));
  153. if (!fp_port_from_pport(pport, &port)) {
  154. return -ROCKER_EINVAL;
  155. }
  156. if (tlvs[ROCKER_TLV_TX_OFFLOAD]) {
  157. tx_offload = rocker_tlv_get_u8(tlvs[ROCKER_TLV_TX_OFFLOAD]);
  158. }
  159. switch (tx_offload) {
  160. case ROCKER_TX_OFFLOAD_L3_CSUM:
  161. if (!tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
  162. return -ROCKER_EINVAL;
  163. }
  164. break;
  165. case ROCKER_TX_OFFLOAD_TSO:
  166. if (!tlvs[ROCKER_TLV_TX_TSO_MSS] ||
  167. !tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
  168. return -ROCKER_EINVAL;
  169. }
  170. break;
  171. }
  172. if (tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
  173. tx_l3_csum_off = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]);
  174. }
  175. if (tlvs[ROCKER_TLV_TX_TSO_MSS]) {
  176. tx_tso_mss = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_MSS]);
  177. }
  178. if (tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
  179. tx_tso_hdr_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]);
  180. }
  181. rocker_tlv_for_each_nested(tlv_frag, tlvs[ROCKER_TLV_TX_FRAGS], rem) {
  182. hwaddr frag_addr;
  183. uint16_t frag_len;
  184. if (rocker_tlv_type(tlv_frag) != ROCKER_TLV_TX_FRAG) {
  185. err = -ROCKER_EINVAL;
  186. goto err_bad_attr;
  187. }
  188. rocker_tlv_parse_nested(tlvs, ROCKER_TLV_TX_FRAG_ATTR_MAX, tlv_frag);
  189. if (!tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
  190. !tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) {
  191. err = -ROCKER_EINVAL;
  192. goto err_bad_attr;
  193. }
  194. frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
  195. frag_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
  196. if (iovcnt >= ROCKER_TX_FRAGS_MAX) {
  197. goto err_too_many_frags;
  198. }
  199. iov[iovcnt].iov_len = frag_len;
  200. iov[iovcnt].iov_base = g_malloc(frag_len);
  201. pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base,
  202. iov[iovcnt].iov_len);
  203. iovcnt++;
  204. }
  205. if (iovcnt) {
  206. /* XXX perform Tx offloads */
  207. /* XXX silence compiler for now */
  208. tx_l3_csum_off += tx_tso_mss = tx_tso_hdr_len = 0;
  209. }
  210. err = fp_port_eg(r->fp_port[port], iov, iovcnt);
  211. err_too_many_frags:
  212. err_bad_attr:
  213. for (i = 0; i < ROCKER_TX_FRAGS_MAX; i++) {
  214. g_free(iov[i].iov_base);
  215. }
  216. return err;
  217. }
/*
 * Handle ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS: look up the port named
 * by the PPORT attribute and write its current settings back into the
 * command descriptor buffer as a nested ROCKER_TLV_CMD_INFO TLV.
 * Returns the desc_set_buf() result on success, or a negative
 * -ROCKER_E* code on bad input / too-small buffer.
 */
static int cmd_get_port_settings(Rocker *r,
                                 DescInfo *info, char *buf,
                                 RockerTlv *cmd_info_tlv)
{
    RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
    RockerTlv *nest;
    FpPort *fp_port;
    uint32_t pport;
    uint32_t port;
    uint32_t speed;
    uint8_t duplex;
    uint8_t autoneg;
    uint8_t learning;
    char *phys_name;
    MACAddr macaddr;
    enum rocker_world_type mode;
    size_t tlv_size;
    int pos;
    int err;

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                            cmd_info_tlv);

    if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) {
        return -ROCKER_EINVAL;
    }

    pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]);
    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];

    err = fp_port_get_settings(fp_port, &speed, &duplex, &autoneg);
    if (err) {
        return err;
    }

    fp_port_get_macaddr(fp_port, &macaddr);
    mode = world_type(fp_port_get_world(fp_port));
    learning = fp_port_get_learning(fp_port);
    phys_name = fp_port_get_name(fp_port);

    /* Pre-compute the response size so a too-small descriptor buffer
     * is rejected before anything is written into it. */
    tlv_size = rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   pport */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /*   speed */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   duplex */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   autoneg */
               rocker_tlv_total_size(sizeof(macaddr.a)) + /*   macaddr */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   mode */
               rocker_tlv_total_size(sizeof(uint8_t)) +   /*   learning */
               rocker_tlv_total_size(strlen(phys_name));  /*   phys name */

    if (tlv_size > desc_buf_size(info)) {
        return -ROCKER_EMSGSIZE;
    }

    pos = 0;
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_CMD_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PPORT, pport);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_SPEED, speed);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX, duplex);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG, autoneg);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR,
                   sizeof(macaddr.a), macaddr.a);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_MODE, mode);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING,
                      learning);
    /* phys name is written without a trailing NUL; length is implied
     * by the TLV length field */
    rocker_tlv_put(buf, &pos, ROCKER_TLV_CMD_PORT_SETTINGS_PHYS_NAME,
                   strlen(phys_name), phys_name);
    rocker_tlv_nest_end(buf, &pos, nest);

    return desc_set_buf(info, tlv_size);
}
/*
 * Handle ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS: apply any of the
 * optional settings attributes (speed/duplex/autoneg as a trio,
 * macaddr, mode, learning) present in the nested command TLV to the
 * port named by the required PPORT attribute.
 * Returns ROCKER_OK or a negative -ROCKER_E* code.
 */
static int cmd_set_port_settings(Rocker *r,
                                 RockerTlv *cmd_info_tlv)
{
    RockerTlv *tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MAX + 1];
    FpPort *fp_port;
    uint32_t pport;
    uint32_t port;
    uint32_t speed;
    uint8_t duplex;
    uint8_t autoneg;
    uint8_t learning;
    MACAddr macaddr;
    enum rocker_world_type mode;
    int err;

    rocker_tlv_parse_nested(tlvs, ROCKER_TLV_CMD_PORT_SETTINGS_MAX,
                            cmd_info_tlv);

    if (!tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]) {
        return -ROCKER_EINVAL;
    }

    pport = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_PPORT]);
    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];

    /* speed/duplex/autoneg are only applied when all three are given */
    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED] &&
        tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX] &&
        tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]) {
        speed = rocker_tlv_get_le32(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_SPEED]);
        duplex = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_DUPLEX]);
        autoneg = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_AUTONEG]);

        err = fp_port_set_settings(fp_port, speed, duplex, autoneg);
        if (err) {
            return err;
        }
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) {
        /* reject a MAC attribute that isn't exactly 6 bytes */
        if (rocker_tlv_len(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]) !=
            sizeof(macaddr.a)) {
            return -ROCKER_EINVAL;
        }
        memcpy(macaddr.a,
               rocker_tlv_data(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MACADDR]),
               sizeof(macaddr.a));
        fp_port_set_macaddr(fp_port, &macaddr);
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]) {
        mode = rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_MODE]);
        if (mode >= ROCKER_WORLD_TYPE_MAX) {
            return -ROCKER_EINVAL;
        }
        /* We don't support world change. */
        if (!fp_port_check_world(fp_port, r->worlds[mode])) {
            return -ROCKER_EINVAL;
        }
    }

    if (tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]) {
        learning =
            rocker_tlv_get_u8(tlvs[ROCKER_TLV_CMD_PORT_SETTINGS_LEARNING]);
        fp_port_set_learning(fp_port, learning);
    }

    return ROCKER_OK;
}
/*
 * Consume one command-ring descriptor: decode the command type TLV and
 * dispatch either to the OF-DPA world (flow/group commands) or to the
 * generic port-settings handlers.  Returns ROCKER_OK or a negative
 * -ROCKER_E* code; the caller posts the descriptor back with this
 * status.
 */
static int cmd_consume(Rocker *r, DescInfo *info)
{
    char *buf = desc_get_buf(info, false);
    RockerTlv *tlvs[ROCKER_TLV_CMD_MAX + 1];
    RockerTlv *info_tlv;
    World *world;
    uint16_t cmd;
    int err;

    if (!buf) {
        return -ROCKER_ENXIO;
    }

    rocker_tlv_parse(tlvs, ROCKER_TLV_CMD_MAX, buf, desc_tlv_size(info));

    /* both the command type and its nested info are mandatory */
    if (!tlvs[ROCKER_TLV_CMD_TYPE] || !tlvs[ROCKER_TLV_CMD_INFO]) {
        return -ROCKER_EINVAL;
    }

    cmd = rocker_tlv_get_le16(tlvs[ROCKER_TLV_CMD_TYPE]);
    info_tlv = tlvs[ROCKER_TLV_CMD_INFO];

    /* This might be reworked to something like this:
     * Every world will have an array of command handlers from
     * ROCKER_TLV_CMD_TYPE_UNSPEC to ROCKER_TLV_CMD_TYPE_MAX. There is
     * up to each world to implement whatever command it want.
     * It can reference "generic" commands as cmd_set_port_settings or
     * cmd_get_port_settings
     */

    switch (cmd) {
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_FLOW_GET_STATS:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_ADD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_MOD:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_DEL:
    case ROCKER_TLV_CMD_TYPE_OF_DPA_GROUP_GET_STATS:
        /* flow/group commands always go to the OF-DPA world */
        world = r->worlds[ROCKER_WORLD_TYPE_OF_DPA];
        err = world_do_cmd(world, info, buf, cmd, info_tlv);
        break;
    case ROCKER_TLV_CMD_TYPE_GET_PORT_SETTINGS:
        err = cmd_get_port_settings(r, info, buf, info_tlv);
        break;
    case ROCKER_TLV_CMD_TYPE_SET_PORT_SETTINGS:
        err = cmd_set_port_settings(r, info_tlv);
        break;
    default:
        err = -ROCKER_EINVAL;
        break;
    }

    return err;
}
  393. static void rocker_msix_irq(Rocker *r, unsigned vector)
  394. {
  395. PCIDevice *dev = PCI_DEVICE(r);
  396. DPRINTF("MSI-X notify request for vector %d\n", vector);
  397. if (vector >= ROCKER_MSIX_VEC_COUNT(r->fp_ports)) {
  398. DPRINTF("incorrect vector %d\n", vector);
  399. return;
  400. }
  401. msix_notify(dev, vector);
  402. }
/*
 * Post a ROCKER_TLV_EVENT_TYPE_LINK_CHANGED event for pport on the
 * event ring, and kick the event MSI-X vector when the ring reports
 * the posted descriptor needs signalling.  Returns ROCKER_OK or a
 * negative -ROCKER_E* code.
 */
int rocker_event_link_changed(Rocker *r, uint32_t pport, bool link_up)
{
    DescRing *ring = r->rings[ROCKER_RING_EVENT];
    DescInfo *info = desc_ring_fetch_desc(ring);
    RockerTlv *nest;
    char *buf;
    size_t tlv_size;
    int pos;
    int err;

    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* event type */
               rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /* pport */
               rocker_tlv_total_size(sizeof(uint8_t));    /* link up */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto err_too_big;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENOMEM;
        goto err_no_mem;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE,
                        ROCKER_TLV_EVENT_TYPE_LINK_CHANGED);
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_PPORT, pport);
    rocker_tlv_put_u8(buf, &pos, ROCKER_TLV_EVENT_LINK_CHANGED_LINKUP,
                      link_up ? 1 : 0);
    rocker_tlv_nest_end(buf, &pos, nest);

    err = desc_set_buf(info, tlv_size);

err_too_big:
err_no_mem:
    /* post even on error so the guest sees the completion status */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT);
    }

    return err;
}
/*
 * Post a ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN (address learning) event
 * for pport on the event ring.  Quietly returns ROCKER_OK without
 * posting anything when learning is disabled on the port.  Kicks the
 * event MSI-X vector when the ring reports the posted descriptor needs
 * signalling.  Returns ROCKER_OK or a negative -ROCKER_E* code.
 */
int rocker_event_mac_vlan_seen(Rocker *r, uint32_t pport, uint8_t *addr,
                               uint16_t vlan_id)
{
    DescRing *ring = r->rings[ROCKER_RING_EVENT];
    DescInfo *info;
    FpPort *fp_port;
    uint32_t port;
    RockerTlv *nest;
    char *buf;
    size_t tlv_size;
    int pos;
    int err;

    if (!fp_port_from_pport(pport, &port)) {
        return -ROCKER_EINVAL;
    }
    fp_port = r->fp_port[port];
    /* only report addresses on ports that are actually learning */
    if (!fp_port_get_learning(fp_port)) {
        return ROCKER_OK;
    }

    info = desc_ring_fetch_desc(ring);
    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* event type */
               rocker_tlv_total_size(0) +                 /* nest */
               rocker_tlv_total_size(sizeof(uint32_t)) +  /* pport */
               rocker_tlv_total_size(ETH_ALEN) +          /* mac addr */
               rocker_tlv_total_size(sizeof(uint16_t));   /* vlan_id */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto err_too_big;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENOMEM;
        goto err_no_mem;
    }

    pos = 0;
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_TYPE,
                        ROCKER_TLV_EVENT_TYPE_MAC_VLAN_SEEN);
    nest = rocker_tlv_nest_start(buf, &pos, ROCKER_TLV_EVENT_INFO);
    rocker_tlv_put_le32(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_PPORT, pport);
    rocker_tlv_put(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_MAC, ETH_ALEN, addr);
    rocker_tlv_put_u16(buf, &pos, ROCKER_TLV_EVENT_MAC_VLAN_VLAN_ID, vlan_id);
    rocker_tlv_nest_end(buf, &pos, nest);

    err = desc_set_buf(info, tlv_size);

err_too_big:
err_no_mem:
    /* post even on error so the guest sees the completion status */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_EVENT);
    }

    return err;
}
/*
 * Rx descriptor ring for a physical port (1-based pport).  Rings 0/1
 * are cmd/event; each port then owns a tx/rx pair, the rx ring being
 * the odd index of the pair (the inverse of
 * rocker_get_pport_by_tx_ring()).
 */
static DescRing *rocker_get_rx_ring_by_pport(Rocker *r,
                                             uint32_t pport)
{
    return r->rings[(pport - 1) * 2 + 3];
}
/*
 * Deliver a received frame to the guest: DMA the frame data into the
 * guest-supplied rx fragment buffer and rewrite the descriptor's TLVs
 * with the receive metadata (flags, csum, fragment length).  Kicks the
 * port's rx MSI-X vector when the ring reports the posted descriptor
 * needs signalling.  Returns ROCKER_OK or a negative -ROCKER_E* code.
 */
int rx_produce(World *world, uint32_t pport,
               const struct iovec *iov, int iovcnt, uint8_t copy_to_cpu)
{
    Rocker *r = world_rocker(world);
    PCIDevice *dev = (PCIDevice *)r;
    DescRing *ring = rocker_get_rx_ring_by_pport(r, pport);
    DescInfo *info = desc_ring_fetch_desc(ring);
    char *data;
    size_t data_size = iov_size(iov, iovcnt);
    char *buf;
    uint16_t rx_flags = 0;
    uint16_t rx_csum = 0;
    size_t tlv_size;
    RockerTlv *tlvs[ROCKER_TLV_RX_MAX + 1];
    hwaddr frag_addr;
    uint16_t frag_max_len;
    int pos;
    int err;

    if (!info) {
        return -ROCKER_ENOBUFS;
    }

    buf = desc_get_buf(info, false);
    if (!buf) {
        err = -ROCKER_ENXIO;
        goto out;
    }

    /* the guest pre-loads the descriptor with the rx buffer location */
    rocker_tlv_parse(tlvs, ROCKER_TLV_RX_MAX, buf, desc_tlv_size(info));
    if (!tlvs[ROCKER_TLV_RX_FRAG_ADDR] ||
        !tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]) {
        err = -ROCKER_EINVAL;
        goto out;
    }

    frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_RX_FRAG_ADDR]);
    frag_max_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_RX_FRAG_MAX_LEN]);

    if (data_size > frag_max_len) {
        err = -ROCKER_EMSGSIZE;
        goto out;
    }

    if (copy_to_cpu) {
        rx_flags |= ROCKER_RX_FLAGS_FWD_OFFLOAD;
    }

    /* XXX calc rx flags/csum */

    tlv_size = rocker_tlv_total_size(sizeof(uint16_t)) +  /* flags */
               rocker_tlv_total_size(sizeof(uint16_t)) +  /* csum */
               rocker_tlv_total_size(sizeof(uint64_t)) +  /* frag addr */
               rocker_tlv_total_size(sizeof(uint16_t)) +  /* frag max len */
               rocker_tlv_total_size(sizeof(uint16_t));   /* frag len */

    if (tlv_size > desc_buf_size(info)) {
        err = -ROCKER_EMSGSIZE;
        goto out;
    }

    /* TODO:
     * iov dma write can be optimized in similar way e1000 does it in
     * e1000_receive_iov. But maybe it would make sense to introduce
     * generic helper iov_dma_write.
     */

    data = g_malloc(data_size);
    iov_to_buf(iov, iovcnt, 0, data, data_size);
    pci_dma_write(dev, frag_addr, data, data_size);
    g_free(data);

    pos = 0;
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FLAGS, rx_flags);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_CSUM, rx_csum);
    rocker_tlv_put_le64(buf, &pos, ROCKER_TLV_RX_FRAG_ADDR, frag_addr);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_MAX_LEN, frag_max_len);
    rocker_tlv_put_le16(buf, &pos, ROCKER_TLV_RX_FRAG_LEN, data_size);

    err = desc_set_buf(info, tlv_size);

out:
    /* post even on error so the guest sees the completion status */
    if (desc_ring_post_desc(ring, err)) {
        rocker_msix_irq(r, ROCKER_MSIX_VEC_RX(pport - 1));
    }

    return err;
}
  575. int rocker_port_eg(Rocker *r, uint32_t pport,
  576. const struct iovec *iov, int iovcnt)
  577. {
  578. FpPort *fp_port;
  579. uint32_t port;
  580. if (!fp_port_from_pport(pport, &port)) {
  581. return -ROCKER_EINVAL;
  582. }
  583. fp_port = r->fp_port[port];
  584. return fp_port_eg(fp_port, iov, iovcnt);
  585. }
  586. static void rocker_test_dma_ctrl(Rocker *r, uint32_t val)
  587. {
  588. PCIDevice *dev = PCI_DEVICE(r);
  589. char *buf;
  590. int i;
  591. buf = g_malloc(r->test_dma_size);
  592. switch (val) {
  593. case ROCKER_TEST_DMA_CTRL_CLEAR:
  594. memset(buf, 0, r->test_dma_size);
  595. break;
  596. case ROCKER_TEST_DMA_CTRL_FILL:
  597. memset(buf, 0x96, r->test_dma_size);
  598. break;
  599. case ROCKER_TEST_DMA_CTRL_INVERT:
  600. pci_dma_read(dev, r->test_dma_addr, buf, r->test_dma_size);
  601. for (i = 0; i < r->test_dma_size; i++) {
  602. buf[i] = ~buf[i];
  603. }
  604. break;
  605. default:
  606. DPRINTF("not test dma control val=0x%08x\n", val);
  607. goto err_out;
  608. }
  609. pci_dma_write(dev, r->test_dma_addr, buf, r->test_dma_size);
  610. rocker_msix_irq(r, ROCKER_MSIX_VEC_TEST);
  611. err_out:
  612. g_free(buf);
  613. }
  614. static void rocker_reset(DeviceState *dev);
  615. static void rocker_control(Rocker *r, uint32_t val)
  616. {
  617. if (val & ROCKER_CONTROL_RESET) {
  618. rocker_reset(DEVICE(r));
  619. }
  620. }
  621. static int rocker_pci_ring_count(Rocker *r)
  622. {
  623. /* There are:
  624. * - command ring
  625. * - event ring
  626. * - tx and rx ring per each port
  627. */
  628. return 2 + (2 * r->fp_ports);
  629. }
  630. static bool rocker_addr_is_desc_reg(Rocker *r, hwaddr addr)
  631. {
  632. hwaddr start = ROCKER_DMA_DESC_BASE;
  633. hwaddr end = start + (ROCKER_DMA_DESC_SIZE * rocker_pci_ring_count(r));
  634. return addr >= start && addr < end;
  635. }
  636. static void rocker_port_phys_enable_write(Rocker *r, uint64_t new)
  637. {
  638. int i;
  639. bool old_enabled;
  640. bool new_enabled;
  641. FpPort *fp_port;
  642. for (i = 0; i < r->fp_ports; i++) {
  643. fp_port = r->fp_port[i];
  644. old_enabled = fp_port_enabled(fp_port);
  645. new_enabled = (new >> (i + 1)) & 0x1;
  646. if (new_enabled == old_enabled) {
  647. continue;
  648. }
  649. if (new_enabled) {
  650. fp_port_enable(r->fp_port[i]);
  651. } else {
  652. fp_port_disable(r->fp_port[i]);
  653. }
  654. }
  655. }
/*
 * 32-bit MMIO write handler.  Addresses inside the descriptor-ring
 * window are decoded into (ring index, register offset); everything
 * else is a flat register switch.  64-bit registers written as two
 * 32-bit halves stage the low half in r->lower32 and commit on the
 * high-half (+4) write.
 */
static void rocker_io_writel(void *opaque, hwaddr addr, uint32_t val)
{
    Rocker *r = opaque;

    if (rocker_addr_is_desc_reg(r, addr)) {
        unsigned index = ROCKER_RING_INDEX(addr);
        unsigned offset = addr & ROCKER_DMA_DESC_MASK;

        switch (offset) {
        case ROCKER_DMA_DESC_ADDR_OFFSET:
            /* stage low half of the 64-bit ring base address */
            r->lower32 = (uint64_t)val;
            break;
        case ROCKER_DMA_DESC_ADDR_OFFSET + 4:
            /* high half arrives: commit the full base address */
            desc_ring_set_base_addr(r->rings[index],
                                    ((uint64_t)val) << 32 | r->lower32);
            r->lower32 = 0;
            break;
        case ROCKER_DMA_DESC_SIZE_OFFSET:
            desc_ring_set_size(r->rings[index], val);
            break;
        case ROCKER_DMA_DESC_HEAD_OFFSET:
            /* head moved: interrupt if the ring consumed descriptors */
            if (desc_ring_set_head(r->rings[index], val)) {
                rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index]));
            }
            break;
        case ROCKER_DMA_DESC_CTRL_OFFSET:
            desc_ring_set_ctrl(r->rings[index], val);
            break;
        case ROCKER_DMA_DESC_CREDITS_OFFSET:
            /* guest returns credits: interrupt if more work is pending */
            if (desc_ring_ret_credits(r->rings[index], val)) {
                rocker_msix_irq(r, desc_ring_get_msix_vector(r->rings[index]));
            }
            break;
        default:
            DPRINTF("not implemented dma reg write(l) addr=0x" TARGET_FMT_plx
                    " val=0x%08x (ring %d, addr=0x%02x)\n",
                    addr, val, index, offset);
            break;
        }
        return;
    }

    switch (addr) {
    case ROCKER_TEST_REG:
        r->test_reg = val;
        break;
    case ROCKER_TEST_REG64:
    case ROCKER_TEST_DMA_ADDR:
    case ROCKER_PORT_PHYS_ENABLE:
        /* low half of a 64-bit register; +4 write commits below */
        r->lower32 = (uint64_t)val;
        break;
    case ROCKER_TEST_REG64 + 4:
        r->test_reg64 = ((uint64_t)val) << 32 | r->lower32;
        r->lower32 = 0;
        break;
    case ROCKER_TEST_IRQ:
        /* guest-requested interrupt on an arbitrary vector */
        rocker_msix_irq(r, val);
        break;
    case ROCKER_TEST_DMA_SIZE:
        /* only the low 16 bits are honoured */
        r->test_dma_size = val & 0xFFFF;
        break;
    case ROCKER_TEST_DMA_ADDR + 4:
        r->test_dma_addr = ((uint64_t)val) << 32 | r->lower32;
        r->lower32 = 0;
        break;
    case ROCKER_TEST_DMA_CTRL:
        rocker_test_dma_ctrl(r, val);
        break;
    case ROCKER_CONTROL:
        rocker_control(r, val);
        break;
    case ROCKER_PORT_PHYS_ENABLE + 4:
        rocker_port_phys_enable_write(r, ((uint64_t)val) << 32 | r->lower32);
        r->lower32 = 0;
        break;
    default:
        DPRINTF("not implemented write(l) addr=0x" TARGET_FMT_plx
                " val=0x%08x\n", addr, val);
        break;
    }
}
/*
 * 64-bit MMIO write handler.  Same register decode as
 * rocker_io_writel(), but the full 64-bit value arrives in one access
 * so no r->lower32 staging is needed.
 */
static void rocker_io_writeq(void *opaque, hwaddr addr, uint64_t val)
{
    Rocker *r = opaque;

    if (rocker_addr_is_desc_reg(r, addr)) {
        unsigned index = ROCKER_RING_INDEX(addr);
        unsigned offset = addr & ROCKER_DMA_DESC_MASK;

        switch (offset) {
        case ROCKER_DMA_DESC_ADDR_OFFSET:
            desc_ring_set_base_addr(r->rings[index], val);
            break;
        default:
            DPRINTF("not implemented dma reg write(q) addr=0x" TARGET_FMT_plx
                    " val=0x" TARGET_FMT_plx " (ring %d, offset=0x%02x)\n",
                    addr, val, index, offset);
            break;
        }
        return;
    }

    switch (addr) {
    case ROCKER_TEST_REG64:
        r->test_reg64 = val;
        break;
    case ROCKER_TEST_DMA_ADDR:
        r->test_dma_addr = val;
        break;
    case ROCKER_PORT_PHYS_ENABLE:
        rocker_port_phys_enable_write(r, val);
        break;
    default:
        DPRINTF("not implemented write(q) addr=0x" TARGET_FMT_plx
                " val=0x" TARGET_FMT_plx "\n", addr, val);
        break;
    }
}
  768. #ifdef DEBUG_ROCKER
  769. #define regname(reg) case (reg): return #reg
  770. static const char *rocker_reg_name(void *opaque, hwaddr addr)
  771. {
  772. Rocker *r = opaque;
  773. if (rocker_addr_is_desc_reg(r, addr)) {
  774. unsigned index = ROCKER_RING_INDEX(addr);
  775. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  776. static char buf[100];
  777. char ring_name[10];
  778. switch (index) {
  779. case 0:
  780. sprintf(ring_name, "cmd");
  781. break;
  782. case 1:
  783. sprintf(ring_name, "event");
  784. break;
  785. default:
  786. sprintf(ring_name, "%s-%d", index % 2 ? "rx" : "tx",
  787. (index - 2) / 2);
  788. }
  789. switch (offset) {
  790. case ROCKER_DMA_DESC_ADDR_OFFSET:
  791. sprintf(buf, "Ring[%s] ADDR", ring_name);
  792. return buf;
  793. case ROCKER_DMA_DESC_ADDR_OFFSET+4:
  794. sprintf(buf, "Ring[%s] ADDR+4", ring_name);
  795. return buf;
  796. case ROCKER_DMA_DESC_SIZE_OFFSET:
  797. sprintf(buf, "Ring[%s] SIZE", ring_name);
  798. return buf;
  799. case ROCKER_DMA_DESC_HEAD_OFFSET:
  800. sprintf(buf, "Ring[%s] HEAD", ring_name);
  801. return buf;
  802. case ROCKER_DMA_DESC_TAIL_OFFSET:
  803. sprintf(buf, "Ring[%s] TAIL", ring_name);
  804. return buf;
  805. case ROCKER_DMA_DESC_CTRL_OFFSET:
  806. sprintf(buf, "Ring[%s] CTRL", ring_name);
  807. return buf;
  808. case ROCKER_DMA_DESC_CREDITS_OFFSET:
  809. sprintf(buf, "Ring[%s] CREDITS", ring_name);
  810. return buf;
  811. default:
  812. sprintf(buf, "Ring[%s] ???", ring_name);
  813. return buf;
  814. }
  815. } else {
  816. switch (addr) {
  817. regname(ROCKER_BOGUS_REG0);
  818. regname(ROCKER_BOGUS_REG1);
  819. regname(ROCKER_BOGUS_REG2);
  820. regname(ROCKER_BOGUS_REG3);
  821. regname(ROCKER_TEST_REG);
  822. regname(ROCKER_TEST_REG64);
  823. regname(ROCKER_TEST_REG64+4);
  824. regname(ROCKER_TEST_IRQ);
  825. regname(ROCKER_TEST_DMA_ADDR);
  826. regname(ROCKER_TEST_DMA_ADDR+4);
  827. regname(ROCKER_TEST_DMA_SIZE);
  828. regname(ROCKER_TEST_DMA_CTRL);
  829. regname(ROCKER_CONTROL);
  830. regname(ROCKER_PORT_PHYS_COUNT);
  831. regname(ROCKER_PORT_PHYS_LINK_STATUS);
  832. regname(ROCKER_PORT_PHYS_LINK_STATUS+4);
  833. regname(ROCKER_PORT_PHYS_ENABLE);
  834. regname(ROCKER_PORT_PHYS_ENABLE+4);
  835. regname(ROCKER_SWITCH_ID);
  836. regname(ROCKER_SWITCH_ID+4);
  837. }
  838. }
  839. return "???";
  840. }
  841. #else
  842. static const char *rocker_reg_name(void *opaque, hwaddr addr)
  843. {
  844. return NULL;
  845. }
  846. #endif
  847. static void rocker_mmio_write(void *opaque, hwaddr addr, uint64_t val,
  848. unsigned size)
  849. {
  850. DPRINTF("Write %s addr " TARGET_FMT_plx
  851. ", size %u, val " TARGET_FMT_plx "\n",
  852. rocker_reg_name(opaque, addr), addr, size, val);
  853. switch (size) {
  854. case 4:
  855. rocker_io_writel(opaque, addr, val);
  856. break;
  857. case 8:
  858. rocker_io_writeq(opaque, addr, val);
  859. break;
  860. }
  861. }
  862. static uint64_t rocker_port_phys_link_status(Rocker *r)
  863. {
  864. int i;
  865. uint64_t status = 0;
  866. for (i = 0; i < r->fp_ports; i++) {
  867. FpPort *port = r->fp_port[i];
  868. if (fp_port_get_link_up(port)) {
  869. status |= 1 << (i + 1);
  870. }
  871. }
  872. return status;
  873. }
  874. static uint64_t rocker_port_phys_enable_read(Rocker *r)
  875. {
  876. int i;
  877. uint64_t ret = 0;
  878. for (i = 0; i < r->fp_ports; i++) {
  879. FpPort *port = r->fp_port[i];
  880. if (fp_port_enabled(port)) {
  881. ret |= 1 << (i + 1);
  882. }
  883. }
  884. return ret;
  885. }
  886. static uint32_t rocker_io_readl(void *opaque, hwaddr addr)
  887. {
  888. Rocker *r = opaque;
  889. uint32_t ret;
  890. if (rocker_addr_is_desc_reg(r, addr)) {
  891. unsigned index = ROCKER_RING_INDEX(addr);
  892. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  893. switch (offset) {
  894. case ROCKER_DMA_DESC_ADDR_OFFSET:
  895. ret = (uint32_t)desc_ring_get_base_addr(r->rings[index]);
  896. break;
  897. case ROCKER_DMA_DESC_ADDR_OFFSET + 4:
  898. ret = (uint32_t)(desc_ring_get_base_addr(r->rings[index]) >> 32);
  899. break;
  900. case ROCKER_DMA_DESC_SIZE_OFFSET:
  901. ret = desc_ring_get_size(r->rings[index]);
  902. break;
  903. case ROCKER_DMA_DESC_HEAD_OFFSET:
  904. ret = desc_ring_get_head(r->rings[index]);
  905. break;
  906. case ROCKER_DMA_DESC_TAIL_OFFSET:
  907. ret = desc_ring_get_tail(r->rings[index]);
  908. break;
  909. case ROCKER_DMA_DESC_CREDITS_OFFSET:
  910. ret = desc_ring_get_credits(r->rings[index]);
  911. break;
  912. default:
  913. DPRINTF("not implemented dma reg read(l) addr=0x" TARGET_FMT_plx
  914. " (ring %d, addr=0x%02x)\n", addr, index, offset);
  915. ret = 0;
  916. break;
  917. }
  918. return ret;
  919. }
  920. switch (addr) {
  921. case ROCKER_BOGUS_REG0:
  922. case ROCKER_BOGUS_REG1:
  923. case ROCKER_BOGUS_REG2:
  924. case ROCKER_BOGUS_REG3:
  925. ret = 0xDEADBABE;
  926. break;
  927. case ROCKER_TEST_REG:
  928. ret = r->test_reg * 2;
  929. break;
  930. case ROCKER_TEST_REG64:
  931. ret = (uint32_t)(r->test_reg64 * 2);
  932. break;
  933. case ROCKER_TEST_REG64 + 4:
  934. ret = (uint32_t)((r->test_reg64 * 2) >> 32);
  935. break;
  936. case ROCKER_TEST_DMA_SIZE:
  937. ret = r->test_dma_size;
  938. break;
  939. case ROCKER_TEST_DMA_ADDR:
  940. ret = (uint32_t)r->test_dma_addr;
  941. break;
  942. case ROCKER_TEST_DMA_ADDR + 4:
  943. ret = (uint32_t)(r->test_dma_addr >> 32);
  944. break;
  945. case ROCKER_PORT_PHYS_COUNT:
  946. ret = r->fp_ports;
  947. break;
  948. case ROCKER_PORT_PHYS_LINK_STATUS:
  949. ret = (uint32_t)rocker_port_phys_link_status(r);
  950. break;
  951. case ROCKER_PORT_PHYS_LINK_STATUS + 4:
  952. ret = (uint32_t)(rocker_port_phys_link_status(r) >> 32);
  953. break;
  954. case ROCKER_PORT_PHYS_ENABLE:
  955. ret = (uint32_t)rocker_port_phys_enable_read(r);
  956. break;
  957. case ROCKER_PORT_PHYS_ENABLE + 4:
  958. ret = (uint32_t)(rocker_port_phys_enable_read(r) >> 32);
  959. break;
  960. case ROCKER_SWITCH_ID:
  961. ret = (uint32_t)r->switch_id;
  962. break;
  963. case ROCKER_SWITCH_ID + 4:
  964. ret = (uint32_t)(r->switch_id >> 32);
  965. break;
  966. default:
  967. DPRINTF("not implemented read(l) addr=0x" TARGET_FMT_plx "\n", addr);
  968. ret = 0;
  969. break;
  970. }
  971. return ret;
  972. }
  973. static uint64_t rocker_io_readq(void *opaque, hwaddr addr)
  974. {
  975. Rocker *r = opaque;
  976. uint64_t ret;
  977. if (rocker_addr_is_desc_reg(r, addr)) {
  978. unsigned index = ROCKER_RING_INDEX(addr);
  979. unsigned offset = addr & ROCKER_DMA_DESC_MASK;
  980. switch (addr & ROCKER_DMA_DESC_MASK) {
  981. case ROCKER_DMA_DESC_ADDR_OFFSET:
  982. ret = desc_ring_get_base_addr(r->rings[index]);
  983. break;
  984. default:
  985. DPRINTF("not implemented dma reg read(q) addr=0x" TARGET_FMT_plx
  986. " (ring %d, addr=0x%02x)\n", addr, index, offset);
  987. ret = 0;
  988. break;
  989. }
  990. return ret;
  991. }
  992. switch (addr) {
  993. case ROCKER_BOGUS_REG0:
  994. case ROCKER_BOGUS_REG2:
  995. ret = 0xDEADBABEDEADBABEULL;
  996. break;
  997. case ROCKER_TEST_REG64:
  998. ret = r->test_reg64 * 2;
  999. break;
  1000. case ROCKER_TEST_DMA_ADDR:
  1001. ret = r->test_dma_addr;
  1002. break;
  1003. case ROCKER_PORT_PHYS_LINK_STATUS:
  1004. ret = rocker_port_phys_link_status(r);
  1005. break;
  1006. case ROCKER_PORT_PHYS_ENABLE:
  1007. ret = rocker_port_phys_enable_read(r);
  1008. break;
  1009. case ROCKER_SWITCH_ID:
  1010. ret = r->switch_id;
  1011. break;
  1012. default:
  1013. DPRINTF("not implemented read(q) addr=0x" TARGET_FMT_plx "\n", addr);
  1014. ret = 0;
  1015. break;
  1016. }
  1017. return ret;
  1018. }
  1019. static uint64_t rocker_mmio_read(void *opaque, hwaddr addr, unsigned size)
  1020. {
  1021. DPRINTF("Read %s addr " TARGET_FMT_plx ", size %u\n",
  1022. rocker_reg_name(opaque, addr), addr, size);
  1023. switch (size) {
  1024. case 4:
  1025. return rocker_io_readl(opaque, addr);
  1026. case 8:
  1027. return rocker_io_readq(opaque, addr);
  1028. }
  1029. return -1;
  1030. }
/* BAR0 register space: little-endian, with both the guest-visible
 * (.valid) and implemented (.impl) access sizes restricted to 4 or
 * 8 bytes, matching the readl/readq and writel/writeq handlers. */
static const MemoryRegionOps rocker_mmio_ops = {
    .read = rocker_mmio_read,
    .write = rocker_mmio_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
  1044. static void rocker_msix_vectors_unuse(Rocker *r,
  1045. unsigned int num_vectors)
  1046. {
  1047. PCIDevice *dev = PCI_DEVICE(r);
  1048. int i;
  1049. for (i = 0; i < num_vectors; i++) {
  1050. msix_vector_unuse(dev, i);
  1051. }
  1052. }
  1053. static int rocker_msix_vectors_use(Rocker *r,
  1054. unsigned int num_vectors)
  1055. {
  1056. PCIDevice *dev = PCI_DEVICE(r);
  1057. int err;
  1058. int i;
  1059. for (i = 0; i < num_vectors; i++) {
  1060. err = msix_vector_use(dev, i);
  1061. if (err) {
  1062. goto rollback;
  1063. }
  1064. }
  1065. return 0;
  1066. rollback:
  1067. rocker_msix_vectors_unuse(r, i);
  1068. return err;
  1069. }
  1070. static int rocker_msix_init(Rocker *r, Error **errp)
  1071. {
  1072. PCIDevice *dev = PCI_DEVICE(r);
  1073. int err;
  1074. err = msix_init(dev, ROCKER_MSIX_VEC_COUNT(r->fp_ports),
  1075. &r->msix_bar,
  1076. ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_TABLE_OFFSET,
  1077. &r->msix_bar,
  1078. ROCKER_PCI_MSIX_BAR_IDX, ROCKER_PCI_MSIX_PBA_OFFSET,
  1079. 0, errp);
  1080. if (err) {
  1081. return err;
  1082. }
  1083. err = rocker_msix_vectors_use(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));
  1084. if (err) {
  1085. goto err_msix_vectors_use;
  1086. }
  1087. return 0;
  1088. err_msix_vectors_use:
  1089. msix_uninit(dev, &r->msix_bar, &r->msix_bar);
  1090. return err;
  1091. }
  1092. static void rocker_msix_uninit(Rocker *r)
  1093. {
  1094. PCIDevice *dev = PCI_DEVICE(r);
  1095. msix_uninit(dev, &r->msix_bar, &r->msix_bar);
  1096. rocker_msix_vectors_unuse(r, ROCKER_MSIX_VEC_COUNT(r->fp_ports));
  1097. }
  1098. static World *rocker_world_type_by_name(Rocker *r, const char *name)
  1099. {
  1100. int i;
  1101. for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
  1102. if (strcmp(name, world_name(r->worlds[i])) == 0) {
  1103. return r->worlds[i];
  1104. }
  1105. }
  1106. return NULL;
  1107. }
/*
 * PCI realize handler: bring up one rocker switch instance.
 *
 * Setup order: worlds -> BAR0 MMIO region -> MSI-X BAR and vectors ->
 * switch-property validation -> DMA descriptor rings -> front-panel
 * ports.  The error labels at the bottom unwind in reverse order of
 * setup; errors are reported through errp.
 */
static void pci_rocker_realize(PCIDevice *dev, Error **errp)
{
    Rocker *r = ROCKER(dev);
    const MACAddr zero = { .a = { 0, 0, 0, 0, 0, 0 } };
    const MACAddr dflt = { .a = { 0x52, 0x54, 0x00, 0x12, 0x35, 0x01 } };
    static int sw_index;    /* count of realized switches, used to vary MACs */
    int i, err = 0;

    /* allocate worlds */

    r->worlds[ROCKER_WORLD_TYPE_OF_DPA] = of_dpa_world_alloc(r);

    if (!r->world_name) {
        /* no "world" property given: default to the OF-DPA world */
        r->world_name = g_strdup(world_name(r->worlds[ROCKER_WORLD_TYPE_OF_DPA]));
    }

    r->world_dflt = rocker_world_type_by_name(r, r->world_name);
    if (!r->world_dflt) {
        error_setg(errp,
                   "invalid argument requested world %s does not exist",
                   r->world_name);
        goto err_world_type_by_name;
    }

    /* set up memory-mapped region at BAR0 */

    memory_region_init_io(&r->mmio, OBJECT(r), &rocker_mmio_ops, r,
                          "rocker-mmio", ROCKER_PCI_BAR0_SIZE);
    pci_register_bar(dev, ROCKER_PCI_BAR0_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &r->mmio);

    /* set up memory-mapped region for MSI-X */

    memory_region_init(&r->msix_bar, OBJECT(r), "rocker-msix-bar",
                       ROCKER_PCI_MSIX_BAR_SIZE);
    pci_register_bar(dev, ROCKER_PCI_MSIX_BAR_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &r->msix_bar);

    /* MSI-X init */

    err = rocker_msix_init(r, errp);
    if (err) {
        goto err_msix_init;
    }

    /* validate switch properties */

    if (!r->name) {
        r->name = g_strdup(TYPE_ROCKER);
    }

    /* switch names must be unique across all rocker instances */
    if (rocker_find(r->name)) {
        error_setg(errp, "%s already exists", r->name);
        goto err_duplicate;
    }

    /* Rocker name is passed in port name requests to OS with the intention
     * that the name is used in interface names. Limit the length of the
     * rocker name to avoid naming problems in the OS. Also, adding the
     * port number as p# and unganged breakout b#, where # is at most 2
     * digits, so leave room for it too (-1 for string terminator, -3 for
     * p# and -3 for b#)
     */
#define ROCKER_IFNAMSIZ 16
#define MAX_ROCKER_NAME_LEN (ROCKER_IFNAMSIZ - 1 - 3 - 3)
    if (strlen(r->name) > MAX_ROCKER_NAME_LEN) {
        error_setg(errp,
                   "name too long; please shorten to at most %d chars",
                   MAX_ROCKER_NAME_LEN);
        goto err_name_too_long;
    }

    if (memcmp(&r->fp_start_macaddr, &zero, sizeof(zero)) == 0) {
        /* no MAC given: use the default, differentiated per switch */
        memcpy(&r->fp_start_macaddr, &dflt, sizeof(dflt));
        r->fp_start_macaddr.a[4] += (sw_index++);
    }

    if (!r->switch_id) {
        /* derive the switch id from the start MAC address */
        memcpy(&r->switch_id, &r->fp_start_macaddr,
               sizeof(r->fp_start_macaddr));
    }

    if (r->fp_ports > ROCKER_FP_PORTS_MAX) {
        r->fp_ports = ROCKER_FP_PORTS_MAX;
    }

    r->rings = g_new(DescRing *, rocker_pci_ring_count(r));

    /* Rings are ordered like this:
     * - command ring
     * - event ring
     * - port0 tx ring
     * - port0 rx ring
     * - port1 tx ring
     * - port1 rx ring
     * .....
     */

    for (i = 0; i < rocker_pci_ring_count(r); i++) {
        DescRing *ring = desc_ring_alloc(r, i);

        if (i == ROCKER_RING_CMD) {
            desc_ring_set_consume(ring, cmd_consume, ROCKER_MSIX_VEC_CMD);
        } else if (i == ROCKER_RING_EVENT) {
            /* NULL consume: the device only produces on the event ring */
            desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_EVENT);
        } else if (i % 2 == 0) {
            desc_ring_set_consume(ring, tx_consume,
                                  ROCKER_MSIX_VEC_TX((i - 2) / 2));
        } else if (i % 2 == 1) {
            desc_ring_set_consume(ring, NULL, ROCKER_MSIX_VEC_RX((i - 3) / 2));
        }

        r->rings[i] = ring;
    }

    for (i = 0; i < r->fp_ports; i++) {
        FpPort *port =
            fp_port_alloc(r, r->name, &r->fp_start_macaddr,
                          i, &r->fp_ports_peers[i]);

        r->fp_port[i] = port;
        fp_port_set_world(port, r->world_dflt);
    }

    QLIST_INSERT_HEAD(&rockers, r, next);

    return;

err_name_too_long:
err_duplicate:
    rocker_msix_uninit(r);
err_msix_init:
    object_unparent(OBJECT(&r->msix_bar));
    object_unparent(OBJECT(&r->mmio));
err_world_type_by_name:
    for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
        if (r->worlds[i]) {
            world_free(r->worlds[i]);
        }
    }
}
  1222. static void pci_rocker_uninit(PCIDevice *dev)
  1223. {
  1224. Rocker *r = ROCKER(dev);
  1225. int i;
  1226. QLIST_REMOVE(r, next);
  1227. for (i = 0; i < r->fp_ports; i++) {
  1228. FpPort *port = r->fp_port[i];
  1229. fp_port_free(port);
  1230. r->fp_port[i] = NULL;
  1231. }
  1232. for (i = 0; i < rocker_pci_ring_count(r); i++) {
  1233. if (r->rings[i]) {
  1234. desc_ring_free(r->rings[i]);
  1235. }
  1236. }
  1237. g_free(r->rings);
  1238. rocker_msix_uninit(r);
  1239. object_unparent(OBJECT(&r->msix_bar));
  1240. object_unparent(OBJECT(&r->mmio));
  1241. for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
  1242. if (r->worlds[i]) {
  1243. world_free(r->worlds[i]);
  1244. }
  1245. }
  1246. g_free(r->fp_ports_peers);
  1247. }
  1248. static void rocker_reset(DeviceState *dev)
  1249. {
  1250. Rocker *r = ROCKER(dev);
  1251. int i;
  1252. for (i = 0; i < ROCKER_WORLD_TYPE_MAX; i++) {
  1253. if (r->worlds[i]) {
  1254. world_reset(r->worlds[i]);
  1255. }
  1256. }
  1257. for (i = 0; i < r->fp_ports; i++) {
  1258. fp_port_reset(r->fp_port[i]);
  1259. fp_port_set_world(r->fp_port[i], r->world_dflt);
  1260. }
  1261. r->test_reg = 0;
  1262. r->test_reg64 = 0;
  1263. r->test_dma_addr = 0;
  1264. r->test_dma_size = 0;
  1265. for (i = 0; i < rocker_pci_ring_count(r); i++) {
  1266. desc_ring_reset(r->rings[i]);
  1267. }
  1268. DPRINTF("Reset done\n");
  1269. }
/* User-settable device properties (-device rocker,...).  "ports" is
 * an array property: fp_ports receives the count and fp_ports_peers
 * the netdev backends. */
static Property rocker_properties[] = {
    DEFINE_PROP_STRING("name", Rocker, name),
    DEFINE_PROP_STRING("world", Rocker, world_name),
    DEFINE_PROP_MACADDR("fp_start_macaddr", Rocker,
                        fp_start_macaddr),
    DEFINE_PROP_UINT64("switch_id", Rocker,
                       switch_id, 0),
    DEFINE_PROP_ARRAY("ports", Rocker, fp_ports,
                      fp_ports_peers, qdev_prop_netdev, NICPeers),
    DEFINE_PROP_END_OF_LIST(),
};
/* Migration is not supported: mark the device unmigratable. */
static const VMStateDescription rocker_vmsd = {
    .name = TYPE_ROCKER,
    .unmigratable = 1,
};
  1285. static void rocker_class_init(ObjectClass *klass, void *data)
  1286. {
  1287. DeviceClass *dc = DEVICE_CLASS(klass);
  1288. PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
  1289. k->realize = pci_rocker_realize;
  1290. k->exit = pci_rocker_uninit;
  1291. k->vendor_id = PCI_VENDOR_ID_REDHAT;
  1292. k->device_id = PCI_DEVICE_ID_REDHAT_ROCKER;
  1293. k->revision = ROCKER_PCI_REVISION;
  1294. k->class_id = PCI_CLASS_NETWORK_OTHER;
  1295. set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
  1296. dc->desc = "Rocker Switch";
  1297. dc->reset = rocker_reset;
  1298. dc->props = rocker_properties;
  1299. dc->vmsd = &rocker_vmsd;
  1300. }
/* QOM type description: a conventional PCI device. */
static const TypeInfo rocker_info = {
    .name = TYPE_ROCKER,
    .parent = TYPE_PCI_DEVICE,
    .instance_size = sizeof(Rocker),
    .class_init = rocker_class_init,
    .interfaces = (InterfaceInfo[]) {
        { INTERFACE_CONVENTIONAL_PCI_DEVICE },
        { },
    },
};
/* Register the rocker device type with QOM at startup. */
static void rocker_register_types(void)
{
    type_register_static(&rocker_info);
}

type_init(rocker_register_types)