/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Inter-VM Logical Lan, aka ibmveth
 *
 * Copyright (c) 2010,2011 David Gibson, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/module.h"
#include "net/net.h"
#include "migration/vmstate.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_vio.h"
#include "hw/qdev-properties.h"
#include "system/system.h"
#include "trace.h"

#include <libfdt.h>
#include "qom/object.h"

#define ETH_ALEN        6
#define MAX_PACKET_SIZE 65536

/* Compatibility flags for migration */
#define SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT  0
#define SPAPRVLAN_FLAG_RX_BUF_POOLS      (1 << SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT)

/*
 * Virtual LAN device
 */

typedef uint64_t vlan_bd_t;
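
/*
 * Each buffer descriptor packs everything into one 64-bit value: flag
 * bits (valid, toggle, checksum status) in the top byte, the buffer
 * length in bits 32..55 and the guest DMA address in the low 32 bits.
 */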
#define VLAN_BD_VALID        0x8000000000000000ULL
#define VLAN_BD_TOGGLE       0x4000000000000000ULL
#define VLAN_BD_NO_CSUM      0x0200000000000000ULL
#define VLAN_BD_CSUM_GOOD    0x0100000000000000ULL
#define VLAN_BD_LEN_MASK     0x00ffffff00000000ULL
#define VLAN_BD_LEN(bd)      (((bd) & VLAN_BD_LEN_MASK) >> 32)
#define VLAN_BD_ADDR_MASK    0x00000000ffffffffULL
#define VLAN_BD_ADDR(bd)     ((bd) & VLAN_BD_ADDR_MASK)
#define VLAN_VALID_BD(addr, len) (VLAN_BD_VALID | \
                                  (((len) << 32) & VLAN_BD_LEN_MASK) | \
                                  (addr & VLAN_BD_ADDR_MASK))
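
/* Flag bits for the control byte of a 16-byte receive queue entry */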
#define VLAN_RXQC_TOGGLE     0x80
#define VLAN_RXQC_VALID      0x40
#define VLAN_RXQC_NO_CSUM    0x02
#define VLAN_RXQC_CSUM_GOOD  0x01

#define VLAN_RQ_ALIGNMENT    16
#define VLAN_RXQ_BD_OFF      0
#define VLAN_FILTER_BD_OFF   8
#define VLAN_RX_BDS_OFF      16
/*
 * The final 8 bytes of the buffer list are a counter of frames dropped
 * because there was not a buffer in the buffer list capable of holding
 * the frame. We must avoid it, or the operating system will report garbage
 * for this statistic.
 */
#define VLAN_RX_BDS_LEN      (SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF - 8)
#define VLAN_MAX_BUFS        (VLAN_RX_BDS_LEN / 8)

#define TYPE_VIO_SPAPR_VLAN_DEVICE "spapr-vlan"
OBJECT_DECLARE_SIMPLE_TYPE(SpaprVioVlan, VIO_SPAPR_VLAN_DEVICE)

#define RX_POOL_MAX_BDS 4096
#define RX_MAX_POOLS 5

typedef struct {
    int32_t bufsize;
    int32_t count;
    vlan_bd_t bds[RX_POOL_MAX_BDS];
} RxBufPool;

struct SpaprVioVlan {
    SpaprVioDevice sdev;
    NICConf nicconf;
    NICState *nic;
    MACAddr perm_mac;
    bool isopen;
    hwaddr buf_list;
    uint32_t add_buf_ptr, use_buf_ptr, rx_bufs;
    hwaddr rxq_ptr;
    QEMUTimer *rxp_timer;
    uint32_t compat_flags;             /* Compatibility flags for migration */
    RxBufPool *rx_pool[RX_MAX_POOLS];  /* Receive buffer descriptor pools */
};
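
/**
 * .can_receive callback for the net layer: packets can only be accepted
 * once the guest has opened the device via H_REGISTER_LOGICAL_LAN and
 * has queued at least one receive buffer.
 */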
static bool spapr_vlan_can_receive(NetClientState *nc)
{
    SpaprVioVlan *dev = qemu_get_nic_opaque(nc);

    return dev->isopen && dev->rx_bufs > 0;
}

/**
 * The last 8 bytes of the receive buffer list page (that has been
 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call) contain
 * a counter for frames that have been dropped because there was no
 * suitable receive buffer available. This function is used to increase
 * this counter by one.
 */
static void spapr_vlan_record_dropped_rx_frame(SpaprVioVlan *dev)
{
    uint64_t cnt;

    cnt = vio_ldq(&dev->sdev, dev->buf_list + 4096 - 8);
    vio_stq(&dev->sdev, dev->buf_list + 4096 - 8, cnt + 1);
}

/**
 * Get buffer descriptor from one of our receive buffer pools
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_pool(SpaprVioVlan *dev,
                                                size_t size)
{
    vlan_bd_t bd;
    int pool;

    for (pool = 0; pool < RX_MAX_POOLS; pool++) {
        if (dev->rx_pool[pool]->count > 0 &&
            dev->rx_pool[pool]->bufsize >= size + 8) {
            break;
        }
    }
    if (pool == RX_MAX_POOLS) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    trace_spapr_vlan_get_rx_bd_from_pool_found(pool,
                                               dev->rx_pool[pool]->count,
                                               dev->rx_bufs);

    /* Remove the buffer from the pool */
    dev->rx_pool[pool]->count--;
    bd = dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count];
    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count] = 0;

    return bd;
}

/**
 * Get buffer descriptor from the receive buffer list page that has been
 * supplied by the guest with the H_REGISTER_LOGICAL_LAN call
 */
static vlan_bd_t spapr_vlan_get_rx_bd_from_page(SpaprVioVlan *dev,
                                                size_t size)
{
    int buf_ptr = dev->use_buf_ptr;
    vlan_bd_t bd;

    do {
        buf_ptr += 8;
        if (buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + buf_ptr);

        trace_spapr_vlan_get_rx_bd_from_page(buf_ptr, (uint64_t)bd);
    } while ((!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8)
             && buf_ptr != dev->use_buf_ptr);

    if (!(bd & VLAN_BD_VALID) || VLAN_BD_LEN(bd) < size + 8) {
        /* Failed to find a suitable buffer */
        return 0;
    }

    /* Remove the buffer from the pool */
    dev->use_buf_ptr = buf_ptr;
    vio_stq(&dev->sdev, dev->buf_list + dev->use_buf_ptr, 0);

    trace_spapr_vlan_get_rx_bd_from_page_found(dev->use_buf_ptr, dev->rx_bufs);

    return bd;
}
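
/**
 * Deliver a frame to the guest: pick a suitable receive buffer, DMA the
 * packet data into it at offset 8, write a 16-byte completion entry
 * (control byte, length and the guest's opaque handle) into the receive
 * queue and pulse the interrupt if the guest has enabled it.
 */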
static ssize_t spapr_vlan_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    SpaprVioVlan *dev = qemu_get_nic_opaque(nc);
    SpaprVioDevice *sdev = VIO_SPAPR_DEVICE(dev);
    vlan_bd_t rxq_bd = vio_ldq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF);
    vlan_bd_t bd;
    uint64_t handle;
    uint8_t control;

    trace_spapr_vlan_receive(sdev->qdev.id, dev->rx_bufs);

    if (!dev->isopen) {
        return -1;
    }

    if (!dev->rx_bufs) {
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        bd = spapr_vlan_get_rx_bd_from_pool(dev, size);
    } else {
        bd = spapr_vlan_get_rx_bd_from_page(dev, size);
    }
    if (!bd) {
        spapr_vlan_record_dropped_rx_frame(dev);
        return 0;
    }

    dev->rx_bufs--;

    /* Transfer the packet data */
    if (spapr_vio_dma_write(sdev, VLAN_BD_ADDR(bd) + 8, buf, size) < 0) {
        return -1;
    }

    trace_spapr_vlan_receive_dma_completed();

    /* Update the receive queue */
    control = VLAN_RXQC_TOGGLE | VLAN_RXQC_VALID;
    if (rxq_bd & VLAN_BD_TOGGLE) {
        control ^= VLAN_RXQC_TOGGLE;
    }

    handle = vio_ldq(sdev, VLAN_BD_ADDR(bd));
    vio_stq(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 8, handle);
    vio_stl(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 4, size);
    vio_sth(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr + 2, 8);
    vio_stb(sdev, VLAN_BD_ADDR(rxq_bd) + dev->rxq_ptr, control);

    trace_spapr_vlan_receive_wrote(dev->rxq_ptr,
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr),
                                   vio_ldq(sdev, VLAN_BD_ADDR(rxq_bd) +
                                                 dev->rxq_ptr + 8));

    dev->rxq_ptr += 16;
    if (dev->rxq_ptr >= VLAN_BD_LEN(rxq_bd)) {
        dev->rxq_ptr = 0;
        vio_stq(sdev, dev->buf_list + VLAN_RXQ_BD_OFF, rxq_bd ^ VLAN_BD_TOGGLE);
    }

    if (sdev->signal_state & 1) {
        spapr_vio_irq_pulse(sdev);
    }

    return size;
}

static NetClientInfo net_spapr_vlan_info = {
    .type = NET_CLIENT_DRIVER_NIC,
    .size = sizeof(NICState),
    .can_receive = spapr_vlan_can_receive,
    .receive = spapr_vlan_receive,
};
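
/**
 * Timer callback: retry delivering packets that the net layer queued
 * while no suitable receive buffer was available.
 */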
static void spapr_vlan_flush_rx_queue(void *opaque)
{
    SpaprVioVlan *dev = opaque;

    qemu_flush_queued_packets(qemu_get_queue(dev->nic));
}

static void spapr_vlan_reset_rx_pool(RxBufPool *rxp)
{
    /*
     * Use INT_MAX as bufsize so that unused buffers are moved to the end
     * of the list during the qsort in spapr_vlan_add_rxbuf_to_pool() later.
     */
    rxp->bufsize = INT_MAX;
    rxp->count = 0;
    memset(rxp->bds, 0, sizeof(rxp->bds));
}
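
/**
 * Device reset: forget all registration state, empty the receive buffer
 * pools and restore the permanent MAC address, which the guest may have
 * changed via H_CHANGE_LOGICAL_LAN_MAC.
 */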
static void spapr_vlan_reset(SpaprVioDevice *sdev)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    int i;

    dev->buf_list = 0;
    dev->rx_bufs = 0;
    dev->isopen = 0;

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
        }
    }

    memcpy(&dev->nicconf.macaddr.a, &dev->perm_mac.a,
           sizeof(dev->nicconf.macaddr.a));
    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);
}
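
/**
 * Realize: record the configured MAC address as the permanent one,
 * create the NIC backend and set up the timer used to coalesce receive
 * queue flushes.
 */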
static void spapr_vlan_realize(SpaprVioDevice *sdev, Error **errp)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);

    qemu_macaddr_default_if_unset(&dev->nicconf.macaddr);

    memcpy(&dev->perm_mac.a, &dev->nicconf.macaddr.a, sizeof(dev->perm_mac.a));

    dev->nic = qemu_new_nic(&net_spapr_vlan_info, &dev->nicconf,
                            object_get_typename(OBJECT(sdev)), sdev->qdev.id,
                            &sdev->qdev.mem_reentrancy_guard, dev);
    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);

    dev->rxp_timer = timer_new_us(QEMU_CLOCK_VIRTUAL, spapr_vlan_flush_rx_queue,
                                  dev);
}

static void spapr_vlan_instance_init(Object *obj)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
    int i;

    device_add_bootindex_property(obj, &dev->nicconf.bootindex,
                                  "bootindex", "",
                                  DEVICE(dev));

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            dev->rx_pool[i] = g_new(RxBufPool, 1);
            spapr_vlan_reset_rx_pool(dev->rx_pool[i]);
        }
    }
}

static void spapr_vlan_instance_finalize(Object *obj)
{
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(obj);
    int i;

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        for (i = 0; i < RX_MAX_POOLS; i++) {
            g_free(dev->rx_pool[i]);
            dev->rx_pool[i] = NULL;
        }
    }

    if (dev->rxp_timer) {
        timer_free(dev->rxp_timer);
    }
}
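
/**
 * Helper for the machine code to create a spapr-vlan device on the VIO
 * bus from a legacy NICInfo configuration.
 */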
void spapr_vlan_create(SpaprVioBus *bus, NICInfo *nd)
{
    DeviceState *dev;

    dev = qdev_new("spapr-vlan");

    qdev_set_nic_properties(dev, nd);

    qdev_realize_and_unref(dev, &bus->bus, &error_fatal);
}

static int spapr_vlan_devnode(SpaprVioDevice *dev, void *fdt, int node_off)
{
    SpaprVioVlan *vdev = VIO_SPAPR_VLAN_DEVICE(dev);
    uint8_t padded_mac[8] = {0, 0};
    int ret;

    /* Some old phyp versions give the mac address in an 8-byte
     * property. The kernel driver (before 3.10) has an insane workaround;
     * rather than doing the obvious thing and checking the property
     * length, it checks whether the first byte has 0b10 in the low
     * bits. If a correct 6-byte property has a different first byte
     * the kernel will get the wrong mac address, overrunning its
     * buffer in the process (read only, thank goodness).
     *
     * Here we return a 6-byte address unless that would break a pre-3.10
     * driver. In that case we return a padded 8-byte address to allow the old
     * workaround to succeed. */
    if ((vdev->nicconf.macaddr.a[0] & 0x3) == 0x2) {
        ret = fdt_setprop(fdt, node_off, "local-mac-address",
                          &vdev->nicconf.macaddr, ETH_ALEN);
    } else {
        memcpy(&padded_mac[2], &vdev->nicconf.macaddr, ETH_ALEN);
        ret = fdt_setprop(fdt, node_off, "local-mac-address",
                          padded_mac, sizeof(padded_mac));
    }
    if (ret < 0) {
        return ret;
    }

    ret = fdt_setprop_cell(fdt, node_off, "ibm,mac-address-filters", 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}
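
/**
 * Validate a guest-supplied buffer descriptor: address and length must
 * satisfy the requested alignment, and the described buffer must be
 * DMA-mapped in both directions.
 */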
static int check_bd(SpaprVioVlan *dev, vlan_bd_t bd,
                    target_ulong alignment)
{
    if ((VLAN_BD_ADDR(bd) % alignment)
        || (VLAN_BD_LEN(bd) % alignment)) {
        return -1;
    }

    if (!spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
                             VLAN_BD_LEN(bd), DMA_DIRECTION_FROM_DEVICE)
        || !spapr_vio_dma_valid(&dev->sdev, VLAN_BD_ADDR(bd),
                                VLAN_BD_LEN(bd), DMA_DIRECTION_TO_DEVICE)) {
        return -1;
    }

    return 0;
}
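
/**
 * H_REGISTER_LOGICAL_LAN: the guest opens the device by supplying the
 * buffer list page, the receive queue descriptor and the filter list
 * page. All three are validated, the buffer list and receive queue are
 * zero-initialized, and the device is marked open.
 */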
static target_ulong h_register_logical_lan(PowerPCCPU *cpu,
                                           SpaprMachineState *spapr,
                                           target_ulong opcode,
                                           target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf_list = args[1];
    target_ulong rec_queue = args[2];
    target_ulong filter_list = args[3];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    vlan_bd_t filter_list_bd;

    if (!dev) {
        return H_PARAMETER;
    }

    if (dev->isopen) {
        hcall_dprintf("H_REGISTER_LOGICAL_LAN called twice without "
                      "H_FREE_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    if (check_bd(dev, VLAN_VALID_BD(buf_list, SPAPR_TCE_PAGE_SIZE),
                 SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad buf_list 0x" TARGET_FMT_lx "\n", buf_list);
        return H_PARAMETER;
    }

    filter_list_bd = VLAN_VALID_BD(filter_list, SPAPR_TCE_PAGE_SIZE);
    if (check_bd(dev, filter_list_bd, SPAPR_TCE_PAGE_SIZE) < 0) {
        hcall_dprintf("Bad filter_list 0x" TARGET_FMT_lx "\n", filter_list);
        return H_PARAMETER;
    }

    if (!(rec_queue & VLAN_BD_VALID)
        || (check_bd(dev, rec_queue, VLAN_RQ_ALIGNMENT) < 0)) {
        hcall_dprintf("Bad receive queue\n");
        return H_PARAMETER;
    }

    dev->buf_list = buf_list;
    sdev->signal_state = 0;

    rec_queue &= ~VLAN_BD_TOGGLE;

    /* Initialize the buffer list */
    vio_stq(sdev, buf_list, rec_queue);
    vio_stq(sdev, buf_list + 8, filter_list_bd);
    spapr_vio_dma_set(sdev, buf_list + VLAN_RX_BDS_OFF, 0,
                      SPAPR_TCE_PAGE_SIZE - VLAN_RX_BDS_OFF);
    dev->add_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->use_buf_ptr = VLAN_RX_BDS_OFF - 8;
    dev->rx_bufs = 0;
    dev->rxq_ptr = 0;

    /* Initialize the receive queue */
    spapr_vio_dma_set(sdev, VLAN_BD_ADDR(rec_queue), 0, VLAN_BD_LEN(rec_queue));

    dev->isopen = 1;
    qemu_flush_queued_packets(qemu_get_queue(dev->nic));

    return H_SUCCESS;
}
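
/**
 * H_FREE_LOGICAL_LAN: the guest closes the device; this simply resets
 * it back to the unregistered state.
 */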
static target_ulong h_free_logical_lan(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);

    if (!dev) {
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        hcall_dprintf("H_FREE_LOGICAL_LAN called without "
                      "H_REGISTER_LOGICAL_LAN\n");
        return H_RESOURCE;
    }

    spapr_vlan_reset(sdev);
    return H_SUCCESS;
}

/**
 * Used for qsort, this function compares two RxBufPools by size.
 */
static int rx_pool_size_compare(const void *p1, const void *p2)
{
    const RxBufPool *pool1 = *(RxBufPool **)p1;
    const RxBufPool *pool2 = *(RxBufPool **)p2;

    if (pool1->bufsize < pool2->bufsize) {
        return -1;
    }
    return pool1->bufsize > pool2->bufsize;
}

/**
 * Search for a matching buffer pool with exact matching size,
 * or return -1 if no matching pool has been found.
 */
static int spapr_vlan_get_rx_pool_id(SpaprVioVlan *dev, int size)
{
    int pool;

    for (pool = 0; pool < RX_MAX_POOLS; pool++) {
        if (dev->rx_pool[pool]->bufsize == size) {
            return pool;
        }
    }

    return -1;
}

/**
 * Enqueuing receive buffer by adding it to one of our receive buffer pools
 */
static target_long spapr_vlan_add_rxbuf_to_pool(SpaprVioVlan *dev,
                                                target_ulong buf)
{
    int size = VLAN_BD_LEN(buf);
    int pool;

    pool = spapr_vlan_get_rx_pool_id(dev, size);
    if (pool < 0) {
        /*
         * No matching pool found? Try to use a new one. If the guest used all
         * pools before, but changed the size of one pool in the meantime, we
         * might need to recycle that pool here (if it's empty already). Thus
         * scan all buffer pools now, starting with the last (likely empty)
         * one.
         */
        for (pool = RX_MAX_POOLS - 1; pool >= 0 ; pool--) {
            if (dev->rx_pool[pool]->count == 0) {
                dev->rx_pool[pool]->bufsize = size;
                /*
                 * Sort pools by size so that spapr_vlan_receive()
                 * can later find the smallest buffer pool easily.
                 */
                qsort(dev->rx_pool, RX_MAX_POOLS, sizeof(dev->rx_pool[0]),
                      rx_pool_size_compare);
                pool = spapr_vlan_get_rx_pool_id(dev, size);
                trace_spapr_vlan_add_rxbuf_to_pool_create(pool,
                                                          VLAN_BD_LEN(buf));
                break;
            }
        }
    }
    /* Still no usable pool? Give up */
    if (pool < 0 || dev->rx_pool[pool]->count >= RX_POOL_MAX_BDS) {
        return H_RESOURCE;
    }

    trace_spapr_vlan_add_rxbuf_to_pool(pool, VLAN_BD_LEN(buf),
                                       dev->rx_pool[pool]->count);

    dev->rx_pool[pool]->bds[dev->rx_pool[pool]->count++] = buf;

    return 0;
}

/**
 * This is the old way of enqueuing receive buffers: Add it to the rx queue
 * page that has been supplied by the guest (which is quite limited in size).
 */
static target_long spapr_vlan_add_rxbuf_to_page(SpaprVioVlan *dev,
                                                target_ulong buf)
{
    vlan_bd_t bd;

    if (dev->rx_bufs >= VLAN_MAX_BUFS) {
        return H_RESOURCE;
    }

    do {
        dev->add_buf_ptr += 8;
        if (dev->add_buf_ptr >= VLAN_RX_BDS_LEN + VLAN_RX_BDS_OFF) {
            dev->add_buf_ptr = VLAN_RX_BDS_OFF;
        }

        bd = vio_ldq(&dev->sdev, dev->buf_list + dev->add_buf_ptr);
    } while (bd & VLAN_BD_VALID);

    vio_stq(&dev->sdev, dev->buf_list + dev->add_buf_ptr, buf);

    trace_spapr_vlan_add_rxbuf_to_page(dev->add_buf_ptr, dev->rx_bufs, buf);

    return 0;
}
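
/**
 * H_ADD_LOGICAL_LAN_BUFFER: the guest donates one receive buffer. It is
 * stored either in a size-matched buffer pool or in the buffer list
 * page, depending on the compat flags, and a short timer is armed so
 * that queued packets are only flushed once the guest has had a chance
 * to add further buffers.
 */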
static target_ulong h_add_logical_lan_buffer(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong buf = args[1];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    target_long ret;

    trace_spapr_vlan_h_add_logical_lan_buffer(reg, buf);

    if (!sdev) {
        hcall_dprintf("Bad device\n");
        return H_PARAMETER;
    }

    if ((check_bd(dev, buf, 4) < 0)
        || (VLAN_BD_LEN(buf) < 16)) {
        hcall_dprintf("Bad buffer enqueued\n");
        return H_PARAMETER;
    }

    if (!dev->isopen) {
        return H_RESOURCE;
    }

    if (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) {
        ret = spapr_vlan_add_rxbuf_to_pool(dev, buf);
    } else {
        ret = spapr_vlan_add_rxbuf_to_page(dev, buf);
    }
    if (ret) {
        return ret;
    }

    dev->rx_bufs++;

    /*
     * Give guest some more time to add additional RX buffers before we
     * flush the receive queue, so that e.g. fragmented IP packets can
     * be passed to the guest in one go later (instead of passing single
     * fragments if there is only one receive buffer available).
     */
    timer_mod(dev->rxp_timer, qemu_clock_get_us(QEMU_CLOCK_VIRTUAL) + 500);

    return H_SUCCESS;
}
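
/**
 * H_SEND_LOGICAL_LAN: transmit path. Up to six valid buffer descriptors
 * are gathered into one linear buffer, which is handed to the net layer
 * with qemu_send_packet().
 */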
static target_ulong h_send_logical_lan(PowerPCCPU *cpu,
                                       SpaprMachineState *spapr,
                                       target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong *bufs = args + 1;
    target_ulong continue_token = args[7];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    unsigned total_len;
    uint8_t *p;
    g_autofree uint8_t *lbuf = NULL;
    int i, nbufs;
    int ret;

    trace_spapr_vlan_h_send_logical_lan(reg, continue_token);

    if (!sdev) {
        return H_PARAMETER;
    }

    trace_spapr_vlan_h_send_logical_lan_rxbufs(dev->rx_bufs);

    if (!dev->isopen) {
        return H_DROPPED;
    }

    if (continue_token) {
        return H_HARDWARE; /* FIXME actually handle this */
    }

    total_len = 0;
    for (i = 0; i < 6; i++) {
        trace_spapr_vlan_h_send_logical_lan_buf_desc(bufs[i]);
        if (!(bufs[i] & VLAN_BD_VALID)) {
            break;
        }
        total_len += VLAN_BD_LEN(bufs[i]);
    }

    nbufs = i;
    trace_spapr_vlan_h_send_logical_lan_total(nbufs, total_len);

    if (total_len == 0) {
        return H_SUCCESS;
    }

    if (total_len > MAX_PACKET_SIZE) {
        /* Don't let the guest force too large an allocation */
        return H_RESOURCE;
    }

    lbuf = g_malloc(total_len);
    p = lbuf;
    for (i = 0; i < nbufs; i++) {
        ret = spapr_vio_dma_read(sdev, VLAN_BD_ADDR(bufs[i]),
                                 p, VLAN_BD_LEN(bufs[i]));
        if (ret < 0) {
            return ret;
        }

        p += VLAN_BD_LEN(bufs[i]);
    }

    qemu_send_packet(qemu_get_queue(dev->nic), lbuf, total_len);

    return H_SUCCESS;
}
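
/**
 * H_MULTICAST_CTRL: multicast filter manipulation is not implemented in
 * this model; the call is accepted and reported as successful.
 */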
static target_ulong h_multicast_ctrl(PowerPCCPU *cpu, SpaprMachineState *spapr,
                                     target_ulong opcode, target_ulong *args)
{
    target_ulong reg = args[0];
    SpaprVioDevice *dev = spapr_vio_find_by_reg(spapr->vio_bus, reg);

    if (!dev) {
        return H_PARAMETER;
    }

    return H_SUCCESS;
}
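
/**
 * H_CHANGE_LOGICAL_LAN_MAC: the new MAC address arrives in the low
 * 48 bits of args[1], most significant byte first; update the NIC
 * configuration to match.
 */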
static target_ulong h_change_logical_lan_mac(PowerPCCPU *cpu,
                                             SpaprMachineState *spapr,
                                             target_ulong opcode,
                                             target_ulong *args)
{
    target_ulong reg = args[0];
    target_ulong macaddr = args[1];
    SpaprVioDevice *sdev = spapr_vio_find_by_reg(spapr->vio_bus, reg);
    SpaprVioVlan *dev = VIO_SPAPR_VLAN_DEVICE(sdev);
    int i;

    if (!dev) {
        hcall_dprintf("H_CHANGE_LOGICAL_LAN_MAC called when "
                      "no NIC is present\n");
        return H_PARAMETER;
    }

    for (i = 0; i < ETH_ALEN; i++) {
        dev->nicconf.macaddr.a[ETH_ALEN - i - 1] = macaddr & 0xff;
        macaddr >>= 8;
    }

    qemu_format_nic_info_str(qemu_get_queue(dev->nic), dev->nicconf.macaddr.a);

    return H_SUCCESS;
}

static const Property spapr_vlan_properties[] = {
    DEFINE_SPAPR_PROPERTIES(SpaprVioVlan, sdev),
    DEFINE_NIC_PROPERTIES(SpaprVioVlan, nicconf),
    DEFINE_PROP_BIT("use-rx-buffer-pools", SpaprVioVlan,
                    compat_flags, SPAPRVLAN_FLAG_RX_BUF_POOLS_BIT, true),
};
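
/**
 * Migration subsection predicate: rx buffer pool state is only
 * transferred when the "use-rx-buffer-pools" compat flag is set.
 */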
static bool spapr_vlan_rx_buffer_pools_needed(void *opaque)
{
    SpaprVioVlan *dev = opaque;

    return (dev->compat_flags & SPAPRVLAN_FLAG_RX_BUF_POOLS) != 0;
}

static const VMStateDescription vmstate_rx_buffer_pool = {
    .name = "spapr_llan/rx_buffer_pool",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_INT32(bufsize, RxBufPool),
        VMSTATE_INT32(count, RxBufPool),
        VMSTATE_UINT64_ARRAY(bds, RxBufPool, RX_POOL_MAX_BDS),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_rx_pools = {
    .name = "spapr_llan/rx_pools",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = spapr_vlan_rx_buffer_pools_needed,
    .fields = (const VMStateField[]) {
        VMSTATE_ARRAY_OF_POINTER_TO_STRUCT(rx_pool, SpaprVioVlan,
                                           RX_MAX_POOLS, 1,
                                           vmstate_rx_buffer_pool, RxBufPool),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_spapr_llan = {
    .name = "spapr_llan",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (const VMStateField[]) {
        VMSTATE_SPAPR_VIO(sdev, SpaprVioVlan),
        /* LLAN state */
        VMSTATE_BOOL(isopen, SpaprVioVlan),
        VMSTATE_UINT64(buf_list, SpaprVioVlan),
        VMSTATE_UINT32(add_buf_ptr, SpaprVioVlan),
        VMSTATE_UINT32(use_buf_ptr, SpaprVioVlan),
        VMSTATE_UINT32(rx_bufs, SpaprVioVlan),
        VMSTATE_UINT64(rxq_ptr, SpaprVioVlan),
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription * const []) {
        &vmstate_rx_pools,
        NULL
    }
};

static void spapr_vlan_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    SpaprVioDeviceClass *k = VIO_SPAPR_DEVICE_CLASS(klass);

    k->realize = spapr_vlan_realize;
    k->reset = spapr_vlan_reset;
    k->devnode = spapr_vlan_devnode;
    k->dt_name = "l-lan";
    k->dt_type = "network";
    k->dt_compatible = "IBM,l-lan";
    k->signal_mask = 0x1;
    set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
    device_class_set_props(dc, spapr_vlan_properties);
    k->rtce_window_size = 0x10000000;
    dc->vmsd = &vmstate_spapr_llan;
}

static const TypeInfo spapr_vlan_info = {
    .name          = TYPE_VIO_SPAPR_VLAN_DEVICE,
    .parent        = TYPE_VIO_SPAPR_DEVICE,
    .instance_size = sizeof(SpaprVioVlan),
    .class_init    = spapr_vlan_class_init,
    .instance_init = spapr_vlan_instance_init,
    .instance_finalize = spapr_vlan_instance_finalize,
};

static void spapr_vlan_register_types(void)
{
    spapr_register_hypercall(H_REGISTER_LOGICAL_LAN, h_register_logical_lan);
    spapr_register_hypercall(H_FREE_LOGICAL_LAN, h_free_logical_lan);
    spapr_register_hypercall(H_SEND_LOGICAL_LAN, h_send_logical_lan);
    spapr_register_hypercall(H_ADD_LOGICAL_LAN_BUFFER,
                             h_add_logical_lan_buffer);
    spapr_register_hypercall(H_MULTICAST_CTRL, h_multicast_ctrl);
    spapr_register_hypercall(H_CHANGE_LOGICAL_LAN_MAC,
                             h_change_logical_lan_mac);
    type_register_static(&spapr_vlan_info);
}

type_init(spapr_vlan_register_types)